public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-19 10:45 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-19 10:45 UTC (permalink / raw
  To: gentoo-commits

commit:     c5d7520a8ab2025158549e082b06f5608b5f4642
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 19 10:45:10 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 19 10:45:10 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c5d7520a

Linux patch 4.16.3

 0000_README             |    4 +
 1002_linux-4.16.3.patch | 3089 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3093 insertions(+)

diff --git a/0000_README b/0000_README
index d6bf216..65c079f 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.16.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.2
 
+Patch:  1002_linux-4.16.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.16.3.patch b/1002_linux-4.16.3.patch
new file mode 100644
index 0000000..f3cc2d8
--- /dev/null
+++ b/1002_linux-4.16.3.patch
@@ -0,0 +1,3089 @@
+diff --git a/Makefile b/Makefile
+index f0040b05df30..38df392e45e4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
+index 16a8a804e958..e8fe51f4e97a 100644
+--- a/arch/arm/boot/compressed/misc.c
++++ b/arch/arm/boot/compressed/misc.c
+@@ -128,12 +128,7 @@ asmlinkage void __div0(void)
+ 	error("Attempting division by 0!");
+ }
+ 
+-unsigned long __stack_chk_guard;
+-
+-void __stack_chk_guard_setup(void)
+-{
+-	__stack_chk_guard = 0x000a0dff;
+-}
++const unsigned long __stack_chk_guard = 0x000a0dff;
+ 
+ void __stack_chk_fail(void)
+ {
+@@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
+ {
+ 	int ret;
+ 
+-	__stack_chk_guard_setup();
+-
+ 	output_data		= (unsigned char *)output_start;
+ 	free_mem_ptr		= free_mem_ptr_p;
+ 	free_mem_end_ptr	= free_mem_ptr_end_p;
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index fdf99e9dd4c3..81df9047e110 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -76,12 +76,7 @@ void error(char *x)
+ #include "../../../../lib/decompress_unxz.c"
+ #endif
+ 
+-unsigned long __stack_chk_guard;
+-
+-void __stack_chk_guard_setup(void)
+-{
+-	__stack_chk_guard = 0x000a0dff;
+-}
++const unsigned long __stack_chk_guard = 0x000a0dff;
+ 
+ void __stack_chk_fail(void)
+ {
+@@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
+ {
+ 	unsigned long zimage_start, zimage_size;
+ 
+-	__stack_chk_guard_setup();
+-
+ 	zimage_start = (unsigned long)(&__image_begin);
+ 	zimage_size = (unsigned long)(&__image_end) -
+ 	    (unsigned long)(&__image_begin);
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 29b99b8964aa..d4240aa7f8b1 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -651,6 +651,10 @@ static int match_pci_device(struct device *dev, int index,
+ 					(modpath->mod == PCI_FUNC(devfn)));
+ 	}
+ 
++	/* index might be out of bounds for bc[] */
++	if (index >= 6)
++		return 0;
++
+ 	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+ 	return (modpath->bc[index] == id);
+ }
+diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
+index 8d072c44f300..781c3b9a3e46 100644
+--- a/arch/parisc/kernel/hpmc.S
++++ b/arch/parisc/kernel/hpmc.S
+@@ -84,6 +84,7 @@ END(hpmc_pim_data)
+ 	.text
+ 
+ 	.import intr_save, code
++	.align 16
+ ENTRY_CFI(os_hpmc)
+ .os_hpmc:
+ 
+@@ -300,12 +301,15 @@ os_hpmc_6:
+ 
+ 	b .
+ 	nop
++	.align 16	/* make function length multiple of 16 bytes */
+ ENDPROC_CFI(os_hpmc)
+ .os_hpmc_end:
+ 
+ 
+ 	__INITRODATA
++.globl os_hpmc_size
+ 	.align 4
+-	.export os_hpmc_size
++	.type   os_hpmc_size, @object
++	.size   os_hpmc_size, 4
+ os_hpmc_size:
+ 	.word .os_hpmc_end-.os_hpmc
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index e1c083fbe434..78e6a392330f 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -470,8 +470,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+ 		for (i = 0; i < npages; ++i) {
+ 			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+ 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+-			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
+-				kvm->arch.lpid, 0, 0, 0);
+ 		}
+ 
+ 		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
+@@ -492,8 +490,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+ 		for (i = 0; i < npages; ++i) {
+ 			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
+ 				     "r" (rbvalues[i]), "r" (0));
+-			trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
+-				0, 0, 0, 0);
+ 		}
+ 		asm volatile("ptesync" : : : "memory");
+ 	}
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index 18c1eeb847b2..6f2a193ccccc 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -279,7 +279,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
+ 	if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
+ 			      set, sizeof(compat_sigset_t)))
+ 		return -EFAULT;
+-	if (__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs))
++	if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
+ 		return -EFAULT;
+ 
+ 	/* Store registers needed to create the signal frame */
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 34477c1aee6d..502c90525a0e 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -776,6 +776,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+ 	/* copy and convert to ebcdic */
+ 	memcpy(ipb->hdr.loadparm, buf, lp_len);
+ 	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
++	ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
+ 	return len;
+ }
+ 
+diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
+index 627ce8e75e01..c15cac9251b9 100644
+--- a/arch/sh/boot/compressed/misc.c
++++ b/arch/sh/boot/compressed/misc.c
+@@ -104,12 +104,7 @@ static void error(char *x)
+ 	while(1);	/* Halt */
+ }
+ 
+-unsigned long __stack_chk_guard;
+-
+-void __stack_chk_guard_setup(void)
+-{
+-	__stack_chk_guard = 0x000a0dff;
+-}
++const unsigned long __stack_chk_guard = 0x000a0dff;
+ 
+ void __stack_chk_fail(void)
+ {
+@@ -130,8 +125,6 @@ void decompress_kernel(void)
+ {
+ 	unsigned long output_addr;
+ 
+-	__stack_chk_guard_setup();
+-
+ #ifdef CONFIG_SUPERH64
+ 	output_addr = (CONFIG_MEMORY_START + 0x2000);
+ #else
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 98722773391d..f01eef8b392e 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -319,7 +319,7 @@ struct apic {
+ 	/* Probe, setup and smpboot functions */
+ 	int	(*probe)(void);
+ 	int	(*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+-	int	(*apic_id_valid)(int apicid);
++	int	(*apic_id_valid)(u32 apicid);
+ 	int	(*apic_id_registered)(void);
+ 
+ 	bool	(*check_apicid_used)(physid_mask_t *map, int apicid);
+@@ -492,7 +492,7 @@ static inline unsigned int read_apic_id(void)
+ 	return apic->get_apic_id(reg);
+ }
+ 
+-extern int default_apic_id_valid(int apicid);
++extern int default_apic_id_valid(u32 apicid);
+ extern int default_acpi_madt_oem_check(char *, char *);
+ extern void default_setup_apic_routing(void);
+ 
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index 96ea4b5ba658..340070415c2c 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -346,6 +346,7 @@ enum smca_bank_types {
+ 	SMCA_IF,	/* Instruction Fetch */
+ 	SMCA_L2_CACHE,	/* L2 Cache */
+ 	SMCA_DE,	/* Decoder Unit */
++	SMCA_RESERVED,	/* Reserved */
+ 	SMCA_EX,	/* Execution Unit */
+ 	SMCA_FP,	/* Floating Point */
+ 	SMCA_L3_CACHE,	/* L3 Cache */
+diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
+index aebf60357758..a06cbf019744 100644
+--- a/arch/x86/include/uapi/asm/bootparam.h
++++ b/arch/x86/include/uapi/asm/bootparam.h
+@@ -137,15 +137,15 @@ struct boot_e820_entry {
+  * setup data structure.
+  */
+ struct jailhouse_setup_data {
+-	u16	version;
+-	u16	compatible_version;
+-	u16	pm_timer_address;
+-	u16	num_cpus;
+-	u64	pci_mmconfig_base;
+-	u32	tsc_khz;
+-	u32	apic_khz;
+-	u8	standard_ioapic;
+-	u8	cpu_ids[255];
++	__u16	version;
++	__u16	compatible_version;
++	__u16	pm_timer_address;
++	__u16	num_cpus;
++	__u64	pci_mmconfig_base;
++	__u32	tsc_khz;
++	__u32	apic_khz;
++	__u8	standard_ioapic;
++	__u8	cpu_ids[255];
+ } __attribute__((packed));
+ 
+ /* The so-called "zeropage" */
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 2aa92094b59d..5ee33a6e33bb 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -200,7 +200,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+ {
+ 	struct acpi_madt_local_x2apic *processor = NULL;
+ #ifdef CONFIG_X86_X2APIC
+-	int apic_id;
++	u32 apic_id;
+ 	u8 enabled;
+ #endif
+ 
+@@ -222,10 +222,13 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+ 	 * to not preallocating memory for all NR_CPUS
+ 	 * when we use CPU hotplug.
+ 	 */
+-	if (!apic->apic_id_valid(apic_id) && enabled)
+-		printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+-	else
+-		acpi_register_lapic(apic_id, processor->uid, enabled);
++	if (!apic->apic_id_valid(apic_id)) {
++		if (enabled)
++			pr_warn(PREFIX "x2apic entry ignored\n");
++		return 0;
++	}
++
++	acpi_register_lapic(apic_id, processor->uid, enabled);
+ #else
+ 	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ #endif
+diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c
+index a360801779ae..02b4839478b1 100644
+--- a/arch/x86/kernel/apic/apic_common.c
++++ b/arch/x86/kernel/apic/apic_common.c
+@@ -40,7 +40,7 @@ int default_check_phys_apicid_present(int phys_apicid)
+ 	return physid_isset(phys_apicid, phys_cpu_present_map);
+ }
+ 
+-int default_apic_id_valid(int apicid)
++int default_apic_id_valid(u32 apicid)
+ {
+ 	return (apicid < 255);
+ }
+diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
+index 134e04506ab4..78778b54f904 100644
+--- a/arch/x86/kernel/apic/apic_numachip.c
++++ b/arch/x86/kernel/apic/apic_numachip.c
+@@ -56,7 +56,7 @@ static u32 numachip2_set_apic_id(unsigned int id)
+ 	return id << 24;
+ }
+ 
+-static int numachip_apic_id_valid(int apicid)
++static int numachip_apic_id_valid(u32 apicid)
+ {
+ 	/* Trust what bootloader passes in MADT */
+ 	return 1;
+diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h
+index b107de381cb5..a49b3604027f 100644
+--- a/arch/x86/kernel/apic/x2apic.h
++++ b/arch/x86/kernel/apic/x2apic.h
+@@ -1,6 +1,6 @@
+ /* Common bits for X2APIC cluster/physical modes. */
+ 
+-int x2apic_apic_id_valid(int apicid);
++int x2apic_apic_id_valid(u32 apicid);
+ int x2apic_apic_id_registered(void);
+ void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest);
+ unsigned int x2apic_get_apic_id(unsigned long id);
+diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
+index f8d9d69994e6..e972405eb2b5 100644
+--- a/arch/x86/kernel/apic/x2apic_phys.c
++++ b/arch/x86/kernel/apic/x2apic_phys.c
+@@ -101,7 +101,7 @@ static int x2apic_phys_probe(void)
+ }
+ 
+ /* Common x2apic functions, also used by x2apic_cluster */
+-int x2apic_apic_id_valid(int apicid)
++int x2apic_apic_id_valid(u32 apicid)
+ {
+ 	return 1;
+ }
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index f11910b44638..efaf2d4f9c3c 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -557,7 +557,7 @@ static void uv_send_IPI_all(int vector)
+ 	uv_send_IPI_mask(cpu_online_mask, vector);
+ }
+ 
+-static int uv_apic_id_valid(int apicid)
++static int uv_apic_id_valid(u32 apicid)
+ {
+ 	return 1;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index 0f32ad242324..12bc2863a4d6 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -82,6 +82,7 @@ static struct smca_bank_name smca_names[] = {
+ 	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
+ 	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
+ 	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
++	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
+ 	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
+ 	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
+ 	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
+@@ -110,14 +111,14 @@ const char *smca_get_long_name(enum smca_bank_types t)
+ }
+ EXPORT_SYMBOL_GPL(smca_get_long_name);
+ 
+-static enum smca_bank_types smca_get_bank_type(struct mce *m)
++static enum smca_bank_types smca_get_bank_type(unsigned int bank)
+ {
+ 	struct smca_bank *b;
+ 
+-	if (m->bank >= N_SMCA_BANK_TYPES)
++	if (bank >= MAX_NR_BANKS)
+ 		return N_SMCA_BANK_TYPES;
+ 
+-	b = &smca_banks[m->bank];
++	b = &smca_banks[bank];
+ 	if (!b->hwid)
+ 		return N_SMCA_BANK_TYPES;
+ 
+@@ -127,6 +128,9 @@ static enum smca_bank_types smca_get_bank_type(struct mce *m)
+ static struct smca_hwid smca_hwid_mcatypes[] = {
+ 	/* { bank_type, hwid_mcatype, xec_bitmap } */
+ 
++	/* Reserved type */
++	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },
++
+ 	/* ZN Core (HWID=0xB0) MCA types */
+ 	{ SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
+ 	{ SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
+@@ -432,7 +436,25 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
+ {
+ 	u32 addr = 0, offset = 0;
+ 
++	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
++		return addr;
++
++	/* Get address from already initialized block. */
++	if (per_cpu(threshold_banks, cpu)) {
++		struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
++
++		if (bankp && bankp->blocks) {
++			struct threshold_block *blockp = &bankp->blocks[block];
++
++			if (blockp)
++				return blockp->address;
++		}
++	}
++
+ 	if (mce_flags.smca) {
++		if (smca_get_bank_type(bank) == SMCA_RESERVED)
++			return addr;
++
+ 		if (!block) {
+ 			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
+ 		} else {
+@@ -760,7 +782,7 @@ bool amd_mce_is_memory_error(struct mce *m)
+ 	u8 xec = (m->status >> 16) & 0x1f;
+ 
+ 	if (mce_flags.smca)
+-		return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0;
++		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;
+ 
+ 	return m->bank == 4 && xec == 0x8;
+ }
+@@ -1063,7 +1085,7 @@ static struct kobj_type threshold_ktype = {
+ 
+ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ {
+-	unsigned int bank_type;
++	enum smca_bank_types bank_type;
+ 
+ 	if (!mce_flags.smca) {
+ 		if (b && bank == 4)
+@@ -1072,11 +1094,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
+ 		return th_names[bank];
+ 	}
+ 
+-	if (!smca_banks[bank].hwid)
++	bank_type = smca_get_bank_type(bank);
++	if (bank_type >= N_SMCA_BANK_TYPES)
+ 		return NULL;
+ 
+-	bank_type = smca_banks[bank].hwid->bank_type;
+-
+ 	if (b && bank_type == SMCA_UMC) {
+ 		if (b->block < ARRAY_SIZE(smca_umc_block_names))
+ 			return smca_umc_block_names[b->block];
+diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
+index de58533d3664..2fa79e2e73ea 100644
+--- a/arch/x86/xen/apic.c
++++ b/arch/x86/xen/apic.c
+@@ -112,7 +112,7 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
+ 	return xen_pv_domain();
+ }
+ 
+-static int xen_id_always_valid(int apicid)
++static int xen_id_always_valid(u32 apicid)
+ {
+ 	return 1;
+ }
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 6d82c4f7fadd..3b489527c8f2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -827,7 +827,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ 		bool success = false;
+ 		int ret;
+ 
+-		rcu_read_lock_sched();
++		rcu_read_lock();
+ 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
+ 			/*
+ 			 * The code that sets the PREEMPT_ONLY flag is
+@@ -840,7 +840,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ 				percpu_ref_put(&q->q_usage_counter);
+ 			}
+ 		}
+-		rcu_read_unlock_sched();
++		rcu_read_unlock();
+ 
+ 		if (success)
+ 			return 0;
+diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
+index 9f8cffc8a701..3eb169f15842 100644
+--- a/block/blk-mq-cpumap.c
++++ b/block/blk-mq-cpumap.c
+@@ -16,11 +16,6 @@
+ 
+ static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
+ {
+-	/*
+-	 * Non present CPU will be mapped to queue index 0.
+-	 */
+-	if (!cpu_present(cpu))
+-		return 0;
+ 	return cpu % nr_queues;
+ }
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 16e83e6df404..56e0c3699f9e 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1188,7 +1188,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ 		struct blk_mq_queue_data bd;
+ 
+ 		rq = list_first_entry(list, struct request, queuelist);
+-		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
++
++		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
++		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
++			break;
++
++		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+ 			/*
+ 			 * The initial allocation attempt failed, so we need to
+ 			 * rerun the hardware queue when a tag is freed. The
+@@ -1197,8 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ 			 * we'll re-run it below.
+ 			 */
+ 			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
+-				if (got_budget)
+-					blk_mq_put_dispatch_budget(hctx);
++				blk_mq_put_dispatch_budget(hctx);
+ 				/*
+ 				 * For non-shared tags, the RESTART check
+ 				 * will suffice.
+@@ -1209,11 +1213,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
+ 			}
+ 		}
+ 
+-		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+-			blk_mq_put_driver_tag(rq);
+-			break;
+-		}
+-
+ 		list_del_init(&rq->queuelist);
+ 
+ 		bd.rq = rq;
+@@ -1812,11 +1811,11 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ 	if (q->elevator && !bypass_insert)
+ 		goto insert;
+ 
+-	if (!blk_mq_get_driver_tag(rq, NULL, false))
++	if (!blk_mq_get_dispatch_budget(hctx))
+ 		goto insert;
+ 
+-	if (!blk_mq_get_dispatch_budget(hctx)) {
+-		blk_mq_put_driver_tag(rq);
++	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
++		blk_mq_put_dispatch_budget(hctx);
+ 		goto insert;
+ 	}
+ 
+@@ -2440,6 +2439,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
+ 		 */
+ 		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
+ 				cpu_online_mask);
++		if (hctx->next_cpu >= nr_cpu_ids)
++			hctx->next_cpu = cpumask_first(hctx->cpumask);
+ 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
+ 	}
+ }
+diff --git a/block/blk-timeout.c b/block/blk-timeout.c
+index a05e3676d24a..f0e6e412891f 100644
+--- a/block/blk-timeout.c
++++ b/block/blk-timeout.c
+@@ -165,7 +165,7 @@ void blk_abort_request(struct request *req)
+ 		 * No need for fancy synchronizations.
+ 		 */
+ 		blk_rq_set_deadline(req, jiffies);
+-		mod_timer(&req->q->timeout, 0);
++		kblockd_schedule_work(&req->q->timeout_work);
+ 	} else {
+ 		if (blk_mark_rq_complete(req))
+ 			return;
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index eb09ef55c38a..9f8f39d49396 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -3024,15 +3024,21 @@ static void acpi_nfit_scrub(struct work_struct *work)
+ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
+ {
+ 	struct nfit_spa *nfit_spa;
+-	int rc;
+ 
+-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+-		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
+-			/* BLK regions don't need to wait for ars results */
+-			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+-			if (rc)
+-				return rc;
+-		}
++	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
++		int rc, type = nfit_spa_type(nfit_spa->spa);
++
++		/* PMEM and VMEM will be registered by the ARS workqueue */
++		if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE)
++			continue;
++		/* BLK apertures belong to BLK region registration below */
++		if (type == NFIT_SPA_BDW)
++			continue;
++		/* BLK regions don't need to wait for ARS results */
++		rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
++		if (rc)
++			return rc;
++	}
+ 
+ 	acpi_desc->ars_start_flags = 0;
+ 	if (!acpi_desc->cancel)
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index ee62d2d517bf..fe92cb972dd1 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1103,11 +1103,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ 	if (info->lo_encrypt_type) {
+ 		unsigned int type = info->lo_encrypt_type;
+ 
+-		if (type >= MAX_LO_CRYPT)
+-			return -EINVAL;
++		if (type >= MAX_LO_CRYPT) {
++			err = -EINVAL;
++			goto exit;
++		}
+ 		xfer = xfer_funcs[type];
+-		if (xfer == NULL)
+-			return -EINVAL;
++		if (xfer == NULL) {
++			err = -EINVAL;
++			goto exit;
++		}
+ 	} else
+ 		xfer = NULL;
+ 
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 47a4127a6067..1a81f6b8c2ce 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -795,22 +795,6 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
+ #ifdef CONFIG_ACPI
+ /* IRQ polarity of some chipsets are not defined correctly in ACPI table. */
+ static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = {
+-	{
+-		.ident = "Asus T100TA",
+-		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
+-					"ASUSTeK COMPUTER INC."),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+-		},
+-	},
+-	{
+-		.ident = "Asus T100CHI",
+-		.matches = {
+-			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
+-					"ASUSTeK COMPUTER INC."),
+-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
+-		},
+-	},
+ 	{	/* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ 		.ident = "Lenovo ThinkPad 8",
+ 		.matches = {
+@@ -838,7 +822,9 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
+ 	switch (ares->type) {
+ 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+ 		irq = &ares->data.extended_irq;
+-		dev->irq_active_low = irq->polarity == ACPI_ACTIVE_LOW;
++		if (irq->polarity != ACPI_ACTIVE_LOW)
++			dev_info(dev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n");
++		dev->irq_active_low = true;
+ 		break;
+ 
+ 	case ACPI_RESOURCE_TYPE_GPIO:
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 6768cb2dd740..f5b2d69316a1 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -252,6 +252,9 @@ struct smi_info {
+ 	/* Default driver model device. */
+ 	struct platform_device *pdev;
+ 
++	/* Have we added the device group to the device? */
++	bool dev_group_added;
++
+ 	/* Counters and things for the proc filesystem. */
+ 	atomic_t stats[SI_NUM_STATS];
+ 
+@@ -2027,8 +2030,8 @@ int ipmi_si_add_smi(struct si_sm_io *io)
+ 	if (initialized) {
+ 		rv = try_smi_init(new_smi);
+ 		if (rv) {
+-			mutex_unlock(&smi_infos_lock);
+ 			cleanup_one_si(new_smi);
++			mutex_unlock(&smi_infos_lock);
+ 			return rv;
+ 		}
+ 	}
+@@ -2187,6 +2190,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 			rv);
+ 		goto out_err_stop_timer;
+ 	}
++	new_smi->dev_group_added = true;
+ 
+ 	rv = ipmi_register_smi(&handlers,
+ 			       new_smi,
+@@ -2240,7 +2244,10 @@ static int try_smi_init(struct smi_info *new_smi)
+ 	return 0;
+ 
+ out_err_remove_attrs:
+-	device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
++	if (new_smi->dev_group_added) {
++		device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
++		new_smi->dev_group_added = false;
++	}
+ 	dev_set_drvdata(new_smi->io.dev, NULL);
+ 
+ out_err_stop_timer:
+@@ -2288,6 +2295,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 		else
+ 			platform_device_put(new_smi->pdev);
+ 		new_smi->pdev = NULL;
++		new_smi->io.dev = NULL;
+ 	}
+ 
+ 	kfree(init_name);
+@@ -2384,8 +2392,10 @@ static void cleanup_one_si(struct smi_info *to_clean)
+ 		}
+ 	}
+ 
+-	device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
+-	dev_set_drvdata(to_clean->io.dev, NULL);
++	if (to_clean->dev_group_added)
++		device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
++	if (to_clean->io.dev)
++		dev_set_drvdata(to_clean->io.dev, NULL);
+ 
+ 	list_del(&to_clean->link);
+ 
+diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
+index a11a671c7a38..2ab4d61ee47e 100644
+--- a/drivers/edac/mce_amd.c
++++ b/drivers/edac/mce_amd.c
+@@ -854,21 +854,24 @@ static void decode_mc6_mce(struct mce *m)
+ static void decode_smca_error(struct mce *m)
+ {
+ 	struct smca_hwid *hwid;
+-	unsigned int bank_type;
++	enum smca_bank_types bank_type;
+ 	const char *ip_name;
+ 	u8 xec = XEC(m->status, xec_mask);
+ 
+ 	if (m->bank >= ARRAY_SIZE(smca_banks))
+ 		return;
+ 
+-	if (x86_family(m->cpuid) >= 0x17 && m->bank == 4)
+-		pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
+-
+ 	hwid = smca_banks[m->bank].hwid;
+ 	if (!hwid)
+ 		return;
+ 
+ 	bank_type = hwid->bank_type;
++
++	if (bank_type == SMCA_RESERVED) {
++		pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
++		return;
++	}
++
+ 	ip_name = smca_get_long_name(bank_type);
+ 
+ 	pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 31f5ad605e59..5b6aeccd3d90 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -240,9 +240,10 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 	 * may be slow
+ 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ 	 */
+-
++#ifndef CONFIG_COMPILE_TEST
+ #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ 	 thanks to write-combining
++#endif
+ 
+ 	if (bo->flags & RADEON_GEM_GTT_WC)
+ 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index c21020b69114..55ee5e87073a 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -71,7 +71,7 @@ static const struct vmbus_device vmbus_devs[] = {
+ 	/* PCIE */
+ 	{ .dev_type = HV_PCIE,
+ 	  HV_PCIE_GUID,
+-	  .perf_device = true,
++	  .perf_device = false,
+ 	},
+ 
+ 	/* Synthetic Frame Buffer */
+diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
+index 4257451f1bd8..0b86ed01e85d 100644
+--- a/drivers/media/platform/vsp1/vsp1_dl.c
++++ b/drivers/media/platform/vsp1/vsp1_dl.c
+@@ -509,7 +509,8 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
+ 		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
+ 			  & VI6_DL_BODY_SIZE_UPD);
+ 	else
+-		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR));
++		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
++			  & VI6_CMD_UPDHDR);
+ }
+ 
+ static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 5198c9eeb348..4312935f1dfc 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
+ static int put_v4l2_window32(struct v4l2_window __user *kp,
+ 			     struct v4l2_window32 __user *up)
+ {
+-	struct v4l2_clip __user *kclips = kp->clips;
++	struct v4l2_clip __user *kclips;
+ 	struct v4l2_clip32 __user *uclips;
+ 	compat_caddr_t p;
+ 	u32 clipcount;
+@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
+ 	if (!clipcount)
+ 		return 0;
+ 
++	if (get_user(kclips, &kp->clips))
++		return -EFAULT;
+ 	if (get_user(p, &up->clips))
+ 		return -EFAULT;
+ 	uclips = compat_ptr(p);
+diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
+index 0301fe426a43..1d0b2208e8fb 100644
+--- a/drivers/media/v4l2-core/v4l2-dev.c
++++ b/drivers/media/v4l2-core/v4l2-dev.c
+@@ -939,10 +939,14 @@ int __video_register_device(struct video_device *vdev,
+ #endif
+ 	vdev->minor = i + minor_offset;
+ 	vdev->num = nr;
+-	devnode_set(vdev);
+ 
+ 	/* Should not happen since we thought this minor was free */
+-	WARN_ON(video_device[vdev->minor] != NULL);
++	if (WARN_ON(video_device[vdev->minor])) {
++		mutex_unlock(&videodev_lock);
++		printk(KERN_ERR "video_device not empty!\n");
++		return -ENFILE;
++	}
++	devnode_set(vdev);
+ 	vdev->index = get_index(vdev);
+ 	video_device[vdev->minor] = vdev;
+ 	mutex_unlock(&videodev_lock);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 5782733959f0..f4e93f5fc204 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+ 		if(x < 0 || x > comp->rslot_limit)
+ 			goto bad;
+ 
++		/* Check if the cstate is initialized */
++		if (!comp->rstate[x].initialized)
++			goto bad;
++
+ 		comp->flags &=~ SLF_TOSS;
+ 		comp->recv_current = x;
+ 	} else {
+@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+ 	if (cs->cs_tcp.doff > 5)
+ 	  memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
+ 	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
++	cs->initialized = true;
+ 	/* Put headers back on packet
+ 	 * Neither header checksum is recalculated
+ 	 */
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index fff4b13eece2..5c42cf81a08b 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -901,6 +901,12 @@ static const struct usb_device_id	products[] = {
+ 				      USB_CDC_SUBCLASS_ETHERNET,
+ 				      USB_CDC_PROTO_NONE),
+ 	.driver_info = (unsigned long)&wwan_info,
++}, {
++	/* Cinterion AHS3 modem by GEMALTO */
++	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
++				      USB_CDC_SUBCLASS_ETHERNET,
++				      USB_CDC_PROTO_NONE),
++	.driver_info = (unsigned long)&wwan_info,
+ }, {
+ 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ 			USB_CDC_PROTO_NONE),
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 55a78eb96961..32cf21716f19 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+ 			offset += 0x100;
+ 		else
+ 			ret = -EINVAL;
+-		ret = lan78xx_read_raw_otp(dev, offset, length, data);
++		if (!ret)
++			ret = lan78xx_read_raw_otp(dev, offset, length, data);
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 396bf05c6bf6..d8b041f48ca8 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
+ 	struct ath_txq *txq;
+ 	int tidno;
+ 
++	rcu_read_lock();
++
+ 	for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
+ 		tid = ath_node_to_tid(an, tidno);
+ 		txq = tid->txq;
+@@ -2909,6 +2911,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
+ 		if (!an->sta)
+ 			break; /* just one multicast ath_atx_tid */
+ 	}
++
++	rcu_read_unlock();
+ }
+ 
+ #ifdef CONFIG_ATH9K_TX99
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index e323d3abb6ac..959de2f8bb28 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -8,6 +8,7 @@
+  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016-2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -36,6 +37,7 @@
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * All rights reserved.
+  * Copyright(c) 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  *
+  * Redistribution and use in source and binary forms, with or without
+  * modification, are permitted provided that the following conditions
+@@ -517,9 +519,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+ 
+ /* 9000 Series */
+-	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
+@@ -544,11 +546,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
+@@ -569,16 +575,42 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
+@@ -595,12 +627,94 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
++	{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
++	{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
++	{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
++	{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
++	{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
+ 	{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+@@ -626,11 +740,44 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+@@ -647,10 +794,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ 	{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
++	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
+ 
+ /* 22000 Series */
+ 	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
+diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+index 121b94f09714..9a1d15b3ce45 100644
+--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+@@ -1450,6 +1450,7 @@ static int rtl8187_probe(struct usb_interface *intf,
+ 		goto err_free_dev;
+ 	}
+ 	mutex_init(&priv->io_mutex);
++	mutex_init(&priv->conf_mutex);
+ 
+ 	SET_IEEE80211_DEV(dev, &intf->dev);
+ 	usb_set_intfdata(intf, dev);
+@@ -1625,7 +1626,6 @@ static int rtl8187_probe(struct usb_interface *intf,
+ 		printk(KERN_ERR "rtl8187: Cannot register device\n");
+ 		goto err_free_dmabuf;
+ 	}
+-	mutex_init(&priv->conf_mutex);
+ 	skb_queue_head_init(&priv->b_tx_status.queue);
+ 
+ 	wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7aeca5db7916..0b9e60861e53 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2793,6 +2793,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
+ 
+ 	list_for_each_entry(h, &subsys->nsheads, entry) {
+ 		if (nvme_ns_ids_valid(&new->ids) &&
++		    !list_empty(&h->list) &&
+ 		    nvme_ns_ids_equal(&new->ids, &h->ids))
+ 			return -EINVAL;
+ 	}
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index 2faf38eab785..cb694d2a1228 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -447,7 +447,6 @@ struct hv_pcibus_device {
+ 	spinlock_t device_list_lock;	/* Protect lists below */
+ 	void __iomem *cfg_addr;
+ 
+-	struct semaphore enum_sem;
+ 	struct list_head resources_for_children;
+ 
+ 	struct list_head children;
+@@ -461,6 +460,8 @@ struct hv_pcibus_device {
+ 	struct retarget_msi_interrupt retarget_msi_interrupt_params;
+ 
+ 	spinlock_t retarget_msi_interrupt_lock;
++
++	struct workqueue_struct *wq;
+ };
+ 
+ /*
+@@ -520,6 +521,8 @@ struct hv_pci_compl {
+ 	s32 completion_status;
+ };
+ 
++static void hv_pci_onchannelcallback(void *context);
++
+ /**
+  * hv_pci_generic_compl() - Invoked for a completion packet
+  * @context:		Set up by the sender of the packet.
+@@ -664,6 +667,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
+ 	}
+ }
+ 
++static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
++{
++	u16 ret;
++	unsigned long flags;
++	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
++			     PCI_VENDOR_ID;
++
++	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
++
++	/* Choose the function to be read. (See comment above) */
++	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
++	/* Make sure the function was chosen before we start reading. */
++	mb();
++	/* Read from that function's config space. */
++	ret = readw(addr);
++	/*
++	 * mb() is not required here, because the spin_unlock_irqrestore()
++	 * is a barrier.
++	 */
++
++	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
++
++	return ret;
++}
++
+ /**
+  * _hv_pcifront_write_config() - Internal PCI config write
+  * @hpdev:	The PCI driver's representation of the device
+@@ -1106,8 +1134,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+ 	 * Since this function is called with IRQ locks held, can't
+ 	 * do normal wait for completion; instead poll.
+ 	 */
+-	while (!try_wait_for_completion(&comp.comp_pkt.host_event))
++	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
++		/* 0xFFFF means an invalid PCI VENDOR ID. */
++		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
++			dev_err_once(&hbus->hdev->device,
++				     "the device has gone\n");
++			goto free_int_desc;
++		}
++
++		/*
++		 * When the higher level interrupt code calls us with
++		 * interrupt disabled, we must poll the channel by calling
++		 * the channel callback directly when channel->target_cpu is
++		 * the current CPU. When the higher level interrupt code
++		 * calls us with interrupt enabled, let's add the
++		 * local_bh_disable()/enable() to avoid race.
++		 */
++		local_bh_disable();
++
++		if (hbus->hdev->channel->target_cpu == smp_processor_id())
++			hv_pci_onchannelcallback(hbus);
++
++		local_bh_enable();
++
++		if (hpdev->state == hv_pcichild_ejecting) {
++			dev_err_once(&hbus->hdev->device,
++				     "the device is being ejected\n");
++			goto free_int_desc;
++		}
++
+ 		udelay(100);
++	}
+ 
+ 	if (comp.comp_pkt.completion_status < 0) {
+ 		dev_err(&hbus->hdev->device,
+@@ -1590,12 +1647,8 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
+  * It must also treat the omission of a previously observed device as
+  * notification that the device no longer exists.
+  *
+- * Note that this function is a work item, and it may not be
+- * invoked in the order that it was queued.  Back to back
+- * updates of the list of present devices may involve queuing
+- * multiple work items, and this one may run before ones that
+- * were sent later. As such, this function only does something
+- * if is the last one in the queue.
++ * Note that this function is serialized with hv_eject_device_work(),
++ * because both are pushed to the ordered workqueue hbus->wq.
+  */
+ static void pci_devices_present_work(struct work_struct *work)
+ {
+@@ -1616,11 +1669,6 @@ static void pci_devices_present_work(struct work_struct *work)
+ 
+ 	INIT_LIST_HEAD(&removed);
+ 
+-	if (down_interruptible(&hbus->enum_sem)) {
+-		put_hvpcibus(hbus);
+-		return;
+-	}
+-
+ 	/* Pull this off the queue and process it if it was the last one. */
+ 	spin_lock_irqsave(&hbus->device_list_lock, flags);
+ 	while (!list_empty(&hbus->dr_list)) {
+@@ -1637,7 +1685,6 @@ static void pci_devices_present_work(struct work_struct *work)
+ 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ 
+ 	if (!dr) {
+-		up(&hbus->enum_sem);
+ 		put_hvpcibus(hbus);
+ 		return;
+ 	}
+@@ -1724,7 +1771,6 @@ static void pci_devices_present_work(struct work_struct *work)
+ 		break;
+ 	}
+ 
+-	up(&hbus->enum_sem);
+ 	put_hvpcibus(hbus);
+ 	kfree(dr);
+ }
+@@ -1770,7 +1816,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ 
+ 	get_hvpcibus(hbus);
+-	schedule_work(&dr_wrk->wrk);
++	queue_work(hbus->wq, &dr_wrk->wrk);
+ }
+ 
+ /**
+@@ -1848,7 +1894,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
+ 	get_pcichild(hpdev, hv_pcidev_ref_pnp);
+ 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
+ 	get_hvpcibus(hpdev->hbus);
+-	schedule_work(&hpdev->wrk);
++	queue_work(hpdev->hbus->wq, &hpdev->wrk);
+ }
+ 
+ /**
+@@ -2461,13 +2507,18 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 	spin_lock_init(&hbus->config_lock);
+ 	spin_lock_init(&hbus->device_list_lock);
+ 	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
+-	sema_init(&hbus->enum_sem, 1);
+ 	init_completion(&hbus->remove_event);
++	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
++					   hbus->sysdata.domain);
++	if (!hbus->wq) {
++		ret = -ENOMEM;
++		goto free_bus;
++	}
+ 
+ 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+ 			 hv_pci_onchannelcallback, hbus);
+ 	if (ret)
+-		goto free_bus;
++		goto destroy_wq;
+ 
+ 	hv_set_drvdata(hdev, hbus);
+ 
+@@ -2536,6 +2587,8 @@ static int hv_pci_probe(struct hv_device *hdev,
+ 	hv_free_config_window(hbus);
+ close:
+ 	vmbus_close(hdev->channel);
++destroy_wq:
++	destroy_workqueue(hbus->wq);
+ free_bus:
+ 	free_page((unsigned long)hbus);
+ 	return ret;
+@@ -2615,6 +2668,7 @@ static int hv_pci_remove(struct hv_device *hdev)
+ 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
+ 	put_hvpcibus(hbus);
+ 	wait_for_completion(&hbus->remove_event);
++	destroy_workqueue(hbus->wq);
+ 	free_page((unsigned long)hbus);
+ 	return 0;
+ }
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index d5b02de02a3a..bfad63b5a13d 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -128,7 +128,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ 			int start, int count, int auto_ack)
+ {
+-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
++	int rc, tmp_count = count, tmp_start = start, nr = q->nr;
+ 	unsigned int ccq = 0;
+ 
+ 	qperf_inc(q, eqbs);
+@@ -151,14 +151,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ 		qperf_inc(q, eqbs_partial);
+ 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ 			tmp_count);
+-		/*
+-		 * Retry once, if that fails bail out and process the
+-		 * extracted buffers before trying again.
+-		 */
+-		if (!retried++)
+-			goto again;
+-		else
+-			return count - tmp_count;
++		return count - tmp_count;
+ 	}
+ 
+ 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+@@ -214,7 +207,10 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
+ 	return 0;
+ }
+ 
+-/* returns number of examined buffers and their common state in *state */
++/*
++ * Returns number of examined buffers and their common state in *state.
++ * Requested number of buffers-to-examine must be > 0.
++ */
+ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ 				 unsigned char *state, unsigned int count,
+ 				 int auto_ack, int merge_pending)
+@@ -225,17 +221,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ 	if (is_qebsm(q))
+ 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+ 
+-	for (i = 0; i < count; i++) {
+-		if (!__state) {
+-			__state = q->slsb.val[bufnr];
+-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+-				__state = SLSB_P_OUTPUT_EMPTY;
+-		} else if (merge_pending) {
+-			if ((q->slsb.val[bufnr] & __state) != __state)
+-				break;
+-		} else if (q->slsb.val[bufnr] != __state)
+-			break;
++	/* get initial state: */
++	__state = q->slsb.val[bufnr];
++	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
++		__state = SLSB_P_OUTPUT_EMPTY;
++
++	for (i = 1; i < count; i++) {
+ 		bufnr = next_buf(bufnr);
++
++		/* merge PENDING into EMPTY: */
++		if (merge_pending &&
++		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
++		    __state == SLSB_P_OUTPUT_EMPTY)
++			continue;
++
++		/* stop if next state differs from initial state: */
++		if (q->slsb.val[bufnr] != __state)
++			break;
+ 	}
+ 	*state = __state;
+ 	return i;
+diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
+index 03dc04739225..c44d7c7ffc92 100644
+--- a/drivers/sbus/char/oradax.c
++++ b/drivers/sbus/char/oradax.c
+@@ -880,7 +880,7 @@ static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
+ 	dax_dbg("args: ccb_buf_len=%ld, idx=%d", count, idx);
+ 
+ 	/* for given index and length, verify ca_buf range exists */
+-	if (idx + nccbs >= DAX_CA_ELEMS) {
++	if (idx < 0 || idx > (DAX_CA_ELEMS - nccbs)) {
+ 		ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
+ 		return 0;
+ 	}
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 5c5dcca4d1da..e1cf8c0d73dd 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -471,9 +471,6 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ 
+ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+ {
+-	if (!ha->req_q_map)
+-		return;
+-
+ 	if (IS_QLAFX00(ha)) {
+ 		if (req && req->ring_fx00)
+ 			dma_free_coherent(&ha->pdev->dev,
+@@ -484,17 +481,14 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
+ 		(req->length + 1) * sizeof(request_t),
+ 		req->ring, req->dma);
+ 
+-	if (req) {
++	if (req)
+ 		kfree(req->outstanding_cmds);
+-		kfree(req);
+-	}
++
++	kfree(req);
+ }
+ 
+ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+ {
+-	if (!ha->rsp_q_map)
+-		return;
+-
+ 	if (IS_QLAFX00(ha)) {
+ 		if (rsp && rsp->ring)
+ 			dma_free_coherent(&ha->pdev->dev,
+@@ -505,8 +499,7 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 		(rsp->length + 1) * sizeof(response_t),
+ 		rsp->ring, rsp->dma);
+ 	}
+-	if (rsp)
+-		kfree(rsp);
++	kfree(rsp);
+ }
+ 
+ static void qla2x00_free_queues(struct qla_hw_data *ha)
+@@ -3107,7 +3100,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 		goto probe_failed;
+ 
+ 	/* Alloc arrays of request and response ring ptrs */
+-	if (qla2x00_alloc_queues(ha, req, rsp)) {
++	ret = qla2x00_alloc_queues(ha, req, rsp);
++	if (ret) {
+ 		ql_log(ql_log_fatal, base_vha, 0x003d,
+ 		    "Failed to allocate memory for queue pointers..."
+ 		    "aborting.\n");
+@@ -3408,8 +3402,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ 	}
+ 
+ 	qla2x00_free_device(base_vha);
+-
+ 	scsi_host_put(base_vha->host);
++	/*
++	 * Need to NULL out local req/rsp after
++	 * qla2x00_free_device => qla2x00_free_queues frees
++	 * what these are pointing to. Or else we'll
++	 * fall over below in qla2x00_free_req/rsp_que.
++	 */
++	req = NULL;
++	rsp = NULL;
+ 
+ probe_hw_failed:
+ 	qla2x00_mem_free(ha);
+@@ -4115,6 +4116,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	(*rsp)->dma = 0;
+ fail_rsp_ring:
+ 	kfree(*rsp);
++	*rsp = NULL;
+ fail_rsp:
+ 	dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
+ 		sizeof(request_t), (*req)->ring, (*req)->dma);
+@@ -4122,6 +4124,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	(*req)->dma = 0;
+ fail_req_ring:
+ 	kfree(*req);
++	*req = NULL;
+ fail_req:
+ 	dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
+ 		ha->ct_sns, ha->ct_sns_dma);
+@@ -4509,16 +4512,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
+ 		dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
+ 			ha->init_cb, ha->init_cb_dma);
+ 
+-	if (ha->optrom_buffer)
+-		vfree(ha->optrom_buffer);
+-	if (ha->nvram)
+-		kfree(ha->nvram);
+-	if (ha->npiv_info)
+-		kfree(ha->npiv_info);
+-	if (ha->swl)
+-		kfree(ha->swl);
+-	if (ha->loop_id_map)
+-		kfree(ha->loop_id_map);
++	vfree(ha->optrom_buffer);
++	kfree(ha->nvram);
++	kfree(ha->npiv_info);
++	kfree(ha->swl);
++	kfree(ha->loop_id_map);
+ 
+ 	ha->srb_mempool = NULL;
+ 	ha->ctx_mempool = NULL;
+diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
+index b88b5dbbc444..188f30572aa1 100644
+--- a/drivers/scsi/scsi_dh.c
++++ b/drivers/scsi/scsi_dh.c
+@@ -112,6 +112,9 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
+ {
+ 	struct scsi_device_handler *dh;
+ 
++	if (!name || strlen(name) == 0)
++		return NULL;
++
+ 	dh = __scsi_dh_lookup(name);
+ 	if (!dh) {
+ 		request_module("scsi_dh_%s", name);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index c84f931388f2..912eacdc2d83 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -721,8 +721,6 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
+ 		int result)
+ {
+ 	switch (host_byte(result)) {
+-	case DID_OK:
+-		return BLK_STS_OK;
+ 	case DID_TRANSPORT_FAILFAST:
+ 		return BLK_STS_TRANSPORT;
+ 	case DID_TARGET_FAILURE:
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 5320039671b7..be6a4b6a76c6 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -744,7 +744,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
+ 		struct iov_iter t;
+ 		void __user *uaddr = vhost_vq_meta_fetch(vq,
+ 				     (u64)(uintptr_t)to, size,
+-				     VHOST_ADDR_DESC);
++				     VHOST_ADDR_USED);
+ 
+ 		if (uaddr)
+ 			return __copy_to_user(uaddr, from, size);
+@@ -1244,10 +1244,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
+ /* Caller should have vq mutex and device mutex */
+ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+ {
+-	int ret = vq_log_access_ok(vq, vq->log_base);
++	if (!vq_log_access_ok(vq, vq->log_base))
++		return 0;
+ 
+-	if (ret || vq->iotlb)
+-		return ret;
++	/* Access validation occurs at prefetch time with IOTLB */
++	if (vq->iotlb)
++		return 1;
+ 
+ 	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
+ }
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index a493e99bed21..81a84b3c1c50 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -365,7 +365,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
+ 			if (WARN_ON(rc))
+ 				goto out;
+ 		}
+-	} else if (req->msg.type == XS_TRANSACTION_END) {
++	} else if (req->type == XS_TRANSACTION_END) {
+ 		trans = xenbus_get_transaction(u, req->msg.tx_id);
+ 		if (WARN_ON(!trans))
+ 			goto out;
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index aa720cc44509..b9d93fd532a9 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -191,8 +191,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
+ 	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
+ 		p->max_search = sbi->max_victim_search;
+ 
+-	/* let's select beginning hot/small space first */
+-	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
++	/* let's select beginning hot/small space first in no_heap mode*/
++	if (test_opt(sbi, NOHEAP) &&
++		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+ 		p->offset = 0;
+ 	else
+ 		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index b16a8e6625aa..205b0d934c44 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2164,7 +2164,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
+ 	if (sbi->segs_per_sec != 1)
+ 		return CURSEG_I(sbi, type)->segno;
+ 
+-	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
++	if (test_opt(sbi, NOHEAP) &&
++		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
+ 		return 0;
+ 
+ 	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index b9a254dcc0e7..d508c7844681 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -138,10 +138,14 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ 	/*
+ 	 * page based offset in vm_pgoff could be sufficiently large to
+-	 * overflow a (l)off_t when converted to byte offset.
++	 * overflow a loff_t when converted to byte offset.  This can
++	 * only happen on architectures where sizeof(loff_t) ==
++	 * sizeof(unsigned long).  So, only check in those instances.
+ 	 */
+-	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+-		return -EINVAL;
++	if (sizeof(unsigned long) == sizeof(loff_t)) {
++		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
++			return -EINVAL;
++	}
+ 
+ 	/* must be huge page aligned */
+ 	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
+diff --git a/fs/namei.c b/fs/namei.c
+index cafa365eeb70..b61d6aa9279d 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -222,9 +222,10 @@ getname_kernel(const char * filename)
+ 	if (len <= EMBEDDED_NAME_MAX) {
+ 		result->name = (char *)result->iname;
+ 	} else if (len <= PATH_MAX) {
++		const size_t size = offsetof(struct filename, iname[1]);
+ 		struct filename *tmp;
+ 
+-		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
++		tmp = kmalloc(size, GFP_KERNEL);
+ 		if (unlikely(!tmp)) {
+ 			__putname(result);
+ 			return ERR_PTR(-ENOMEM);
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index a0bed2b2004d..7fce5c3540ce 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -32,6 +32,7 @@
+  *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  */
++#include <linux/fs_struct.h>
+ #include <linux/file.h>
+ #include <linux/falloc.h>
+ #include <linux/slab.h>
+@@ -252,11 +253,13 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
+ 		 * Note: create modes (UNCHECKED,GUARDED...) are the same
+ 		 * in NFSv4 as in v3 except EXCLUSIVE4_1.
+ 		 */
++		current->fs->umask = open->op_umask;
+ 		status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
+ 					open->op_fname.len, &open->op_iattr,
+ 					*resfh, open->op_createmode,
+ 					(u32 *)open->op_verf.data,
+ 					&open->op_truncate, &open->op_created);
++		current->fs->umask = 0;
+ 
+ 		if (!status && open->op_label.len)
+ 			nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
+@@ -603,6 +606,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	if (status)
+ 		return status;
+ 
++	current->fs->umask = create->cr_umask;
+ 	switch (create->cr_type) {
+ 	case NF4LNK:
+ 		status = nfsd_symlink(rqstp, &cstate->current_fh,
+@@ -611,20 +615,22 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		break;
+ 
+ 	case NF4BLK:
++		status = nfserr_inval;
+ 		rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+ 		if (MAJOR(rdev) != create->cr_specdata1 ||
+ 		    MINOR(rdev) != create->cr_specdata2)
+-			return nfserr_inval;
++			goto out_umask;
+ 		status = nfsd_create(rqstp, &cstate->current_fh,
+ 				     create->cr_name, create->cr_namelen,
+ 				     &create->cr_iattr, S_IFBLK, rdev, &resfh);
+ 		break;
+ 
+ 	case NF4CHR:
++		status = nfserr_inval;
+ 		rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
+ 		if (MAJOR(rdev) != create->cr_specdata1 ||
+ 		    MINOR(rdev) != create->cr_specdata2)
+-			return nfserr_inval;
++			goto out_umask;
+ 		status = nfsd_create(rqstp, &cstate->current_fh,
+ 				     create->cr_name, create->cr_namelen,
+ 				     &create->cr_iattr,S_IFCHR, rdev, &resfh);
+@@ -668,6 +674,8 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 	fh_dup2(&cstate->current_fh, &resfh);
+ out:
+ 	fh_put(&resfh);
++out_umask:
++	current->fs->umask = 0;
+ 	return status;
+ }
+ 
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index e502fd16246b..45f0f0500ee4 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -33,7 +33,6 @@
+  *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+  */
+ 
+-#include <linux/fs_struct.h>
+ #include <linux/file.h>
+ #include <linux/slab.h>
+ #include <linux/namei.h>
+@@ -682,7 +681,7 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
+ 
+ 	status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
+ 				    &create->cr_acl, &create->cr_label,
+-				    &current->fs->umask);
++				    &create->cr_umask);
+ 	if (status)
+ 		goto out;
+ 
+@@ -927,7 +926,6 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+ 	case NFS4_OPEN_NOCREATE:
+ 		break;
+ 	case NFS4_OPEN_CREATE:
+-		current->fs->umask = 0;
+ 		READ_BUF(4);
+ 		open->op_createmode = be32_to_cpup(p++);
+ 		switch (open->op_createmode) {
+@@ -935,7 +933,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+ 		case NFS4_CREATE_GUARDED:
+ 			status = nfsd4_decode_fattr(argp, open->op_bmval,
+ 				&open->op_iattr, &open->op_acl, &open->op_label,
+-				&current->fs->umask);
++				&open->op_umask);
+ 			if (status)
+ 				goto out;
+ 			break;
+@@ -950,7 +948,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
+ 			COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
+ 			status = nfsd4_decode_fattr(argp, open->op_bmval,
+ 				&open->op_iattr, &open->op_acl, &open->op_label,
+-				&current->fs->umask);
++				&open->op_umask);
+ 			if (status)
+ 				goto out;
+ 			break;
+diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
+index bc29511b6405..f47c392cbd57 100644
+--- a/fs/nfsd/xdr4.h
++++ b/fs/nfsd/xdr4.h
+@@ -118,6 +118,7 @@ struct nfsd4_create {
+ 	} u;
+ 	u32		cr_bmval[3];        /* request */
+ 	struct iattr	cr_iattr;           /* request */
++	int		cr_umask;           /* request */
+ 	struct nfsd4_change_info  cr_cinfo; /* response */
+ 	struct nfs4_acl *cr_acl;
+ 	struct xdr_netobj cr_label;
+@@ -228,6 +229,7 @@ struct nfsd4_open {
+ 	u32		op_why_no_deleg;    /* response - DELEG_NONE_EXT only */
+ 	u32		op_create;     	    /* request */
+ 	u32		op_createmode;      /* request */
++	int		op_umask;           /* request */
+ 	u32		op_bmval[3];        /* request */
+ 	struct iattr	op_iattr;           /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
+ 	nfs4_verifier	op_verf __attribute__((aligned(32)));
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 3b1bd469accd..1d75b2e96c96 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -118,13 +118,10 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
+ 			 */
+ 			if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
+ 			    (!ovl_verify_lower(dentry->d_sb) &&
+-			     (is_dir || lowerstat.nlink == 1)))
++			     (is_dir || lowerstat.nlink == 1))) {
+ 				stat->ino = lowerstat.ino;
+-
+-			if (samefs)
+-				WARN_ON_ONCE(stat->dev != lowerstat.dev);
+-			else
+ 				stat->dev = ovl_get_pseudo_dev(dentry);
++			}
+ 		}
+ 		if (samefs) {
+ 			/*
+@@ -459,9 +456,20 @@ static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
+ #endif
+ }
+ 
+-static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
++static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
++			   unsigned long ino)
+ {
+-	inode->i_ino = get_next_ino();
++	/*
++	 * When NFS export is enabled and d_ino is consistent with st_ino
++	 * (samefs), set the same value to i_ino, because nfsd readdirplus
++	 * compares d_ino values to i_ino values of child entries. When called
++	 * from ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
++	 * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
++	 */
++	if (inode->i_sb->s_export_op && ovl_same_sb(inode->i_sb))
++		inode->i_ino = ino;
++	else
++		inode->i_ino = get_next_ino();
+ 	inode->i_mode = mode;
+ 	inode->i_flags |= S_NOCMTIME;
+ #ifdef CONFIG_FS_POSIX_ACL
+@@ -597,7 +605,7 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
+ 
+ 	inode = new_inode(sb);
+ 	if (inode)
+-		ovl_fill_inode(inode, mode, rdev);
++		ovl_fill_inode(inode, mode, rdev, 0);
+ 
+ 	return inode;
+ }
+@@ -710,6 +718,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
+ 	struct inode *inode;
+ 	bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index);
+ 	bool is_dir;
++	unsigned long ino = 0;
+ 
+ 	if (!realinode)
+ 		realinode = d_inode(lowerdentry);
+@@ -748,13 +757,14 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
+ 		if (!is_dir)
+ 			nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
+ 		set_nlink(inode, nlink);
++		ino = key->i_ino;
+ 	} else {
+ 		/* Lower hardlink that will be broken on copy up */
+ 		inode = new_inode(sb);
+ 		if (!inode)
+ 			goto out_nomem;
+ 	}
+-	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
++	ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino);
+ 	ovl_inode_init(inode, upperdentry, lowerdentry);
+ 
+ 	if (upperdentry && ovl_is_impuredir(upperdentry))
+diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
+index 70fcfcc684cc..35418317ecf2 100644
+--- a/fs/overlayfs/namei.c
++++ b/fs/overlayfs/namei.c
+@@ -56,6 +56,15 @@ static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
+ 			if (s == next)
+ 				goto invalid;
+ 		}
++		/*
++		 * One of the ancestor path elements in an absolute path
++		 * lookup in ovl_lookup_layer() could have been opaque and
++		 * that will stop further lookup in lower layers (d->stop=true)
++		 * But we have found an absolute redirect in decendant path
++		 * element and that should force continue lookup in lower
++		 * layers (reset d->stop).
++		 */
++		d->stop = false;
+ 	} else {
+ 		if (strchr(buf, '/') != NULL)
+ 			goto invalid;
+@@ -815,7 +824,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 		.is_dir = false,
+ 		.opaque = false,
+ 		.stop = false,
+-		.last = !poe->numlower,
++		.last = ofs->config.redirect_follow ? false : !poe->numlower,
+ 		.redirect = NULL,
+ 	};
+ 
+@@ -873,7 +882,11 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
+ 	for (i = 0; !d.stop && i < poe->numlower; i++) {
+ 		struct ovl_path lower = poe->lowerstack[i];
+ 
+-		d.last = i == poe->numlower - 1;
++		if (!ofs->config.redirect_follow)
++			d.last = i == poe->numlower - 1;
++		else
++			d.last = lower.layer->idx == roe->numlower;
++
+ 		err = ovl_lookup_layer(lower.dentry, &d, &this);
+ 		if (err)
+ 			goto out_put;
+diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
+index 930784a26623..493f9b76fbf6 100644
+--- a/fs/overlayfs/util.c
++++ b/fs/overlayfs/util.c
+@@ -279,12 +279,16 @@ void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect)
+ void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
+ 		    struct dentry *lowerdentry)
+ {
++	struct inode *realinode = d_inode(upperdentry ?: lowerdentry);
++
+ 	if (upperdentry)
+ 		OVL_I(inode)->__upperdentry = upperdentry;
+ 	if (lowerdentry)
+ 		OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
+ 
+-	ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode);
++	ovl_copyattr(realinode, inode);
++	if (!inode->i_ino)
++		inode->i_ino = realinode->i_ino;
+ }
+ 
+ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
+@@ -299,6 +303,8 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
+ 	smp_wmb();
+ 	OVL_I(inode)->__upperdentry = upperdentry;
+ 	if (inode_unhashed(inode)) {
++		if (!inode->i_ino)
++			inode->i_ino = upperinode->i_ino;
+ 		inode->i_private = upperinode;
+ 		__insert_inode_hash(inode, (unsigned long) upperinode);
+ 	}
+diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
+index 53f32022fabe..7f0bda760a58 100644
+--- a/include/media/v4l2-dev.h
++++ b/include/media/v4l2-dev.h
+@@ -33,13 +33,13 @@
+  */
+ enum vfl_devnode_type {
+ 	VFL_TYPE_GRABBER	= 0,
+-	VFL_TYPE_VBI		= 1,
+-	VFL_TYPE_RADIO		= 2,
+-	VFL_TYPE_SUBDEV		= 3,
+-	VFL_TYPE_SDR		= 4,
+-	VFL_TYPE_TOUCH		= 5,
++	VFL_TYPE_VBI,
++	VFL_TYPE_RADIO,
++	VFL_TYPE_SUBDEV,
++	VFL_TYPE_SDR,
++	VFL_TYPE_TOUCH,
++	VFL_TYPE_MAX /* Shall be the last one */
+ };
+-#define VFL_TYPE_MAX VFL_TYPE_TOUCH
+ 
+ /**
+  * enum  vfl_direction - Identifies if a &struct video_device corresponds
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 95ccc1eef558..b619a190ff12 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -895,7 +895,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+ 				     u16 conn_timeout);
+ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 				u8 dst_type, u8 sec_level, u16 conn_timeout,
+-				u8 role);
++				u8 role, bdaddr_t *direct_rpa);
+ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+ 				 u8 sec_level, u8 auth_type);
+ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
+index 8716d5942b65..8fcf8908a694 100644
+--- a/include/net/slhc_vj.h
++++ b/include/net/slhc_vj.h
+@@ -127,6 +127,7 @@ typedef __u32 int32;
+  */
+ struct cstate {
+ 	byte_t	cs_this;	/* connection id number (xmit) */
++	bool	initialized;	/* true if initialized */
+ 	struct cstate *next;	/* next in ring (xmit) */
+ 	struct iphdr cs_ip;	/* ip/tcp hdr from most recent packet */
+ 	struct tcphdr cs_tcp;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 709a55b9ad97..b32bc0698a2a 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4123,6 +4123,9 @@ static void _free_event(struct perf_event *event)
+ 	if (event->ctx)
+ 		put_ctx(event->ctx);
+ 
++	if (event->hw.target)
++		put_task_struct(event->hw.target);
++
+ 	exclusive_event_destroy(event);
+ 	module_put(event->pmu->module);
+ 
+@@ -9488,6 +9491,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		 * and we cannot use the ctx information because we need the
+ 		 * pmu before we get a ctx.
+ 		 */
++		get_task_struct(task);
+ 		event->hw.target = task;
+ 	}
+ 
+@@ -9603,6 +9607,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		perf_detach_cgroup(event);
+ 	if (event->ns)
+ 		put_pid_ns(event->ns);
++	if (event->hw.target)
++		put_task_struct(event->hw.target);
+ 	kfree(event);
+ 
+ 	return ERR_PTR(err);
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 9e498c77ed0e..a42eff7e8c48 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -607,7 +607,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 		/* if no digit is after '-', it's wrong*/
+ 		if (at_start && in_range)
+ 			return -EINVAL;
+-		if (!(a <= b) || !(used_size <= group_size))
++		if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
+ 			return -EINVAL;
+ 		if (b >= nmaskbits)
+ 			return -ERANGE;
+diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
+index b3f235baa05d..413367cf569e 100644
+--- a/lib/test_bitmap.c
++++ b/lib/test_bitmap.c
+@@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
+ 	{-EINVAL, "-1",	NULL, 8, 0},
+ 	{-EINVAL, "-0",	NULL, 8, 0},
+ 	{-EINVAL, "10-1", NULL, 8, 0},
++	{-EINVAL, "0-31:", NULL, 8, 0},
++	{-EINVAL, "0-31:0", NULL, 8, 0},
++	{-EINVAL, "0-31:0/0", NULL, 8, 0},
++	{-EINVAL, "0-31:1/0", NULL, 8, 0},
+ 	{-EINVAL, "0-31:10/1", NULL, 8, 0},
+ };
+ 
+diff --git a/mm/gup.c b/mm/gup.c
+index 6afae32571ca..8f3a06408e28 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1806,9 +1806,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ 	len = (unsigned long) nr_pages << PAGE_SHIFT;
+ 	end = start + len;
+ 
++	if (nr_pages <= 0)
++		return 0;
++
+ 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ 					(void __user *)start, len)))
+-		return 0;
++		return -EFAULT;
+ 
+ 	if (gup_fast_permitted(start, nr_pages, write)) {
+ 		local_irq_disable();
+diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
+index 5c8e2abeaa15..0f44759486e2 100644
+--- a/mm/gup_benchmark.c
++++ b/mm/gup_benchmark.c
+@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
+ 	struct page **pages;
+ 
+ 	nr_pages = gup->size / PAGE_SIZE;
+-	pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
++	pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+ 	if (!pages)
+ 		return -ENOMEM;
+ 
+@@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
+ 		}
+ 
+ 		nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
++		if (nr <= 0)
++			break;
+ 		i += nr;
+ 	}
+ 	end_time = ktime_get();
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index a9682534c377..45ff5dc124cc 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -749,18 +749,31 @@ static bool conn_use_rpa(struct hci_conn *conn)
+ }
+ 
+ static void hci_req_add_le_create_conn(struct hci_request *req,
+-				       struct hci_conn *conn)
++				       struct hci_conn *conn,
++				       bdaddr_t *direct_rpa)
+ {
+ 	struct hci_cp_le_create_conn cp;
+ 	struct hci_dev *hdev = conn->hdev;
+ 	u8 own_addr_type;
+ 
+-	/* Update random address, but set require_privacy to false so
+-	 * that we never connect with an non-resolvable address.
++	/* If direct address was provided we use it instead of current
++	 * address.
+ 	 */
+-	if (hci_update_random_address(req, false, conn_use_rpa(conn),
+-				      &own_addr_type))
+-		return;
++	if (direct_rpa) {
++		if (bacmp(&req->hdev->random_addr, direct_rpa))
++			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
++								direct_rpa);
++
++		/* direct address is always RPA */
++		own_addr_type = ADDR_LE_DEV_RANDOM;
++	} else {
++		/* Update random address, but set require_privacy to false so
++		 * that we never connect with an non-resolvable address.
++		 */
++		if (hci_update_random_address(req, false, conn_use_rpa(conn),
++					      &own_addr_type))
++			return;
++	}
+ 
+ 	memset(&cp, 0, sizeof(cp));
+ 
+@@ -825,7 +838,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
+ 
+ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 				u8 dst_type, u8 sec_level, u16 conn_timeout,
+-				u8 role)
++				u8 role, bdaddr_t *direct_rpa)
+ {
+ 	struct hci_conn_params *params;
+ 	struct hci_conn *conn;
+@@ -940,7 +953,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+ 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+ 	}
+ 
+-	hci_req_add_le_create_conn(&req, conn);
++	hci_req_add_le_create_conn(&req, conn, direct_rpa);
+ 
+ create_conn:
+ 	err = hci_req_run(&req, create_le_conn_complete);
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index cd3bbb766c24..139707cd9d35 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -4648,7 +4648,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
+ /* This function requires the caller holds hdev->lock */
+ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
+ 					      bdaddr_t *addr,
+-					      u8 addr_type, u8 adv_type)
++					      u8 addr_type, u8 adv_type,
++					      bdaddr_t *direct_rpa)
+ {
+ 	struct hci_conn *conn;
+ 	struct hci_conn_params *params;
+@@ -4699,7 +4700,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
+ 	}
+ 
+ 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
+-			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
++			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
++			      direct_rpa);
+ 	if (!IS_ERR(conn)) {
+ 		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+ 		 * by higher layer that tried to connect, if no then
+@@ -4808,8 +4810,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
+ 		bdaddr_type = irk->addr_type;
+ 	}
+ 
+-	/* Check if we have been requested to connect to this device */
+-	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
++	/* Check if we have been requested to connect to this device.
++	 *
++	 * direct_addr is set only for directed advertising reports (it is NULL
++	 * for advertising reports) and is already verified to be RPA above.
++	 */
++	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
++								direct_addr);
+ 	if (conn && type == LE_ADV_IND) {
+ 		/* Store report for later inclusion by
+ 		 * mgmt_device_connected
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index fc6615d59165..9b7907ebfa01 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7156,7 +7156,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+ 			hcon = hci_connect_le(hdev, dst, dst_type,
+ 					      chan->sec_level,
+ 					      HCI_LE_CONN_TIMEOUT,
+-					      HCI_ROLE_SLAVE);
++					      HCI_ROLE_SLAVE, NULL);
+ 		else
+ 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
+ 						   chan->sec_level,
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 0901de42ed85..586a008b1642 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -778,8 +778,14 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
+ 		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
+ 			dev->features |= NETIF_F_GSO_SOFTWARE;
+ 			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
++		} else {
++			dev->features &= ~NETIF_F_GSO_SOFTWARE;
++			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
+ 		}
+ 		dev->features |= NETIF_F_LLTX;
++	} else {
++		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
++		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
+ 	}
+ }
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 14b67dfacc4b..0fbd3ee26165 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -335,26 +335,6 @@ int l2tp_session_register(struct l2tp_session *session,
+ }
+ EXPORT_SYMBOL_GPL(l2tp_session_register);
+ 
+-/* Lookup a tunnel by id
+- */
+-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
+-{
+-	struct l2tp_tunnel *tunnel;
+-	struct l2tp_net *pn = l2tp_pernet(net);
+-
+-	rcu_read_lock_bh();
+-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		if (tunnel->tunnel_id == tunnel_id) {
+-			rcu_read_unlock_bh();
+-			return tunnel;
+-		}
+-	}
+-	rcu_read_unlock_bh();
+-
+-	return NULL;
+-}
+-EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
+-
+ struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
+ {
+ 	struct l2tp_net *pn = l2tp_pernet(net);
+@@ -1436,74 +1416,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ {
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int err;
+-	struct socket *sock = NULL;
+-	struct sock *sk = NULL;
+-	struct l2tp_net *pn;
+ 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
+ 
+-	/* Get the tunnel socket from the fd, which was opened by
+-	 * the userspace L2TP daemon. If not specified, create a
+-	 * kernel socket.
+-	 */
+-	if (fd < 0) {
+-		err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
+-				cfg, &sock);
+-		if (err < 0)
+-			goto err;
+-	} else {
+-		sock = sockfd_lookup(fd, &err);
+-		if (!sock) {
+-			pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
+-			       tunnel_id, fd, err);
+-			err = -EBADF;
+-			goto err;
+-		}
+-
+-		/* Reject namespace mismatches */
+-		if (!net_eq(sock_net(sock->sk), net)) {
+-			pr_err("tunl %u: netns mismatch\n", tunnel_id);
+-			err = -EINVAL;
+-			goto err;
+-		}
+-	}
+-
+-	sk = sock->sk;
+-
+ 	if (cfg != NULL)
+ 		encap = cfg->encap;
+ 
+-	/* Quick sanity checks */
+-	err = -EPROTONOSUPPORT;
+-	if (sk->sk_type != SOCK_DGRAM) {
+-		pr_debug("tunl %hu: fd %d wrong socket type\n",
+-			 tunnel_id, fd);
+-		goto err;
+-	}
+-	switch (encap) {
+-	case L2TP_ENCAPTYPE_UDP:
+-		if (sk->sk_protocol != IPPROTO_UDP) {
+-			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+-			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+-			goto err;
+-		}
+-		break;
+-	case L2TP_ENCAPTYPE_IP:
+-		if (sk->sk_protocol != IPPROTO_L2TP) {
+-			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+-			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+-			goto err;
+-		}
+-		break;
+-	}
+-
+-	/* Check if this socket has already been prepped */
+-	tunnel = l2tp_tunnel(sk);
+-	if (tunnel != NULL) {
+-		/* This socket has already been prepped */
+-		err = -EBUSY;
+-		goto err;
+-	}
+-
+ 	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
+ 	if (tunnel == NULL) {
+ 		err = -ENOMEM;
+@@ -1520,72 +1437,126 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 	rwlock_init(&tunnel->hlist_lock);
+ 	tunnel->acpt_newsess = true;
+ 
+-	/* The net we belong to */
+-	tunnel->l2tp_net = net;
+-	pn = l2tp_pernet(net);
+-
+ 	if (cfg != NULL)
+ 		tunnel->debug = cfg->debug;
+ 
+-	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
+ 	tunnel->encap = encap;
+-	if (encap == L2TP_ENCAPTYPE_UDP) {
+-		struct udp_tunnel_sock_cfg udp_cfg = { };
+-
+-		udp_cfg.sk_user_data = tunnel;
+-		udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
+-		udp_cfg.encap_rcv = l2tp_udp_encap_recv;
+-		udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
+-
+-		setup_udp_tunnel_sock(net, sock, &udp_cfg);
+-	} else {
+-		sk->sk_user_data = tunnel;
+-	}
+ 
+-	/* Bump the reference count. The tunnel context is deleted
+-	 * only when this drops to zero. A reference is also held on
+-	 * the tunnel socket to ensure that it is not released while
+-	 * the tunnel is extant. Must be done before sk_destruct is
+-	 * set.
+-	 */
+ 	refcount_set(&tunnel->ref_count, 1);
+-	sock_hold(sk);
+-	tunnel->sock = sk;
+ 	tunnel->fd = fd;
+ 
+-	/* Hook on the tunnel socket destructor so that we can cleanup
+-	 * if the tunnel socket goes away.
+-	 */
+-	tunnel->old_sk_destruct = sk->sk_destruct;
+-	sk->sk_destruct = &l2tp_tunnel_destruct;
+-	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
+-
+-	sk->sk_allocation = GFP_ATOMIC;
+-
+ 	/* Init delete workqueue struct */
+ 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
+ 
+-	/* Add tunnel to our list */
+ 	INIT_LIST_HEAD(&tunnel->list);
+-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
+ 
+ 	err = 0;
+ err:
+ 	if (tunnelp)
+ 		*tunnelp = tunnel;
+ 
+-	/* If tunnel's socket was created by the kernel, it doesn't
+-	 *  have a file.
+-	 */
+-	if (sock && sock->file)
+-		sockfd_put(sock);
+-
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ 
++static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
++				enum l2tp_encap_type encap)
++{
++	if (!net_eq(sock_net(sk), net))
++		return -EINVAL;
++
++	if (sk->sk_type != SOCK_DGRAM)
++		return -EPROTONOSUPPORT;
++
++	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
++	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
++		return -EPROTONOSUPPORT;
++
++	if (sk->sk_user_data)
++		return -EBUSY;
++
++	return 0;
++}
++
++int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
++			 struct l2tp_tunnel_cfg *cfg)
++{
++	struct l2tp_tunnel *tunnel_walk;
++	struct l2tp_net *pn;
++	struct socket *sock;
++	struct sock *sk;
++	int ret;
++
++	if (tunnel->fd < 0) {
++		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
++					      tunnel->peer_tunnel_id, cfg,
++					      &sock);
++		if (ret < 0)
++			goto err;
++	} else {
++		sock = sockfd_lookup(tunnel->fd, &ret);
++		if (!sock)
++			goto err;
++
++		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
++		if (ret < 0)
++			goto err_sock;
++	}
++
++	sk = sock->sk;
++
++	sock_hold(sk);
++	tunnel->sock = sk;
++	tunnel->l2tp_net = net;
++
++	pn = l2tp_pernet(net);
++
++	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
++	list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
++		if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
++			spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
++
++			ret = -EEXIST;
++			goto err_sock;
++		}
++	}
++	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
++	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
++
++	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
++		struct udp_tunnel_sock_cfg udp_cfg = {
++			.sk_user_data = tunnel,
++			.encap_type = UDP_ENCAP_L2TPINUDP,
++			.encap_rcv = l2tp_udp_encap_recv,
++			.encap_destroy = l2tp_udp_encap_destroy,
++		};
++
++		setup_udp_tunnel_sock(net, sock, &udp_cfg);
++	} else {
++		sk->sk_user_data = tunnel;
++	}
++
++	tunnel->old_sk_destruct = sk->sk_destruct;
++	sk->sk_destruct = &l2tp_tunnel_destruct;
++	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
++				   "l2tp_sock");
++	sk->sk_allocation = GFP_ATOMIC;
++
++	if (tunnel->fd >= 0)
++		sockfd_put(sock);
++
++	return 0;
++
++err_sock:
++	if (tunnel->fd < 0)
++		sock_release(sock);
++	else
++		sockfd_put(sock);
++err:
++	return ret;
++}
++EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
++
+ /* This function is used by the netlink TUNNEL_DELETE command.
+  */
+ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index 2718d0b284d0..ba33cbec71eb 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -220,12 +220,14 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
+ struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
+ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ 						const char *ifname);
+-struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
+ struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
+ 
+ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ 		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ 		       struct l2tp_tunnel **tunnelp);
++int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
++			 struct l2tp_tunnel_cfg *cfg);
++
+ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+ struct l2tp_session *l2tp_session_create(int priv_size,
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index e7ea9c4b89ff..b05dbd9ffcb2 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -236,12 +236,6 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
+ 	if (info->attrs[L2TP_ATTR_DEBUG])
+ 		cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+ 
+-	tunnel = l2tp_tunnel_find(net, tunnel_id);
+-	if (tunnel != NULL) {
+-		ret = -EEXIST;
+-		goto out;
+-	}
+-
+ 	ret = -EINVAL;
+ 	switch (cfg.encap) {
+ 	case L2TP_ENCAPTYPE_UDP:
+@@ -251,9 +245,19 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
+ 		break;
+ 	}
+ 
+-	if (ret >= 0)
+-		ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
+-					 tunnel, L2TP_CMD_TUNNEL_CREATE);
++	if (ret < 0)
++		goto out;
++
++	l2tp_tunnel_inc_refcount(tunnel);
++	ret = l2tp_tunnel_register(tunnel, net, &cfg);
++	if (ret < 0) {
++		kfree(tunnel);
++		goto out;
++	}
++	ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
++				 L2TP_CMD_TUNNEL_CREATE);
++	l2tp_tunnel_dec_refcount(tunnel);
++
+ out:
+ 	return ret;
+ }
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 3b02f24ea9ec..3d7887cc599b 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -698,6 +698,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 			error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
+ 			if (error < 0)
+ 				goto end;
++
++			l2tp_tunnel_inc_refcount(tunnel);
++			error = l2tp_tunnel_register(tunnel, sock_net(sk),
++						     &tcfg);
++			if (error < 0) {
++				kfree(tunnel);
++				goto end;
++			}
++			drop_tunnel = true;
+ 		}
+ 	} else {
+ 		/* Error if we can't find the tunnel */
+diff --git a/net/rds/send.c b/net/rds/send.c
+index b1b0022b8370..85734e5a018e 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006 Oracle.  All rights reserved.
++ * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+  * licenses.  You may choose to be licensed under the terms of the GNU
+@@ -997,10 +997,15 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+ 	if (conn->c_npaths == 0 && hash != 0) {
+ 		rds_send_ping(conn, 0);
+ 
+-		if (conn->c_npaths == 0) {
+-			wait_event_interruptible(conn->c_hs_waitq,
+-						 (conn->c_npaths != 0));
+-		}
++		/* The underlying connection is not up yet.  Need to wait
++		 * until it is up to be sure that the non-zero c_path can be
++		 * used.  But if we are interrupted, we have to use the zero
++		 * c_path in case the connection ends up being non-MP capable.
++		 */
++		if (conn->c_npaths == 0)
++			if (wait_event_interruptible(conn->c_hs_waitq,
++						     conn->c_npaths != 0))
++				hash = 0;
+ 		if (conn->c_npaths == 1)
+ 			hash = 0;
+ 	}
+diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+index 12649c9fedab..8654494b4d0a 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+@@ -237,9 +237,6 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ 
+ 	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ 
+-	err = crypto_ahash_init(req);
+-	if (err)
+-		goto out;
+ 	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+ 	if (err)
+ 		goto out;
+diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
+index a9428daa69f3..b28c55447e63 100644
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -1189,9 +1189,7 @@ static int seq_ns_level_show(struct seq_file *seq, void *v)
+ static int seq_ns_name_show(struct seq_file *seq, void *v)
+ {
+ 	struct aa_label *label = begin_current_label_crit_section();
+-
+-	seq_printf(seq, "%s\n", aa_ns_name(labels_ns(label),
+-					   labels_ns(label), true));
++	seq_printf(seq, "%s\n", labels_ns(label)->base.name);
+ 	end_current_label_crit_section(label);
+ 
+ 	return 0;
+diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
+index 4ac095118717..2ebc00a579fd 100644
+--- a/security/apparmor/include/audit.h
++++ b/security/apparmor/include/audit.h
+@@ -126,6 +126,10 @@ struct apparmor_audit_data {
+ 					const char *target;
+ 					kuid_t ouid;
+ 				} fs;
++				struct {
++					int rlim;
++					unsigned long max;
++				} rlim;
+ 				int signal;
+ 			};
+ 		};
+@@ -134,10 +138,6 @@ struct apparmor_audit_data {
+ 			const char *ns;
+ 			long pos;
+ 		} iface;
+-		struct {
+-			int rlim;
+-			unsigned long max;
+-		} rlim;
+ 		struct {
+ 			const char *src_name;
+ 			const char *type;
+diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
+index 92e62fe95292..5ca47c50dfa7 100644
+--- a/security/apparmor/include/sig_names.h
++++ b/security/apparmor/include/sig_names.h
+@@ -2,6 +2,8 @@
+ 
+ #define SIGUNKNOWN 0
+ #define MAXMAPPED_SIG 35
++#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
++
+ /* provide a mapping of arch signal to internal signal # for mediation
+  * those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO
+  * map to the same entry those that may/or may not get a separate entry
+@@ -56,7 +58,7 @@ static const int sig_map[MAXMAPPED_SIG] = {
+ };
+ 
+ /* this table is ordered post sig_map[sig] mapping */
+-static const char *const sig_names[MAXMAPPED_SIG + 1] = {
++static const char *const sig_names[MAXMAPPED_SIGNAME] = {
+ 	"unknown",
+ 	"hup",
+ 	"int",
+diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
+index b40678f3c1d5..586facd35f7c 100644
+--- a/security/apparmor/ipc.c
++++ b/security/apparmor/ipc.c
+@@ -174,7 +174,7 @@ static void audit_signal_cb(struct audit_buffer *ab, void *va)
+ 			audit_signal_mask(ab, aad(sa)->denied);
+ 		}
+ 	}
+-	if (aad(sa)->signal < MAXMAPPED_SIG)
++	if (aad(sa)->signal < MAXMAPPED_SIGNAME)
+ 		audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
+ 	else
+ 		audit_log_format(ab, " signal=rtmin+%d",
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index aa1593ce551d..f9157aed1289 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1378,6 +1378,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
+ 	intel_pt_clear_tx_flags(decoder);
+ 	decoder->have_tma = false;
+ 	decoder->cbr = 0;
++	decoder->timestamp_insn_cnt = 0;
+ 	decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+ 	decoder->overflow = true;
+ 	return -EOVERFLOW;
+@@ -1616,6 +1617,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
+ 		case INTEL_PT_PWRX:
+ 			intel_pt_log("ERROR: Missing TIP after FUP\n");
+ 			decoder->pkt_state = INTEL_PT_STATE_ERR3;
++			decoder->pkt_step = 0;
+ 			return -ENOENT;
+ 
+ 		case INTEL_PT_OVF:
+@@ -2390,14 +2392,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+ 	return &decoder->state;
+ }
+ 
+-static bool intel_pt_at_psb(unsigned char *buf, size_t len)
+-{
+-	if (len < INTEL_PT_PSB_LEN)
+-		return false;
+-	return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
+-		      INTEL_PT_PSB_LEN);
+-}
+-
+ /**
+  * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
+  * @buf: pointer to buffer pointer
+@@ -2486,6 +2480,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
+  * @buf: buffer
+  * @len: size of buffer
+  * @tsc: TSC value returned
++ * @rem: returns remaining size when TSC is found
+  *
+  * Find a TSC packet in @buf and return the TSC value.  This function assumes
+  * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
+@@ -2493,7 +2488,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
+  *
+  * Return: %true if TSC is found, false otherwise.
+  */
+-static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
++static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
++			      size_t *rem)
+ {
+ 	struct intel_pt_pkt packet;
+ 	int ret;
+@@ -2504,6 +2500,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+ 			return false;
+ 		if (packet.type == INTEL_PT_TSC) {
+ 			*tsc = packet.payload;
++			*rem = len;
+ 			return true;
+ 		}
+ 		if (packet.type == INTEL_PT_PSBEND)
+@@ -2554,6 +2551,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
+  * @len_a: size of first buffer
+  * @buf_b: second buffer
+  * @len_b: size of second buffer
++ * @consecutive: returns true if there is data in buf_b that is consecutive
++ *               to buf_a
+  *
+  * If the trace contains TSC we can look at the last TSC of @buf_a and the
+  * first TSC of @buf_b in order to determine if the buffers overlap, and then
+@@ -2566,33 +2565,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
+ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+ 						size_t len_a,
+ 						unsigned char *buf_b,
+-						size_t len_b)
++						size_t len_b, bool *consecutive)
+ {
+ 	uint64_t tsc_a, tsc_b;
+ 	unsigned char *p;
+-	size_t len;
++	size_t len, rem_a, rem_b;
+ 
+ 	p = intel_pt_last_psb(buf_a, len_a);
+ 	if (!p)
+ 		return buf_b; /* No PSB in buf_a => no overlap */
+ 
+ 	len = len_a - (p - buf_a);
+-	if (!intel_pt_next_tsc(p, len, &tsc_a)) {
++	if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
+ 		/* The last PSB+ in buf_a is incomplete, so go back one more */
+ 		len_a -= len;
+ 		p = intel_pt_last_psb(buf_a, len_a);
+ 		if (!p)
+ 			return buf_b; /* No full PSB+ => assume no overlap */
+ 		len = len_a - (p - buf_a);
+-		if (!intel_pt_next_tsc(p, len, &tsc_a))
++		if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
+ 			return buf_b; /* No TSC in buf_a => assume no overlap */
+ 	}
+ 
+ 	while (1) {
+ 		/* Ignore PSB+ with no TSC */
+-		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
+-		    intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
+-			return buf_b; /* tsc_a < tsc_b => no overlap */
++		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
++			int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
++
++			/* Same TSC, so buffers are consecutive */
++			if (!cmp && rem_b >= rem_a) {
++				*consecutive = true;
++				return buf_b + len_b - (rem_b - rem_a);
++			}
++			if (cmp < 0)
++				return buf_b; /* tsc_a < tsc_b => no overlap */
++		}
+ 
+ 		if (!intel_pt_step_psb(&buf_b, &len_b))
+ 			return buf_b + len_b; /* No PSB in buf_b => no data */
+@@ -2606,6 +2613,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+  * @buf_b: second buffer
+  * @len_b: size of second buffer
+  * @have_tsc: can use TSC packets to detect overlap
++ * @consecutive: returns true if there is data in buf_b that is consecutive
++ *               to buf_a
+  *
+  * When trace samples or snapshots are recorded there is the possibility that
+  * the data overlaps.  Note that, for the purposes of decoding, data is only
+@@ -2616,7 +2625,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+  */
+ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+ 				     unsigned char *buf_b, size_t len_b,
+-				     bool have_tsc)
++				     bool have_tsc, bool *consecutive)
+ {
+ 	unsigned char *found;
+ 
+@@ -2628,7 +2637,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+ 		return buf_b; /* No overlap */
+ 
+ 	if (have_tsc) {
+-		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
++		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
++						  consecutive);
+ 		if (found)
+ 			return found;
+ 	}
+@@ -2643,28 +2653,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+ 	}
+ 
+ 	/* Now len_b >= len_a */
+-	if (len_b > len_a) {
+-		/* The leftover buffer 'b' must start at a PSB */
+-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+-			if (!intel_pt_step_psb(&buf_a, &len_a))
+-				return buf_b; /* No overlap */
+-		}
+-	}
+-
+ 	while (1) {
+ 		/* Potential overlap so check the bytes */
+ 		found = memmem(buf_a, len_a, buf_b, len_a);
+-		if (found)
++		if (found) {
++			*consecutive = true;
+ 			return buf_b + len_a;
++		}
+ 
+ 		/* Try again at next PSB in buffer 'a' */
+ 		if (!intel_pt_step_psb(&buf_a, &len_a))
+ 			return buf_b; /* No overlap */
+-
+-		/* The leftover buffer 'b' must start at a PSB */
+-		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+-			if (!intel_pt_step_psb(&buf_a, &len_a))
+-				return buf_b; /* No overlap */
+-		}
+ 	}
+ }
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+index 921b22e8ca0e..fc1752d50019 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+@@ -117,7 +117,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
+ 
+ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+ 				     unsigned char *buf_b, size_t len_b,
+-				     bool have_tsc);
++				     bool have_tsc, bool *consecutive);
+ 
+ int intel_pt__strerror(int code, char *buf, size_t buflen);
+ 
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 3773d9c54f45..0979a6e8b2b7 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -143,6 +143,7 @@ struct intel_pt_queue {
+ 	bool stop;
+ 	bool step_through_buffers;
+ 	bool use_buffer_pid_tid;
++	bool sync_switch;
+ 	pid_t pid, tid;
+ 	int cpu;
+ 	int switch_state;
+@@ -207,14 +208,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
+ static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
+ 				   struct auxtrace_buffer *b)
+ {
++	bool consecutive = false;
+ 	void *start;
+ 
+ 	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
+-				      pt->have_tsc);
++				      pt->have_tsc, &consecutive);
+ 	if (!start)
+ 		return -EINVAL;
+ 	b->use_size = b->data + b->size - start;
+ 	b->use_data = start;
++	if (b->use_size && consecutive)
++		b->consecutive = true;
+ 	return 0;
+ }
+ 
+@@ -960,10 +964,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
+ 			if (pt->timeless_decoding || !pt->have_sched_switch)
+ 				ptq->use_buffer_pid_tid = true;
+ 		}
++
++		ptq->sync_switch = pt->sync_switch;
+ 	}
+ 
+ 	if (!ptq->on_heap &&
+-	    (!pt->sync_switch ||
++	    (!ptq->sync_switch ||
+ 	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
+ 		const struct intel_pt_state *state;
+ 		int ret;
+@@ -1546,7 +1552,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
+ 	if (pt->synth_opts.last_branch)
+ 		intel_pt_update_last_branch_rb(ptq);
+ 
+-	if (!pt->sync_switch)
++	if (!ptq->sync_switch)
+ 		return 0;
+ 
+ 	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
+@@ -1627,6 +1633,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
+ 	return switch_ip;
+ }
+ 
++static void intel_pt_enable_sync_switch(struct intel_pt *pt)
++{
++	unsigned int i;
++
++	pt->sync_switch = true;
++
++	for (i = 0; i < pt->queues.nr_queues; i++) {
++		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
++		struct intel_pt_queue *ptq = queue->priv;
++
++		if (ptq)
++			ptq->sync_switch = true;
++	}
++}
++
+ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ {
+ 	const struct intel_pt_state *state = ptq->state;
+@@ -1643,7 +1664,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ 			if (pt->switch_ip) {
+ 				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
+ 					     pt->switch_ip, pt->ptss_ip);
+-				pt->sync_switch = true;
++				intel_pt_enable_sync_switch(pt);
+ 			}
+ 		}
+ 	}
+@@ -1659,9 +1680,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ 		if (state->err) {
+ 			if (state->err == INTEL_PT_ERR_NODATA)
+ 				return 1;
+-			if (pt->sync_switch &&
++			if (ptq->sync_switch &&
+ 			    state->from_ip >= pt->kernel_start) {
+-				pt->sync_switch = false;
++				ptq->sync_switch = false;
+ 				intel_pt_next_tid(pt, ptq);
+ 			}
+ 			if (pt->synth_opts.errors) {
+@@ -1687,7 +1708,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+ 				     state->timestamp, state->est_timestamp);
+ 			ptq->timestamp = state->est_timestamp;
+ 		/* Use estimated TSC in unknown switch state */
+-		} else if (pt->sync_switch &&
++		} else if (ptq->sync_switch &&
+ 			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
+ 			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
+ 			   ptq->next_tid == -1) {
+@@ -1834,7 +1855,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
+ 		return 1;
+ 
+ 	ptq = intel_pt_cpu_to_ptq(pt, cpu);
+-	if (!ptq)
++	if (!ptq || !ptq->sync_switch)
+ 		return 1;
+ 
+ 	switch (ptq->switch_state) {


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-06-26 16:13 Alice Ferrazzi
  0 siblings, 0 replies; 20+ messages in thread
From: Alice Ferrazzi @ 2018-06-26 16:13 UTC (permalink / raw
  To: gentoo-commits

commit:     63a8f3beb3c3ac5417e18aad9ed0b4001e5c92b3
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 26 16:08:06 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jun 26 16:08:06 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=63a8f3be

linux kernel 4.16.18

 0000_README              |    4 +
 1017_linux-4.16.18.patch | 2048 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2052 insertions(+)

diff --git a/0000_README b/0000_README
index c683722..89e91eb 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-4.16.17.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.17
 
+Patch:  1017_linux-4.16.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1017_linux-4.16.18.patch b/1017_linux-4.16.18.patch
new file mode 100644
index 0000000..a9651a5
--- /dev/null
+++ b/1017_linux-4.16.18.patch
@@ -0,0 +1,2048 @@
+diff --git a/Makefile b/Makefile
+index 02a4f7f8c613..8a34b54f2a06 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index f01eef8b392e..3d2693fef937 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -442,6 +442,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
+ 
+ #endif /* CONFIG_X86_LOCAL_APIC */
+ 
++extern void apic_ack_irq(struct irq_data *data);
++
+ static inline void ack_APIC_irq(void)
+ {
+ 	/*
+diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
+index 22647a642e98..0af81b590a0c 100644
+--- a/arch/x86/include/asm/trace/irq_vectors.h
++++ b/arch/x86/include/asm/trace/irq_vectors.h
+@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
+ 	TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
+ 		 int ret),
+ 
+-	TP_ARGS(irq, vector, ret, reserved),
++	TP_ARGS(irq, vector, reserved, ret),
+ 
+ 	TP_STRUCT__entry(
+ 		__field(	unsigned int,	irq		)
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 7c5538769f7e..71e912d73c3d 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1859,7 +1859,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
+ 	 * intr-remapping table entry. Hence for the io-apic
+ 	 * EOI we use the pin number.
+ 	 */
+-	ack_APIC_irq();
++	apic_ack_irq(irq_data);
+ 	eoi_ioapic_pin(data->entry.vector, data);
+ }
+ 
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index bb6f7a2148d7..b708f597eee3 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
+ 	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
+ 		return 0;
+ 
++	/*
++	 * Careful here. @apicd might either have move_in_progress set or
++	 * be enqueued for cleanup. Assigning a new vector would either
++	 * leave a stale vector on some CPU around or in case of a pending
++	 * cleanup corrupt the hlist.
++	 */
++	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
++		return -EBUSY;
++
+ 	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
+ 	if (vector > 0)
+ 		apic_update_vector(irqd, vector, cpu);
+@@ -800,13 +809,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
+ 	return 1;
+ }
+ 
+-void apic_ack_edge(struct irq_data *irqd)
++void apic_ack_irq(struct irq_data *irqd)
+ {
+-	irq_complete_move(irqd_cfg(irqd));
+ 	irq_move_irq(irqd);
+ 	ack_APIC_irq();
+ }
+ 
++void apic_ack_edge(struct irq_data *irqd)
++{
++	irq_complete_move(irqd_cfg(irqd));
++	apic_ack_irq(irqd);
++}
++
+ static struct irq_chip lapic_controller = {
+ 	.name			= "APIC",
+ 	.irq_ack		= apic_ack_edge,
+diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
+index 589b948e6e01..316a8875bd90 100644
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -821,6 +821,8 @@ static __init void rdt_quirks(void)
+ 	case INTEL_FAM6_SKYLAKE_X:
+ 		if (boot_cpu_data.x86_stepping <= 4)
+ 			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
++		else
++			set_rdt_options("!l3cat");
+ 	}
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 231ad23b24a9..8fec687b3e44 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -48,7 +48,7 @@ static struct dentry *dfs_inj;
+ 
+ static u8 n_banks;
+ 
+-#define MAX_FLAG_OPT_SIZE	3
++#define MAX_FLAG_OPT_SIZE	4
+ #define NBCFG			0x44
+ 
+ enum injection_type {
+diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
+index e4cb9f4cde8a..fc13cbbb2dce 100644
+--- a/arch/x86/platform/uv/uv_irq.c
++++ b/arch/x86/platform/uv/uv_irq.c
+@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+ 
+ static void uv_noop(struct irq_data *data) { }
+ 
+-static void uv_ack_apic(struct irq_data *data)
+-{
+-	ack_APIC_irq();
+-}
+-
+ static int
+ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+ 		    bool force)
+@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
+ 	.name			= "UV-CORE",
+ 	.irq_mask		= uv_noop,
+ 	.irq_unmask		= uv_noop,
+-	.irq_eoi		= uv_ack_apic,
++	.irq_eoi		= apic_ack_irq,
+ 	.irq_set_affinity	= uv_set_irq_affinity,
+ };
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 00e16588b169..be0e2c95db22 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2513,7 +2513,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+ 
+ 	mutex_lock(&set->tag_list_lock);
+ 	list_del_rcu(&q->tag_set_list);
+-	INIT_LIST_HEAD(&q->tag_set_list);
+ 	if (list_is_singular(&set->tag_list)) {
+ 		/* just transitioned to unshared */
+ 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
+@@ -2521,8 +2520,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
+ 		blk_mq_update_tag_set_depth(set, false);
+ 	}
+ 	mutex_unlock(&set->tag_list_lock);
+-
+ 	synchronize_rcu();
++	INIT_LIST_HEAD(&q->tag_set_list);
+ }
+ 
+ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index d4fb9e0c29ee..d8d45072e4ad 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4553,9 +4553,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM |
+ 						ATA_HORKAGE_NOLPM, },
+ 
+-	/* Sandisk devices which are known to not handle LPM well */
+-	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
+-
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index de4ddd0e8550..b3ed8f9953a8 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -35,7 +35,7 @@ struct zpodd {
+ static int eject_tray(struct ata_device *dev)
+ {
+ 	struct ata_taskfile tf;
+-	static const char cdb[] = {  GPCMD_START_STOP_UNIT,
++	static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_START_STOP_UNIT,
+ 		0, 0, 0,
+ 		0x02,     /* LoEj */
+ 		0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 5847364f25d9..074a3d063a73 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1458,7 +1458,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ 
+ 	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ 	if (!dir)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	dir->class = class;
+ 	kobject_init(&dir->kobj, &class_dir_ktype);
+@@ -1468,7 +1468,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+ 	retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ 	if (retval < 0) {
+ 		kobject_put(&dir->kobj);
+-		return NULL;
++		return ERR_PTR(retval);
+ 	}
+ 	return &dir->kobj;
+ }
+@@ -1775,6 +1775,10 @@ int device_add(struct device *dev)
+ 
+ 	parent = get_device(dev->parent);
+ 	kobj = get_device_parent(dev, parent);
++	if (IS_ERR(kobj)) {
++		error = PTR_ERR(kobj);
++		goto parent_error;
++	}
+ 	if (kobj)
+ 		dev->kobj.parent = kobj;
+ 
+@@ -1873,6 +1877,7 @@ int device_add(struct device *dev)
+ 	kobject_del(&dev->kobj);
+  Error:
+ 	cleanup_glue_dir(dev, glue_dir);
++parent_error:
+ 	put_device(parent);
+ name_error:
+ 	kfree(dev->p);
+@@ -2692,6 +2697,11 @@ int device_move(struct device *dev, struct device *new_parent,
+ 	device_pm_lock();
+ 	new_parent = get_device(new_parent);
+ 	new_parent_kobj = get_device_parent(dev, new_parent);
++	if (IS_ERR(new_parent_kobj)) {
++		error = PTR_ERR(new_parent_kobj);
++		put_device(new_parent);
++		goto out;
++	}
+ 
+ 	pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ 		 __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 86258b00a1d4..6fb64e73bc96 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -173,9 +173,12 @@ static const struct device_attribute pid_attr = {
+ static void nbd_dev_remove(struct nbd_device *nbd)
+ {
+ 	struct gendisk *disk = nbd->disk;
++	struct request_queue *q;
++
+ 	if (disk) {
++		q = disk->queue;
+ 		del_gendisk(disk);
+-		blk_cleanup_queue(disk->queue);
++		blk_cleanup_queue(q);
+ 		blk_mq_free_tag_set(&nbd->tag_set);
+ 		disk->private_data = NULL;
+ 		put_disk(disk);
+@@ -231,9 +234,18 @@ static void nbd_size_clear(struct nbd_device *nbd)
+ static void nbd_size_update(struct nbd_device *nbd)
+ {
+ 	struct nbd_config *config = nbd->config;
++	struct block_device *bdev = bdget_disk(nbd->disk, 0);
++
+ 	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
+ 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
+ 	set_capacity(nbd->disk, config->bytesize >> 9);
++	if (bdev) {
++		if (bdev->bd_disk)
++			bd_set_size(bdev, config->bytesize);
++		else
++			bdev->bd_invalidated = 1;
++		bdput(bdev);
++	}
+ 	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
+ }
+ 
+@@ -243,6 +255,8 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
+ 	struct nbd_config *config = nbd->config;
+ 	config->blksize = blocksize;
+ 	config->bytesize = blocksize * nr_blocks;
++	if (nbd->task_recv != NULL)
++		nbd_size_update(nbd);
+ }
+ 
+ static void nbd_complete_rq(struct request *req)
+@@ -1109,7 +1123,6 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
+ 	if (ret)
+ 		return ret;
+ 
+-	bd_set_size(bdev, config->bytesize);
+ 	if (max_part)
+ 		bdev->bd_invalidated = 1;
+ 	mutex_unlock(&nbd->config_lock);
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 8814c572e263..e8fa2fc43b75 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -703,6 +703,8 @@ static ssize_t store_##file_name					\
+ 	struct cpufreq_policy new_policy;				\
+ 									\
+ 	memcpy(&new_policy, policy, sizeof(*policy));			\
++	new_policy.min = policy->user_policy.min;			\
++	new_policy.max = policy->user_policy.max;			\
+ 									\
+ 	ret = sscanf(buf, "%u", &new_policy.object);			\
+ 	if (ret != 1)							\
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index ca38229b045a..43e14bb512c8 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -165,7 +165,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 			 * calls, so the previous load value can be used then.
+ 			 */
+ 			load = j_cdbs->prev_load;
+-		} else if (unlikely(time_elapsed > 2 * sampling_rate &&
++		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
+ 				    j_cdbs->prev_load)) {
+ 			/*
+ 			 * If the CPU had gone completely idle and a task has
+@@ -185,10 +185,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 			 * clear prev_load to guarantee that the load will be
+ 			 * computed again next time.
+ 			 *
+-			 * Detecting this situation is easy: the governor's
+-			 * utilization update handler would not have run during
+-			 * CPU-idle periods.  Hence, an unusually large
+-			 * 'time_elapsed' (as compared to the sampling rate)
++			 * Detecting this situation is easy: an unusually large
++			 * 'idle_time' (as compared to the sampling rate)
+ 			 * indicates this scenario.
+ 			 */
+ 			load = j_cdbs->prev_load;
+@@ -217,8 +215,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
+ 			j_cdbs->prev_load = load;
+ 		}
+ 
+-		if (time_elapsed > 2 * sampling_rate) {
+-			unsigned int periods = time_elapsed / sampling_rate;
++		if (unlikely((int)idle_time > 2 * sampling_rate)) {
++			unsigned int periods = idle_time / sampling_rate;
+ 
+ 			if (periods < idle_periods)
+ 				idle_periods = periods;
+diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
+index a099b7bf74cd..46d1ab2dea87 100644
+--- a/drivers/cpufreq/ti-cpufreq.c
++++ b/drivers/cpufreq/ti-cpufreq.c
+@@ -226,7 +226,7 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
+ 	opp_data->cpu_dev = get_cpu_device(0);
+ 	if (!opp_data->cpu_dev) {
+ 		pr_err("%s: Failed to get device for CPU0\n", __func__);
+-		ret = ENODEV;
++		ret = -ENODEV;
+ 		goto free_opp_data;
+ 	}
+ 
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 582e449be9fe..a2c53ea3b5ed 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
+ 	kfree(ishtp_dev);
+ }
+ 
+-#ifdef CONFIG_PM
+-static struct device *ish_resume_device;
++static struct device __maybe_unused *ish_resume_device;
+ 
+ /* 50ms to get resume response */
+ #define WAIT_FOR_RESUME_ACK_MS		50
+@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
+  * in that case a simple resume message is enough, others we need
+  * a reset sequence.
+  */
+-static void ish_resume_handler(struct work_struct *work)
++static void __maybe_unused ish_resume_handler(struct work_struct *work)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ 	struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
+  *
+  * Return: 0 to the pm core
+  */
+-static int ish_suspend(struct device *device)
++static int __maybe_unused ish_suspend(struct device *device)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(device);
+ 	struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
+ 	return 0;
+ }
+ 
+-static DECLARE_WORK(resume_work, ish_resume_handler);
++static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+ /**
+  * ish_resume() - ISH resume callback
+  * @device:	device pointer
+@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
+  *
+  * Return: 0 to the pm core
+  */
+-static int ish_resume(struct device *device)
++static int __maybe_unused ish_resume(struct device *device)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(device);
+ 	struct ishtp_device *dev = pci_get_drvdata(pdev);
+@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
+ 	return 0;
+ }
+ 
+-static const struct dev_pm_ops ish_pm_ops = {
+-	.suspend = ish_suspend,
+-	.resume = ish_resume,
+-};
+-#define ISHTP_ISH_PM_OPS	(&ish_pm_ops)
+-#else
+-#define ISHTP_ISH_PM_OPS	NULL
+-#endif /* CONFIG_PM */
++static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+ 
+ static struct pci_driver ish_driver = {
+ 	.name = KBUILD_MODNAME,
+ 	.id_table = ish_pci_tbl,
+ 	.probe = ish_probe,
+ 	.remove = ish_remove,
+-	.driver.pm = ISHTP_ISH_PM_OPS,
++	.driver.pm = &ish_pm_ops,
+ };
+ 
+ module_pci_driver(ish_driver);
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index ee7a37eb159a..545986cfb978 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
+ 		}
+ 	}
+ 
++	/* 2nd-generation Intuos Pro Large has incorrect Y maximum */
++	if (hdev->vendor == USB_VENDOR_ID_WACOM &&
++	    hdev->product == 0x0358 &&
++	    WACOM_PEN_FIELD(field) &&
++	    wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
++		field->logical_maximum = 43200;
++	}
++
+ 	switch (usage->hid) {
+ 	case HID_GD_X:
+ 		features->x_max = field->logical_maximum;
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 8b591c192daf..fedaa53684d8 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -4352,7 +4352,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
+ 
+ static struct irq_chip amd_ir_chip = {
+ 	.name			= "AMD-IR",
+-	.irq_ack		= ir_ack_apic_edge,
++	.irq_ack		= apic_ack_irq,
+ 	.irq_set_affinity	= amd_ir_set_affinity,
+ 	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
+ 	.irq_compose_msi_msg	= ir_compose_msi_msg,
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 3062a154a9fb..967450bd421a 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+ 
+ static struct irq_chip intel_ir_chip = {
+ 	.name			= "INTEL-IR",
+-	.irq_ack		= ir_ack_apic_edge,
++	.irq_ack		= apic_ack_irq,
+ 	.irq_set_affinity	= intel_ir_set_affinity,
+ 	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
+ 	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
+diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
+index 49721b4e1975..65cdf09c2599 100644
+--- a/drivers/iommu/irq_remapping.c
++++ b/drivers/iommu/irq_remapping.c
+@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
+ 		panic(msg);
+ }
+ 
+-void ir_ack_apic_edge(struct irq_data *data)
+-{
+-	ack_APIC_irq();
+-}
+-
+ /**
+  * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
+  *				     device serving request @info
+diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
+index 039c7af7b190..0afef6e43be4 100644
+--- a/drivers/iommu/irq_remapping.h
++++ b/drivers/iommu/irq_remapping.h
+@@ -65,8 +65,6 @@ struct irq_remap_ops {
+ extern struct irq_remap_ops intel_irq_remap_ops;
+ extern struct irq_remap_ops amd_iommu_irq_ops;
+ 
+-extern void ir_ack_apic_edge(struct irq_data *data);
+-
+ #else  /* CONFIG_IRQ_REMAP */
+ 
+ #define irq_remapping_enabled 0
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 58c705f24f96..b594bae1adbd 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1142,6 +1142,7 @@ static int bond_option_primary_set(struct bonding *bond,
+ 				   slave->dev->name);
+ 			rcu_assign_pointer(bond->primary_slave, slave);
+ 			strcpy(bond->params.primary, slave->dev->name);
++			bond->force_primary = true;
+ 			bond_select_active_slave(bond);
+ 			goto out;
+ 		}
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 2a7752c113df..adbfa82b76e9 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -126,8 +126,10 @@ static int netvsc_open(struct net_device *net)
+ 	}
+ 
+ 	rdev = nvdev->extension;
+-	if (!rdev->link_state)
++	if (!rdev->link_state) {
+ 		netif_carrier_on(net);
++		netif_tx_wake_all_queues(net);
++	}
+ 
+ 	if (vf_netdev) {
+ 		/* Setting synthetic device up transparently sets
+diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
+index cd09c3af2117..6e8e42361fd5 100644
+--- a/drivers/net/phy/dp83848.c
++++ b/drivers/net/phy/dp83848.c
+@@ -74,6 +74,25 @@ static int dp83848_config_intr(struct phy_device *phydev)
+ 	return phy_write(phydev, DP83848_MICR, control);
+ }
+ 
++static int dp83848_config_init(struct phy_device *phydev)
++{
++	int err;
++	int val;
++
++	err = genphy_config_init(phydev);
++	if (err < 0)
++		return err;
++
++	/* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
++	 * we check initial value of BMCR Auto negotiation enable bit
++	 */
++	val = phy_read(phydev, MII_BMCR);
++	if (!(val & BMCR_ANENABLE))
++		phydev->autoneg = AUTONEG_DISABLE;
++
++	return 0;
++}
++
+ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ 	{ TI_DP83848C_PHY_ID, 0xfffffff0 },
+ 	{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+@@ -83,7 +102,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
+ };
+ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ 
+-#define DP83848_PHY_DRIVER(_id, _name)				\
++#define DP83848_PHY_DRIVER(_id, _name, _config_init)		\
+ 	{							\
+ 		.phy_id		= _id,				\
+ 		.phy_id_mask	= 0xfffffff0,			\
+@@ -92,7 +111,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ 		.flags		= PHY_HAS_INTERRUPT,		\
+ 								\
+ 		.soft_reset	= genphy_soft_reset,		\
+-		.config_init	= genphy_config_init,		\
++		.config_init	= _config_init,			\
+ 		.suspend	= genphy_suspend,		\
+ 		.resume		= genphy_resume,		\
+ 								\
+@@ -102,10 +121,14 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
+ 	}
+ 
+ static struct phy_driver dp83848_driver[] = {
+-	DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
+-	DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+-	DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
+-	DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
++	DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
++			   genphy_config_init),
++	DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
++			   genphy_config_init),
++	DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
++			   dp83848_config_init),
++	DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
++			   genphy_config_init),
+ };
+ module_phy_driver(dp83848_driver);
+ 
+diff --git a/drivers/net/tap.c b/drivers/net/tap.c
+index 9b6cb780affe..f0f7cd977667 100644
+--- a/drivers/net/tap.c
++++ b/drivers/net/tap.c
+@@ -774,13 +774,16 @@ static ssize_t tap_put_user(struct tap_queue *q,
+ 	int total;
+ 
+ 	if (q->flags & IFF_VNET_HDR) {
++		int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
+ 		struct virtio_net_hdr vnet_hdr;
++
+ 		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+ 		if (iov_iter_count(iter) < vnet_hdr_len)
+ 			return -EINVAL;
+ 
+ 		if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+-					    tap_is_little_endian(q), true))
++					    tap_is_little_endian(q), true,
++					    vlan_hlen))
+ 			BUG();
+ 
+ 		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 24e645c86ae7..b3c58890ef33 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -2062,7 +2062,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ 			return -EINVAL;
+ 
+ 		if (virtio_net_hdr_from_skb(skb, &gso,
+-					    tun_is_little_endian(tun), true)) {
++					    tun_is_little_endian(tun), true,
++					    vlan_hlen)) {
+ 			struct skb_shared_info *sinfo = skb_shinfo(skb);
+ 			pr_err("unexpected GSO type: "
+ 			       "0x%x, gso_size %d, hdr_len %d\n",
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 9e1b74590682..f5316ab68a0a 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1124,7 +1124,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 	 * accordingly. Otherwise, we should check here.
+ 	 */
+ 	if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+-		delayed_ndp_size = ctx->max_ndp_size;
++		delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ 	else
+ 		delayed_ndp_size = 0;
+ 
+@@ -1285,7 +1285,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
+ 	/* If requested, put NDP at end of frame. */
+ 	if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
+ 		nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
+-		cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size);
++		cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_curr_size - ctx->max_ndp_size);
+ 		nth16->wNdpIndex = cpu_to_le16(skb_out->len);
+ 		skb_put_data(skb_out, ctx->delayed_ndp16, ctx->max_ndp_size);
+ 
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 8911e3466e61..89bc5cd4d02f 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1358,7 +1358,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 		hdr = skb_vnet_hdr(skb);
+ 
+ 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+-				    virtio_is_little_endian(vi->vdev), false))
++				    virtio_is_little_endian(vi->vdev), false,
++				    0))
+ 		BUG();
+ 
+ 	if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+index 1fec8e3a6b35..6afcfd1f0eec 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+@@ -8,6 +8,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -30,6 +31,7 @@
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
++ * Copyright(c) 2018        Intel Corporation
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 			       const struct fw_img *image)
+ {
+-	int sec_idx, idx;
++	int sec_idx, idx, ret;
+ 	u32 offset = 0;
+ 
+ 	/*
+@@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 	 */
+ 	if (sec_idx >= image->num_sec - 1) {
+ 		IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+-		iwl_free_fw_paging(fwrt);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err;
+ 	}
+ 
+ 	/* copy the CSS block to the dram */
+ 	IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+ 		     sec_idx);
+ 
++	if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
++		IWL_ERR(fwrt, "CSS block is larger than paging size\n");
++		ret = -EINVAL;
++		goto err;
++	}
++
+ 	memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+ 	       image->sec[sec_idx].data,
+-	       fwrt->fw_paging_db[0].fw_paging_size);
++	       image->sec[sec_idx].len);
+ 	dma_sync_single_for_device(fwrt->trans->dev,
+ 				   fwrt->fw_paging_db[0].fw_paging_phys,
+ 				   fwrt->fw_paging_db[0].fw_paging_size,
+@@ -221,6 +229,14 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 	for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) {
+ 		struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ 
++		if (block->fw_paging_size > image->sec[sec_idx].len - offset) {
++			IWL_ERR(fwrt,
++				"Paging: paging size is larger than remaining data in block %d\n",
++				idx);
++			ret = -EINVAL;
++			goto err;
++		}
++
+ 		memcpy(page_address(block->fw_paging_block),
+ 		       image->sec[sec_idx].data + offset,
+ 		       block->fw_paging_size);
+@@ -231,19 +247,32 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 
+ 		IWL_DEBUG_FW(fwrt,
+ 			     "Paging: copied %d paging bytes to block %d\n",
+-			     fwrt->fw_paging_db[idx].fw_paging_size,
+-			     idx);
++			     block->fw_paging_size, idx);
+ 
+-		offset += fwrt->fw_paging_db[idx].fw_paging_size;
++		offset += block->fw_paging_size;
++
++		if (offset > image->sec[sec_idx].len) {
++			IWL_ERR(fwrt,
++				"Paging: offset goes over section size\n");
++			ret = -EINVAL;
++			goto err;
++		}
+ 	}
+ 
+ 	/* copy the last paging block */
+ 	if (fwrt->num_of_pages_in_last_blk > 0) {
+ 		struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+ 
++		if (image->sec[sec_idx].len - offset > block->fw_paging_size) {
++			IWL_ERR(fwrt,
++				"Paging: last block is larger than paging size\n");
++			ret = -EINVAL;
++			goto err;
++		}
++
+ 		memcpy(page_address(block->fw_paging_block),
+ 		       image->sec[sec_idx].data + offset,
+-		       FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk);
++		       image->sec[sec_idx].len - offset);
+ 		dma_sync_single_for_device(fwrt->trans->dev,
+ 					   block->fw_paging_phys,
+ 					   block->fw_paging_size,
+@@ -255,6 +284,10 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+ 	}
+ 
+ 	return 0;
++
++err:
++	iwl_free_fw_paging(fwrt);
++	return ret;
+ }
+ 
+ static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index dba797b57d73..550dda63563c 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2665,8 +2665,15 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
+ 
+ 	dev_info(dev->ctrl.device, "restart after slot reset\n");
+ 	pci_restore_state(pdev);
+-	nvme_reset_ctrl(&dev->ctrl);
+-	return PCI_ERS_RESULT_RECOVERED;
++	nvme_reset_ctrl_sync(&dev->ctrl);
++
++	switch (dev->ctrl.state) {
++	case NVME_CTRL_LIVE:
++	case NVME_CTRL_ADMIN_ONLY:
++		return PCI_ERS_RESULT_RECOVERED;
++	default:
++		return PCI_ERS_RESULT_DISCONNECT;
++	}
+ }
+ 
+ static void nvme_error_resume(struct pci_dev *pdev)
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 68242f50c303..7a23242fc6d4 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2345,6 +2345,9 @@ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
+ 	struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL);
+ 	if (!node)
+ 		return NULL;
++
++	/* Make sure all padding within the structure is initialized. */
++	memset(&node->msg, 0, sizeof node->msg);
+ 	node->vq = vq;
+ 	node->msg.type = type;
+ 	return node;
+diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
+index 74f2e6e6202a..8851d441e5fd 100644
+--- a/drivers/w1/masters/mxc_w1.c
++++ b/drivers/w1/masters/mxc_w1.c
+@@ -112,6 +112,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ 	if (IS_ERR(mdev->clk))
+ 		return PTR_ERR(mdev->clk);
+ 
++	err = clk_prepare_enable(mdev->clk);
++	if (err)
++		return err;
++
+ 	clkrate = clk_get_rate(mdev->clk);
+ 	if (clkrate < 10000000)
+ 		dev_warn(&pdev->dev,
+@@ -125,12 +129,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	mdev->regs = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(mdev->regs))
+-		return PTR_ERR(mdev->regs);
+-
+-	err = clk_prepare_enable(mdev->clk);
+-	if (err)
+-		return err;
++	if (IS_ERR(mdev->regs)) {
++		err = PTR_ERR(mdev->regs);
++		goto out_disable_clk;
++	}
+ 
+ 	/* Software reset 1-Wire module */
+ 	writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
+@@ -146,8 +148,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
+ 
+ 	err = w1_add_master_device(&mdev->bus_master);
+ 	if (err)
+-		clk_disable_unprepare(mdev->clk);
++		goto out_disable_clk;
+ 
++	return 0;
++
++out_disable_clk:
++	clk_disable_unprepare(mdev->clk);
+ 	return err;
+ }
+ 
+diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
+index a7c5a9861bef..8311e8ed76de 100644
+--- a/fs/binfmt_misc.c
++++ b/fs/binfmt_misc.c
+@@ -387,8 +387,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ 		s = strchr(p, del);
+ 		if (!s)
+ 			goto einval;
+-		*s++ = '\0';
+-		e->offset = simple_strtoul(p, &p, 10);
++		*s = '\0';
++		if (p != s) {
++			int r = kstrtoint(p, 10, &e->offset);
++			if (r != 0 || e->offset < 0)
++				goto einval;
++		}
++		p = s;
+ 		if (*p++)
+ 			goto einval;
+ 		pr_debug("register: offset: %#x\n", e->offset);
+@@ -428,7 +433,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
+ 		if (e->mask &&
+ 		    string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
+ 			goto einval;
+-		if (e->size + e->offset > BINPRM_BUF_SIZE)
++		if (e->size > BINPRM_BUF_SIZE ||
++		    BINPRM_BUF_SIZE - e->size < e->offset)
+ 			goto einval;
+ 		pr_debug("register: magic/mask length: %i\n", e->size);
+ 		if (USE_DEBUG) {
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8b031f40a2f5..a8daf50ea776 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1037,8 +1037,10 @@ static noinline int cow_file_range(struct inode *inode,
+ 				  ram_size, /* ram_bytes */
+ 				  BTRFS_COMPRESS_NONE, /* compress_type */
+ 				  BTRFS_ORDERED_REGULAR /* type */);
+-		if (IS_ERR(em))
++		if (IS_ERR(em)) {
++			ret = PTR_ERR(em);
+ 			goto out_reserve;
++		}
+ 		free_extent_map(em);
+ 
+ 		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 111ee282b777..451579378abb 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2665,8 +2665,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
+ 	}
+ 
+ 	/* Check for compatibility reject unknown flags */
+-	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED)
+-		return -EOPNOTSUPP;
++	if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) {
++		ret = -EOPNOTSUPP;
++		goto out;
++	}
+ 
+ 	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ 		ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
+@@ -3837,11 +3839,6 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ 	    src->i_sb != inode->i_sb)
+ 		return -EXDEV;
+ 
+-	/* don't make the dst file partly checksummed */
+-	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+-	    (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+-		return -EINVAL;
+-
+ 	if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
+ 		return -EISDIR;
+ 
+@@ -3851,6 +3848,13 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
+ 		inode_lock(src);
+ 	}
+ 
++	/* don't make the dst file partly checksummed */
++	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
++	    (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
+ 	/* determine range to clone */
+ 	ret = -EINVAL;
+ 	if (off + len > src->i_size || off + len < off)
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index ec56f33feea9..d964f70eefa9 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -2762,7 +2762,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
+ 			have_csum = scrub_find_csum(sctx, logical, csum);
+ 			if (have_csum == 0)
+ 				++sctx->stat.no_csum;
+-			if (sctx->is_dev_replace && !have_csum) {
++			if (0 && sctx->is_dev_replace && !have_csum) {
+ 				ret = copy_nocow_pages(sctx, logical, l,
+ 						       mirror_num,
+ 						      physical_for_dev_replace);
+diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
+index 4f3884835267..dd95a6fa24bf 100644
+--- a/fs/cifs/cifsacl.h
++++ b/fs/cifs/cifsacl.h
+@@ -98,4 +98,18 @@ struct cifs_ace {
+ 	struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+ 
++/*
++ * Minimum security identifier can be one for system defined Users
++ * and Groups such as NULL SID and World or Built-in accounts such
++ * as Administrator and Guest and consists of
++ * Revision + Num (Sub)Auths + Authority + Domain (one Subauthority)
++ */
++#define MIN_SID_LEN  (1 + 1 + 6 + 4) /* in bytes */
++
++/*
++ * Minimum security descriptor can be one without any SACL and DACL and can
++ * consist of revision, type, and two sids of minimum size for owner and group
++ */
++#define MIN_SEC_DESC_LEN  (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++
+ #endif /* _CIFSACL_H */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 097598543403..91ce0cbaf6bf 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -1277,10 +1277,11 @@ smb2_is_session_expired(char *buf)
+ {
+ 	struct smb2_sync_hdr *shdr = get_sync_hdr(buf);
+ 
+-	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED)
++	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
++	    shdr->Status != STATUS_USER_SESSION_DELETED)
+ 		return false;
+ 
+-	cifs_dbg(FYI, "Session expired\n");
++	cifs_dbg(FYI, "Session expired or deleted\n");
+ 	return true;
+ }
+ 
+@@ -1589,8 +1590,11 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ 		oparms.create_options = 0;
+ 
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return ERR_PTR(-ENOMEM);
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		free_xid(xid);
++		return ERR_PTR(rc);
++	}
+ 
+ 	oparms.tcon = tcon;
+ 	oparms.desired_access = READ_CONTROL;
+@@ -1648,8 +1652,11 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ 		access_flags = WRITE_DAC;
+ 
+ 	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+-	if (!utf16_path)
+-		return -ENOMEM;
++	if (!utf16_path) {
++		rc = -ENOMEM;
++		free_xid(xid);
++		return rc;
++	}
+ 
+ 	oparms.tcon = tcon;
+ 	oparms.desired_access = access_flags;
+@@ -1709,15 +1716,21 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 
+ 	/* if file not oplocked can't be sure whether asking to extend size */
+ 	if (!CIFS_CACHE_READ(cifsi))
+-		if (keep_size == false)
+-			return -EOPNOTSUPP;
++		if (keep_size == false) {
++			rc = -EOPNOTSUPP;
++			free_xid(xid);
++			return rc;
++		}
+ 
+ 	/*
+ 	 * Must check if file sparse since fallocate -z (zero range) assumes
+ 	 * non-sparse allocation
+ 	 */
+-	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE))
+-		return -EOPNOTSUPP;
++	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
++		rc = -EOPNOTSUPP;
++		free_xid(xid);
++		return rc;
++	}
+ 
+ 	/*
+ 	 * need to make sure we are not asked to extend the file since the SMB3
+@@ -1726,8 +1739,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
+ 	 * which for a non sparse file would zero the newly extended range
+ 	 */
+ 	if (keep_size == false)
+-		if (i_size_read(inode) < offset + len)
+-			return -EOPNOTSUPP;
++		if (i_size_read(inode) < offset + len) {
++			rc = -EOPNOTSUPP;
++			free_xid(xid);
++			return rc;
++		}
+ 
+ 	cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+ 
+@@ -1760,8 +1776,11 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
+ 
+ 	/* Need to make file sparse, if not already, before freeing range. */
+ 	/* Consider adding equivalent for compressed since it could also work */
+-	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse))
+-		return -EOPNOTSUPP;
++	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
++		rc = -EOPNOTSUPP;
++		free_xid(xid);
++		return rc;
++	}
+ 
+ 	cifs_dbg(FYI, "offset %lld len %lld", offset, len);
+ 
+@@ -1792,8 +1811,10 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 
+ 	/* if file not oplocked can't be sure whether asking to extend size */
+ 	if (!CIFS_CACHE_READ(cifsi))
+-		if (keep_size == false)
+-			return -EOPNOTSUPP;
++		if (keep_size == false) {
++			free_xid(xid);
++			return rc;
++		}
+ 
+ 	/*
+ 	 * Files are non-sparse by default so falloc may be a no-op
+@@ -1802,14 +1823,16 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 	 */
+ 	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
+ 		if (keep_size == true)
+-			return 0;
++			rc = 0;
+ 		/* check if extending file */
+ 		else if (i_size_read(inode) >= off + len)
+ 			/* not extending file and already not sparse */
+-			return 0;
++			rc = 0;
+ 		/* BB: in future add else clause to extend file */
+ 		else
+-			return -EOPNOTSUPP;
++			rc = -EOPNOTSUPP;
++		free_xid(xid);
++		return rc;
+ 	}
+ 
+ 	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
+@@ -1821,8 +1844,11 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ 		 * ie potentially making a few extra pages at the beginning
+ 		 * or end of the file non-sparse via set_sparse is harmless.
+ 		 */
+-		if ((off > 8192) || (off + len + 8192 < i_size_read(inode)))
+-			return -EOPNOTSUPP;
++		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
++			rc = -EOPNOTSUPP;
++			free_xid(xid);
++			return rc;
++		}
+ 
+ 		rc = smb2_set_sparse(xid, tcon, cfile, inode, false);
+ 	}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 93d3f4a14b32..a0795271fbcf 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1172,6 +1172,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	sess_data->ses = ses;
+ 	sess_data->buf0_type = CIFS_NO_BUFFER;
+ 	sess_data->nls_cp = (struct nls_table *) nls_cp;
++	sess_data->previous_session = ses->Suid;
+ 
+ #ifdef CONFIG_CIFS_SMB311
+ 	/*
+@@ -2270,8 +2271,7 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	return query_info(xid, tcon, persistent_fid, volatile_fid,
+ 			  0, SMB2_O_INFO_SECURITY, additional_info,
+-			  SMB2_MAX_BUFFER_SIZE,
+-			  sizeof(struct smb2_file_all_info), data, plen);
++			  SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
+ }
+ 
+ int
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index c32802c956d5..bf7fa1507e81 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -561,10 +561,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 		unsigned epb = inode->i_sb->s_blocksize / sizeof(u32);
+ 		int i;
+ 
+-		/* Count number blocks in a subtree under 'partial' */
+-		count = 1;
+-		for (i = 0; partial + i != chain + depth - 1; i++)
+-			count *= epb;
++		/*
++		 * Count number blocks in a subtree under 'partial'. At each
++		 * level we count number of complete empty subtrees beyond
++		 * current offset and then descend into the subtree only
++		 * partially beyond current offset.
++		 */
++		count = 0;
++		for (i = partial - chain + 1; i < depth; i++)
++			count = count * epb + (epb - offsets[i] - 1);
++		count++;
+ 		/* Fill in size of a hole we found */
+ 		map->m_pblk = 0;
+ 		map->m_len = min_t(unsigned int, map->m_len, count);
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 70cf4c7b268a..44b4fcdc3755 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -144,6 +144,12 @@ int ext4_find_inline_data_nolock(struct inode *inode)
+ 		goto out;
+ 
+ 	if (!is.s.not_found) {
++		if (is.s.here->e_value_inum) {
++			EXT4_ERROR_INODE(inode, "inline data xattr refers "
++					 "to an external xattr inode");
++			error = -EFSCORRUPTED;
++			goto out;
++		}
+ 		EXT4_I(inode)->i_inline_off = (u16)((void *)is.s.here -
+ 					(void *)ext4_raw_inode(&is.iloc));
+ 		EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE +
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3350454fc5a7..a4eee5daa82d 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4290,28 +4290,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 		EXT4_BLOCK_SIZE_BITS(sb);
+ 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+ 
+-	/* If there are no blocks to remove, return now */
+-	if (first_block >= stop_block)
+-		goto out_stop;
++	/* If there are blocks to remove, do it */
++	if (stop_block > first_block) {
+ 
+-	down_write(&EXT4_I(inode)->i_data_sem);
+-	ext4_discard_preallocations(inode);
++		down_write(&EXT4_I(inode)->i_data_sem);
++		ext4_discard_preallocations(inode);
+ 
+-	ret = ext4_es_remove_extent(inode, first_block,
+-				    stop_block - first_block);
+-	if (ret) {
+-		up_write(&EXT4_I(inode)->i_data_sem);
+-		goto out_stop;
+-	}
++		ret = ext4_es_remove_extent(inode, first_block,
++					    stop_block - first_block);
++		if (ret) {
++			up_write(&EXT4_I(inode)->i_data_sem);
++			goto out_stop;
++		}
+ 
+-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+-		ret = ext4_ext_remove_space(inode, first_block,
+-					    stop_block - 1);
+-	else
+-		ret = ext4_ind_remove_space(handle, inode, first_block,
+-					    stop_block);
++		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++			ret = ext4_ext_remove_space(inode, first_block,
++						    stop_block - 1);
++		else
++			ret = ext4_ind_remove_space(handle, inode, first_block,
++						    stop_block);
+ 
+-	up_write(&EXT4_I(inode)->i_data_sem);
++		up_write(&EXT4_I(inode)->i_data_sem);
++	}
+ 	if (IS_SYNC(inode))
+ 		ext4_handle_sync(handle);
+ 
+@@ -4694,19 +4694,21 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
+ 	}
+ }
+ 
+-static inline void ext4_iget_extra_inode(struct inode *inode,
++static inline int ext4_iget_extra_inode(struct inode *inode,
+ 					 struct ext4_inode *raw_inode,
+ 					 struct ext4_inode_info *ei)
+ {
+ 	__le32 *magic = (void *)raw_inode +
+ 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
++
+ 	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
+ 	    EXT4_INODE_SIZE(inode->i_sb) &&
+ 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
+ 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
+-		ext4_find_inline_data_nolock(inode);
++		return ext4_find_inline_data_nolock(inode);
+ 	} else
+ 		EXT4_I(inode)->i_inline_off = 0;
++	return 0;
+ }
+ 
+ int ext4_get_projid(struct inode *inode, kprojid_t *projid)
+@@ -4886,7 +4888,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 			ei->i_extra_isize = sizeof(struct ext4_inode) -
+ 					    EXT4_GOOD_OLD_INODE_SIZE;
+ 		} else {
+-			ext4_iget_extra_inode(inode, raw_inode, ei);
++			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
++			if (ret)
++				goto bad_inode;
+ 		}
+ 	}
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index b6bec270a8e4..d792b7689d92 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1933,7 +1933,7 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
+ 		return 0;
+ 
+ 	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
+-	if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
++	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ 		ext4_warning(sb, "resize would cause inodes_count overflow");
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 499cb4b1fbd2..fc4ced59c565 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -1688,7 +1688,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
+ 
+ 	/* No failures allowed past this point. */
+ 
+-	if (!s->not_found && here->e_value_offs) {
++	if (!s->not_found && here->e_value_size && here->e_value_offs) {
+ 		/* Remove the old value. */
+ 		void *first_val = s->base + min_offs;
+ 		size_t offs = le16_to_cpu(here->e_value_offs);
+diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
+index fe1d705ad91f..44d827c43bab 100644
+--- a/fs/orangefs/inode.c
++++ b/fs/orangefs/inode.c
+@@ -269,6 +269,13 @@ int orangefs_getattr(const struct path *path, struct kstat *stat,
+ 		else
+ 			stat->result_mask = STATX_BASIC_STATS &
+ 			    ~STATX_SIZE;
++
++		stat->attributes_mask = STATX_ATTR_IMMUTABLE |
++		    STATX_ATTR_APPEND;
++		if (inode->i_flags & S_IMMUTABLE)
++			stat->attributes |= STATX_ATTR_IMMUTABLE;
++		if (inode->i_flags & S_APPEND)
++			stat->attributes |= STATX_ATTR_APPEND;
+ 	}
+ 	return ret;
+ }
+diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
+index 1b5707c44c3f..e026bee02a66 100644
+--- a/fs/orangefs/namei.c
++++ b/fs/orangefs/namei.c
+@@ -326,6 +326,13 @@ static int orangefs_symlink(struct inode *dir,
+ 		ret = PTR_ERR(inode);
+ 		goto out;
+ 	}
++	/*
++	 * This is necessary because orangefs_inode_getattr will not
++	 * re-read symlink size as it is impossible for it to change.
++	 * Invalidating the cache does not help.  orangefs_new_inode
++	 * does not set the correct size (it does not know symname).
++	 */
++	inode->i_size = strlen(symname);
+ 
+ 	gossip_debug(GOSSIP_NAME_DEBUG,
+ 		     "Assigned symlink inode new number of %pU\n",
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index a0231e96a578..9665ef8c031e 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -556,7 +556,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
+ #endif
+ 
+ #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+-void irq_move_irq(struct irq_data *data);
++void __irq_move_irq(struct irq_data *data);
++static inline void irq_move_irq(struct irq_data *data)
++{
++	if (unlikely(irqd_is_setaffinity_pending(data)))
++		__irq_move_irq(data);
++}
+ void irq_move_masked_irq(struct irq_data *data);
+ void irq_force_complete_move(struct irq_desc *desc);
+ #else
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index f144216febc6..9397628a1967 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -58,7 +58,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ 					  struct virtio_net_hdr *hdr,
+ 					  bool little_endian,
+-					  bool has_data_valid)
++					  bool has_data_valid,
++					  int vlan_hlen)
+ {
+ 	memset(hdr, 0, sizeof(*hdr));   /* no info leak */
+ 
+@@ -83,12 +84,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ 
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ 		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+-		if (skb_vlan_tag_present(skb))
+-			hdr->csum_start = __cpu_to_virtio16(little_endian,
+-				skb_checksum_start_offset(skb) + VLAN_HLEN);
+-		else
+-			hdr->csum_start = __cpu_to_virtio16(little_endian,
+-				skb_checksum_start_offset(skb));
++		hdr->csum_start = __cpu_to_virtio16(little_endian,
++			skb_checksum_start_offset(skb) + vlan_hlen);
+ 		hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ 				skb->csum_offset);
+ 	} else if (has_data_valid &&
+diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
+index c4f5caaf3778..f6a3543e5247 100644
+--- a/include/net/transp_v6.h
++++ b/include/net/transp_v6.h
+@@ -45,8 +45,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
+ 			  struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
+ 			  struct sockcm_cookie *sockc);
+ 
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-			     __u16 srcp, __u16 destp, int bucket);
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++			       __u16 srcp, __u16 destp, int rqueue, int bucket);
++static inline void
++ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
++			__u16 destp, int bucket)
++{
++	__ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
++				  bucket);
++}
+ 
+ #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
+ 
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 850a8e581cce..a42523e8e9c9 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -244,6 +244,11 @@ static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+ 	return htons((((u64) hash * (max - min)) >> 32) + min);
+ }
+ 
++static inline int udp_rqueue_get(struct sock *sk)
++{
++	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
++}
++
+ /* net/ipv4/udp.c */
+ void udp_destruct_sock(struct sock *sk);
+ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 0f922729bab9..cf2a1d1446bc 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -205,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_GENERIC_PENDING_IRQ
++static inline int irq_set_affinity_pending(struct irq_data *data,
++					   const struct cpumask *dest)
++{
++	struct irq_desc *desc = irq_data_to_desc(data);
++
++	irqd_set_move_pending(data);
++	irq_copy_pending(desc, dest);
++	return 0;
++}
++#else
++static inline int irq_set_affinity_pending(struct irq_data *data,
++					   const struct cpumask *dest)
++{
++	return -EBUSY;
++}
++#endif
++
++static int irq_try_set_affinity(struct irq_data *data,
++				const struct cpumask *dest, bool force)
++{
++	int ret = irq_do_set_affinity(data, dest, force);
++
++	/*
++	 * In case that the underlying vector management is busy and the
++	 * architecture supports the generic pending mechanism then utilize
++	 * this to avoid returning an error to user space.
++	 */
++	if (ret == -EBUSY && !force)
++		ret = irq_set_affinity_pending(data, dest);
++	return ret;
++}
++
+ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ 			    bool force)
+ {
+@@ -215,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+ 	if (!chip || !chip->irq_set_affinity)
+ 		return -EINVAL;
+ 
+-	if (irq_can_move_pcntxt(data)) {
+-		ret = irq_do_set_affinity(data, mask, force);
++	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
++		ret = irq_try_set_affinity(data, mask, force);
+ 	} else {
+ 		irqd_set_move_pending(data);
+ 		irq_copy_pending(desc, mask);
+diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
+index 86ae0eb80b53..def48589ea48 100644
+--- a/kernel/irq/migration.c
++++ b/kernel/irq/migration.c
+@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
+ void irq_move_masked_irq(struct irq_data *idata)
+ {
+ 	struct irq_desc *desc = irq_data_to_desc(idata);
+-	struct irq_chip *chip = desc->irq_data.chip;
++	struct irq_data *data = &desc->irq_data;
++	struct irq_chip *chip = data->chip;
+ 
+-	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
++	if (likely(!irqd_is_setaffinity_pending(data)))
+ 		return;
+ 
+-	irqd_clr_move_pending(&desc->irq_data);
++	irqd_clr_move_pending(data);
+ 
+ 	/*
+ 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
+ 	 */
+-	if (irqd_is_per_cpu(&desc->irq_data)) {
++	if (irqd_is_per_cpu(data)) {
+ 		WARN_ON(1);
+ 		return;
+ 	}
+@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
+ 	 * For correct operation this depends on the caller
+ 	 * masking the irqs.
+ 	 */
+-	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+-		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
+-
++	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
++		int ret;
++
++		ret = irq_do_set_affinity(data, desc->pending_mask, false);
++		/*
++		 * If the there is a cleanup pending in the underlying
++		 * vector management, reschedule the move for the next
++		 * interrupt. Leave desc->pending_mask intact.
++		 */
++		if (ret == -EBUSY) {
++			irqd_set_move_pending(data);
++			return;
++		}
++	}
+ 	cpumask_clear(desc->pending_mask);
+ }
+ 
+-void irq_move_irq(struct irq_data *idata)
++void __irq_move_irq(struct irq_data *idata)
+ {
+ 	bool masked;
+ 
+@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
+ 	 */
+ 	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+ 
+-	if (likely(!irqd_is_setaffinity_pending(idata)))
+-		return;
+-
+ 	if (unlikely(irqd_irq_disabled(idata)))
+ 		return;
+ 
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index be585f545337..90804bd5301a 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -423,6 +423,7 @@ static void wb_exit(struct bdi_writeback *wb)
+  * protected.
+  */
+ static DEFINE_SPINLOCK(cgwb_lock);
++static struct workqueue_struct *cgwb_release_wq;
+ 
+ /**
+  * wb_congested_get_create - get or create a wb_congested
+@@ -533,7 +534,7 @@ static void cgwb_release(struct percpu_ref *refcnt)
+ {
+ 	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
+ 						refcnt);
+-	schedule_work(&wb->release_work);
++	queue_work(cgwb_release_wq, &wb->release_work);
+ }
+ 
+ static void cgwb_kill(struct bdi_writeback *wb)
+@@ -797,6 +798,21 @@ static void cgwb_bdi_register(struct backing_dev_info *bdi)
+ 	spin_unlock_irq(&cgwb_lock);
+ }
+ 
++static int __init cgwb_init(void)
++{
++	/*
++	 * There can be many concurrent release work items overwhelming
++	 * system_wq.  Put them in a separate wq and limit concurrency.
++	 * There's no point in executing many of these in parallel.
++	 */
++	cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
++	if (!cgwb_release_wq)
++		return -ENOMEM;
++
++	return 0;
++}
++subsys_initcall(cgwb_init);
++
+ #else	/* CONFIG_CGROUP_WRITEBACK */
+ 
+ static int cgwb_bdi_init(struct backing_dev_info *bdi)
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 1741dd23e7c1..bd68b6d1f892 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4043,7 +4043,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ 	 * orientated.
+ 	 */
+ 	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
+-		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+ 		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ 					ac->high_zoneidx, ac->nodemask);
+ 	}
+diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
+index 7d20e1f3de28..56197f0d9608 100644
+--- a/net/dsa/tag_trailer.c
++++ b/net/dsa/tag_trailer.c
+@@ -75,7 +75,8 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (!skb->dev)
+ 		return NULL;
+ 
+-	pskb_trim_rcsum(skb, skb->len - 4);
++	if (pskb_trim_rcsum(skb, skb->len - 4))
++		return NULL;
+ 
+ 	return skb;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index f8ad397e285e..27e87e96defc 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1680,6 +1680,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ 			reqsk_put(req);
+ 			goto discard_it;
+ 		}
++		if (tcp_checksum_complete(skb)) {
++			reqsk_put(req);
++			goto csum_error;
++		}
+ 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index b6ba51536b37..b20b21577b27 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2710,7 +2710,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
+ 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
+ 		bucket, src, srcp, dest, destp, sp->sk_state,
+ 		sk_wmem_alloc_get(sp),
+-		sk_rmem_alloc_get(sp),
++		udp_rqueue_get(sp),
+ 		0, 0L, 0,
+ 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
+ 		0, sock_i_ino(sp),
+diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
+index d0390d844ac8..d9ad986c7b2c 100644
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -163,7 +163,7 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+ 		void *info)
+ {
+-	r->idiag_rqueue = sk_rmem_alloc_get(sk);
++	r->idiag_rqueue = udp_rqueue_get(sk);
+ 	r->idiag_wqueue = sk_wmem_alloc_get(sk);
+ }
+ 
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index a9f7eca0b6a3..6840abb79a69 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -1025,8 +1025,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
+ }
+ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
+ 
+-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+-			     __u16 srcp, __u16 destp, int bucket)
++void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
++			       __u16 srcp, __u16 destp, int rqueue, int bucket)
+ {
+ 	const struct in6_addr *dest, *src;
+ 
+@@ -1042,7 +1042,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
+ 		   sp->sk_state,
+ 		   sk_wmem_alloc_get(sp),
+-		   sk_rmem_alloc_get(sp),
++		   rqueue,
+ 		   0, 0L, 0,
+ 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
+ 		   0,
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 8f749742f11f..0cb580cd5f00 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2077,9 +2077,6 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+ 	const struct in6_addr *daddr, *saddr;
+ 	struct rt6_info *rt6 = (struct rt6_info *)dst;
+ 
+-	if (rt6->rt6i_flags & RTF_LOCAL)
+-		return;
+-
+ 	if (dst_metric_locked(dst, RTAX_MTU))
+ 		return;
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 412139f4eccd..f7a9bd50b0ac 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1459,6 +1459,10 @@ static int tcp_v6_rcv(struct sk_buff *skb)
+ 			reqsk_put(req);
+ 			goto discard_it;
+ 		}
++		if (tcp_checksum_complete(skb)) {
++			reqsk_put(req);
++			goto csum_error;
++		}
+ 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
+ 			inet_csk_reqsk_queue_drop_and_put(sk, req);
+ 			goto lookup;
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 68d589f8d2b2..908476583b30 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1473,7 +1473,8 @@ int udp6_seq_show(struct seq_file *seq, void *v)
+ 		struct inet_sock *inet = inet_sk(v);
+ 		__u16 srcp = ntohs(inet->inet_sport);
+ 		__u16 destp = ntohs(inet->inet_dport);
+-		ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
++		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
++					  udp_rqueue_get(v), bucket);
+ 	}
+ 	return 0;
+ }
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c9432a0ccd56..29102f3639fe 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2037,7 +2037,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
+ 		return -EINVAL;
+ 	*len -= sizeof(vnet_hdr);
+ 
+-	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
++	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
+ 		return -EINVAL;
+ 
+ 	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+@@ -2304,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (do_vnet) {
+ 		if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ 					    sizeof(struct virtio_net_hdr),
+-					    vio_le(), true)) {
++					    vio_le(), true, 0)) {
+ 			spin_lock(&sk->sk_receive_queue.lock);
+ 			goto drop_n_account;
+ 		}
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index b1f38063ada0..e5685b3debda 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -53,22 +53,22 @@ static void tcf_simp_release(struct tc_action *a)
+ 	kfree(d->tcfd_defdata);
+ }
+ 
+-static int alloc_defdata(struct tcf_defact *d, char *defdata)
++static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
+ {
+ 	d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
+ 	if (unlikely(!d->tcfd_defdata))
+ 		return -ENOMEM;
+-	strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++	nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ 	return 0;
+ }
+ 
+-static void reset_policy(struct tcf_defact *d, char *defdata,
++static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
+ 			 struct tc_defact *p)
+ {
+ 	spin_lock_bh(&d->tcf_lock);
+ 	d->tcf_action = p->action;
+ 	memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
+-	strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
++	nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
+ 	spin_unlock_bh(&d->tcf_lock);
+ }
+ 
+@@ -87,7 +87,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ 	struct tcf_defact *d;
+ 	bool exists = false;
+ 	int ret = 0, err;
+-	char *defdata;
+ 
+ 	if (nla == NULL)
+ 		return -EINVAL;
+@@ -110,8 +109,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ 		return -EINVAL;
+ 	}
+ 
+-	defdata = nla_data(tb[TCA_DEF_DATA]);
+-
+ 	if (!exists) {
+ 		ret = tcf_idr_create(tn, parm->index, est, a,
+ 				     &act_simp_ops, bind, false);
+@@ -119,7 +116,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ 			return ret;
+ 
+ 		d = to_defact(*a);
+-		ret = alloc_defdata(d, defdata);
++		ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
+ 		if (ret < 0) {
+ 			tcf_idr_release(*a, bind);
+ 			return ret;
+@@ -133,7 +130,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ 		if (!ovr)
+ 			return -EEXIST;
+ 
+-		reset_policy(d, defdata, parm);
++		reset_policy(d, tb[TCA_DEF_DATA], parm);
+ 	}
+ 
+ 	if (ret == ACT_P_CREATED)
+diff --git a/net/socket.c b/net/socket.c
+index 08847c3b8c39..26b1fab01daf 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -538,7 +538,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	if (!err && (iattr->ia_valid & ATTR_UID)) {
+ 		struct socket *sock = SOCKET_I(d_inode(dentry));
+ 
+-		sock->sk->sk_uid = iattr->ia_uid;
++		if (sock->sk)
++			sock->sk->sk_uid = iattr->ia_uid;
++		else
++			err = -ENOENT;
+ 	}
+ 
+ 	return err;
+@@ -587,12 +590,16 @@ EXPORT_SYMBOL(sock_alloc);
+  *	an inode not a file.
+  */
+ 
+-void sock_release(struct socket *sock)
++static void __sock_release(struct socket *sock, struct inode *inode)
+ {
+ 	if (sock->ops) {
+ 		struct module *owner = sock->ops->owner;
+ 
++		if (inode)
++			inode_lock(inode);
+ 		sock->ops->release(sock);
++		if (inode)
++			inode_unlock(inode);
+ 		sock->ops = NULL;
+ 		module_put(owner);
+ 	}
+@@ -606,6 +613,11 @@ void sock_release(struct socket *sock)
+ 	}
+ 	sock->file = NULL;
+ }
++
++void sock_release(struct socket *sock)
++{
++	__sock_release(sock, NULL);
++}
+ EXPORT_SYMBOL(sock_release);
+ 
+ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
+@@ -1146,7 +1158,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
+ 
+ static int sock_close(struct inode *inode, struct file *filp)
+ {
+-	sock_release(SOCKET_I(inode));
++	__sock_release(SOCKET_I(inode), inode);
+ 	return 0;
+ }
+ 
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index f26376e954ae..cb0e7d97cdda 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -195,18 +195,12 @@ static void tls_free_both_sg(struct sock *sk)
+ }
+ 
+ static int tls_do_encryption(struct tls_context *tls_ctx,
+-			     struct tls_sw_context *ctx, size_t data_len,
+-			     gfp_t flags)
++			     struct tls_sw_context *ctx,
++			     struct aead_request *aead_req,
++			     size_t data_len)
+ {
+-	unsigned int req_size = sizeof(struct aead_request) +
+-		crypto_aead_reqsize(ctx->aead_send);
+-	struct aead_request *aead_req;
+ 	int rc;
+ 
+-	aead_req = kzalloc(req_size, flags);
+-	if (!aead_req)
+-		return -ENOMEM;
+-
+ 	ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
+ 	ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;
+ 
+@@ -223,7 +217,6 @@ static int tls_do_encryption(struct tls_context *tls_ctx,
+ 	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
+ 	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;
+ 
+-	kfree(aead_req);
+ 	return rc;
+ }
+ 
+@@ -232,8 +225,14 @@ static int tls_push_record(struct sock *sk, int flags,
+ {
+ 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+ 	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
++	struct aead_request *req;
+ 	int rc;
+ 
++	req = kzalloc(sizeof(struct aead_request) +
++		      crypto_aead_reqsize(ctx->aead_send), sk->sk_allocation);
++	if (!req)
++		return -ENOMEM;
++
+ 	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
+ 	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);
+ 
+@@ -249,15 +248,14 @@ static int tls_push_record(struct sock *sk, int flags,
+ 	tls_ctx->pending_open_record_frags = 0;
+ 	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);
+ 
+-	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
+-			       sk->sk_allocation);
++	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
+ 	if (rc < 0) {
+ 		/* If we are called from write_space and
+ 		 * we fail, we need to set this SOCK_NOSPACE
+ 		 * to trigger another write_space in the future.
+ 		 */
+ 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+-		return rc;
++		goto out_req;
+ 	}
+ 
+ 	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
+@@ -272,6 +270,8 @@ static int tls_push_record(struct sock *sk, int flags,
+ 		tls_err_abort(sk);
+ 
+ 	tls_advance_record_sn(sk, tls_ctx);
++out_req:
++	kfree(req);
+ 	return rc;
+ }
+ 
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index d1eb14842340..a12e594d4e3b 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -748,8 +748,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
+ 		return err;
+ 	strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
+ 	apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
+-	if (apcm == NULL)
++	if (apcm == NULL) {
++		snd_device_free(chip->card, pcm);
+ 		return -ENOMEM;
++	}
+ 	apcm->chip = chip;
+ 	apcm->pcm = pcm;
+ 	apcm->codec = codec;
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 5b4dbcec6de8..ba9a7e552183 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -959,12 +959,15 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
++	SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
+ 	SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
+ 	SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
+ 	SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ 	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
+ 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
+ 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 01a6643fc7d4..06c2c80a045b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6580,7 +6580,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+-	SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+@@ -6752,6 +6751,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1b, 0x01111010},
+ 		{0x1e, 0x01451130},
+ 		{0x21, 0x02211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++		{0x12, 0x90a60140},
++		{0x14, 0x90170110},
++		{0x19, 0x02a11030},
++		{0x21, 0x02211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60140},
+ 		{0x14, 0x90170110},
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 754e632a27bd..02b7ad1946db 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3277,6 +3277,10 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 	}
+ },
+ 
++/* disabled due to regression for other devices;
++ * see https://bugzilla.kernel.org/show_bug.cgi?id=199905
++ */
++#if 0
+ {
+ 	/*
+ 	 * Nura's first gen headphones use Cambridge Silicon Radio's vendor
+@@ -3324,6 +3328,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 		}
+ 	}
+ },
++#endif /* disabled */
+ 
+ {
+ 	/*


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-06-20 19:44 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-06-20 19:44 UTC (permalink / raw
  To: gentoo-commits

commit:     39d0fad6ec600bb1c3d7cb58750a0f1f96b7bf7b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 20 19:44:07 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 20 19:44:07 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=39d0fad6

Linux patch 4.16.17

 0000_README              |     4 +
 1016_linux-4.16.17.patch | 10919 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10923 insertions(+)

diff --git a/0000_README b/0000_README
index 83e0c3b..c683722 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-4.16.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.16
 
+Patch:  1016_linux-4.16.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-4.16.17.patch b/1016_linux-4.16.17.patch
new file mode 100644
index 0000000..c408309
--- /dev/null
+++ b/1016_linux-4.16.17.patch
@@ -0,0 +1,10919 @@
+diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.txt b/Documentation/devicetree/bindings/display/panel/panel-common.txt
+index 557fa765adcb..5d2519af4bb5 100644
+--- a/Documentation/devicetree/bindings/display/panel/panel-common.txt
++++ b/Documentation/devicetree/bindings/display/panel/panel-common.txt
+@@ -38,7 +38,7 @@ Display Timings
+   require specific display timings. The panel-timing subnode expresses those
+   timings as specified in the timing subnode section of the display timing
+   bindings defined in
+-  Documentation/devicetree/bindings/display/display-timing.txt.
++  Documentation/devicetree/bindings/display/panel/display-timing.txt.
+ 
+ 
+ Connectivity
+diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+index 891db41e9420..98d7898fcd78 100644
+--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
++++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+@@ -25,6 +25,7 @@ Required Properties:
+ 		- "renesas,dmac-r8a7794" (R-Car E2)
+ 		- "renesas,dmac-r8a7795" (R-Car H3)
+ 		- "renesas,dmac-r8a7796" (R-Car M3-W)
++		- "renesas,dmac-r8a77965" (R-Car M3-N)
+ 		- "renesas,dmac-r8a77970" (R-Car V3M)
+ 
+ - reg: base address and length of the registers block for the DMAC
+diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
+index b4dc455eb155..d159807c2155 100644
+--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
++++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
+@@ -17,6 +17,7 @@ Required properties:
+ 
+       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
+       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
++      - "renesas,etheravb-r8a77965" for the R8A77965 SoC.
+       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
+       - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
+       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
+diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+index 09789fdfa749..4dc4c354c72b 100644
+--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+@@ -55,9 +55,9 @@ pins it needs, and how they should be configured, with regard to muxer
+ configuration, drive strength and pullups. If one of these options is
+ not set, its actual value will be unspecified.
+ 
+-This driver supports the generic pin multiplexing and configuration
+-bindings. For details on each properties, you can refer to
+-./pinctrl-bindings.txt.
++Allwinner A1X Pin Controller supports the generic pin multiplexing and
++configuration bindings. For details on each properties, you can refer to
++ ./pinctrl-bindings.txt.
+ 
+ Required sub-node properties:
+   - pins
+diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
+index 8ff65fa632fd..c06c045126fc 100644
+--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
++++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
+@@ -21,7 +21,7 @@ Required properties:
+ - interrupts : identifier to the device interrupt
+ - clocks : a list of phandle + clock-specifier pairs, one for each
+ 	   entry in clock names.
+-- clocks-names :
++- clock-names :
+    * "xtal" for external xtal clock identifier
+    * "pclk" for the bus core clock, either the clk81 clock or the gate clock
+    * "baud" for the source of the baudrate generator, can be either the xtal
+diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+index 2ae2fee7e023..b7e0e32b9ac6 100644
+--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
++++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+@@ -24,7 +24,7 @@ Required properties:
+     - Must contain two elements for the extended variant of the IP
+       (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
+       respectively the UART TX interrupt and the UART RX interrupt. A
+-      corresponding interrupts-names property must be defined.
++      corresponding interrupt-names property must be defined.
+     - For backward compatibility reasons, a single element interrupts
+       property is also supported for the standard variant of the IP,
+       containing only the UART sum interrupt. This form is deprecated
+diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+index cf504d0380ae..88f947c47adc 100644
+--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
++++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+@@ -41,6 +41,8 @@ Required properties:
+     - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
+     - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
+     - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
++    - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
++    - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
+     - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
+     - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
+     - "renesas,scif-r8a77995" for R8A77995 (R-Car D3) SCIF compatible UART.
+diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
+index ae850d6c0ad3..8ff7eadc8bef 100644
+--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
++++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
+@@ -181,6 +181,7 @@ karo	Ka-Ro electronics GmbH
+ keithkoep	Keith & Koep GmbH
+ keymile	Keymile GmbH
+ khadas	Khadas
++kiebackpeter    Kieback & Peter GmbH
+ kinetic Kinetic Technologies
+ kingnovel	Kingnovel Technology Co., Ltd.
+ kosagi	Sutajio Ko-Usagi PTE Ltd.
+diff --git a/Makefile b/Makefile
+index 55554f392115..02a4f7f8c613 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index 45a6b9b7af2a..6a4e7341ecd3 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -117,11 +117,9 @@ ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
+ asflags-y := -DZIMAGE
+ 
+ # Supply kernel BSS size to the decompressor via a linker symbol.
+-KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
+-		perl -e 'while (<>) { \
+-			$$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \
+-			$$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \
+-		}; printf "%d\n", $$bss_end - $$bss_start;')
++KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \
++		sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
++		       -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
+ LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
+ # Supply ZRELADDR to the decompressor via a linker symbol.
+ ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 45c8823c3750..517e0e18f0b8 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -29,19 +29,19 @@
+ #if defined(CONFIG_DEBUG_ICEDCC)
+ 
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
+-		.macro	loadsp, rb, tmp
++		.macro	loadsp, rb, tmp1, tmp2
+ 		.endm
+ 		.macro	writeb, ch, rb
+ 		mcr	p14, 0, \ch, c0, c5, 0
+ 		.endm
+ #elif defined(CONFIG_CPU_XSCALE)
+-		.macro	loadsp, rb, tmp
++		.macro	loadsp, rb, tmp1, tmp2
+ 		.endm
+ 		.macro	writeb, ch, rb
+ 		mcr	p14, 0, \ch, c8, c0, 0
+ 		.endm
+ #else
+-		.macro	loadsp, rb, tmp
++		.macro	loadsp, rb, tmp1, tmp2
+ 		.endm
+ 		.macro	writeb, ch, rb
+ 		mcr	p14, 0, \ch, c1, c0, 0
+@@ -57,7 +57,7 @@
+ 		.endm
+ 
+ #if defined(CONFIG_ARCH_SA1100)
+-		.macro	loadsp, rb, tmp
++		.macro	loadsp, rb, tmp1, tmp2
+ 		mov	\rb, #0x80000000	@ physical base address
+ #ifdef CONFIG_DEBUG_LL_SER3
+ 		add	\rb, \rb, #0x00050000	@ Ser3
+@@ -66,8 +66,8 @@
+ #endif
+ 		.endm
+ #else
+-		.macro	loadsp,	rb, tmp
+-		addruart \rb, \tmp
++		.macro	loadsp,	rb, tmp1, tmp2
++		addruart \rb, \tmp1, \tmp2
+ 		.endm
+ #endif
+ #endif
+@@ -561,8 +561,6 @@ not_relocated:	mov	r0, #0
+ 		bl	decompress_kernel
+ 		bl	cache_clean_flush
+ 		bl	cache_off
+-		mov	r1, r7			@ restore architecture number
+-		mov	r2, r8			@ restore atags pointer
+ 
+ #ifdef CONFIG_ARM_VIRT_EXT
+ 		mrs	r0, spsr		@ Get saved CPU boot mode
+@@ -1297,7 +1295,7 @@ phex:		adr	r3, phexbuf
+ 		b	1b
+ 
+ @ puts corrupts {r0, r1, r2, r3}
+-puts:		loadsp	r3, r1
++puts:		loadsp	r3, r2, r1
+ 1:		ldrb	r2, [r0], #1
+ 		teq	r2, #0
+ 		moveq	pc, lr
+@@ -1314,8 +1312,8 @@ puts:		loadsp	r3, r1
+ @ putc corrupts {r0, r1, r2, r3}
+ putc:
+ 		mov	r2, r0
++		loadsp	r3, r1, r0
+ 		mov	r0, #0
+-		loadsp	r3, r1
+ 		b	2b
+ 
+ @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
+@@ -1365,6 +1363,8 @@ __hyp_reentry_vectors:
+ 
+ __enter_kernel:
+ 		mov	r0, #0			@ must be 0
++		mov	r1, r7			@ restore architecture number
++		mov	r2, r8			@ restore atags pointer
+  ARM(		mov	pc, r4		)	@ call kernel
+  M_CLASS(	add	r4, r4, #1	)	@ enter in Thumb mode for M class
+  THUMB(		bx	r4		)	@ entry point is always ARM for A/R classes
+diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
+index 699fdf94d139..9fe4f5a6379e 100644
+--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
++++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
+@@ -69,7 +69,7 @@
+ 		timer@20200 {
+ 			compatible = "arm,cortex-a9-global-timer";
+ 			reg = <0x20200 0x100>;
+-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&periph_clk>;
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
+index c66cf7895363..3cf97f4dac24 100644
+--- a/arch/arm/boot/dts/da850.dtsi
++++ b/arch/arm/boot/dts/da850.dtsi
+@@ -46,8 +46,6 @@
+ 		pmx_core: pinmux@14120 {
+ 			compatible = "pinctrl-single";
+ 			reg = <0x14120 0x50>;
+-			#address-cells = <1>;
+-			#size-cells = <0>;
+ 			#pinctrl-cells = <2>;
+ 			pinctrl-single,bit-per-mux;
+ 			pinctrl-single,register-width = <32>;
+diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
+index d6657b3bae84..85d7b5148b0a 100644
+--- a/arch/arm/boot/dts/dm8148-evm.dts
++++ b/arch/arm/boot/dts/dm8148-evm.dts
+@@ -10,7 +10,7 @@
+ 
+ / {
+ 	model = "DM8148 EVM";
+-	compatible = "ti,dm8148-evm", "ti,dm8148";
++	compatible = "ti,dm8148-evm", "ti,dm8148", "ti,dm814";
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
+index 63883b3479f9..6418f9cdbe83 100644
+--- a/arch/arm/boot/dts/dm8148-t410.dts
++++ b/arch/arm/boot/dts/dm8148-t410.dts
+@@ -9,7 +9,7 @@
+ 
+ / {
+ 	model = "HP t410 Smart Zero Client";
+-	compatible = "hp,t410", "ti,dm8148";
++	compatible = "hp,t410", "ti,dm8148", "ti,dm814";
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts
+index c72a2132aa82..1d030d567307 100644
+--- a/arch/arm/boot/dts/dm8168-evm.dts
++++ b/arch/arm/boot/dts/dm8168-evm.dts
+@@ -10,7 +10,7 @@
+ 
+ / {
+ 	model = "DM8168 EVM";
+-	compatible = "ti,dm8168-evm", "ti,dm8168";
++	compatible = "ti,dm8168-evm", "ti,dm8168", "ti,dm816";
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+diff --git a/arch/arm/boot/dts/dra62x-j5eco-evm.dts b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
+index fee0547f7302..31b824ad5d29 100644
+--- a/arch/arm/boot/dts/dra62x-j5eco-evm.dts
++++ b/arch/arm/boot/dts/dra62x-j5eco-evm.dts
+@@ -10,7 +10,7 @@
+ 
+ / {
+ 	model = "DRA62x J5 Eco EVM";
+-	compatible = "ti,dra62x-j5eco-evm", "ti,dra62x", "ti,dm8148";
++	compatible = "ti,dra62x-j5eco-evm", "ti,dra62x", "ti,dm8148", "ti,dm814";
+ 
+ 	memory@80000000 {
+ 		device_type = "memory";
+diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
+index 5306b78de0ca..380afcafeb16 100644
+--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
++++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
+@@ -518,7 +518,7 @@
+ 	};
+ 
+ 	touchscreen@20 {
+-		compatible = "syna,rmi4_i2c";
++		compatible = "syna,rmi4-i2c";
+ 		reg = <0x20>;
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_ts>;
+@@ -536,8 +536,8 @@
+ 
+ 		rmi4-f11@11 {
+ 			reg = <0x11>;
+-			touch-inverted-y;
+-			touch-swapped-x-y;
++			touchscreen-inverted-y;
++			touchscreen-swapped-x-y;
+ 			syna,sensor-type = <1>;
+ 		};
+ 	};
+diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+index a30ee9fcb3ae..4fabe4e9283f 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -26,7 +26,7 @@
+ 		gpio = <&gpio1 3 0>;   /* gpio_3 */
+ 		startup-delay-us = <70000>;
+ 		enable-active-high;
+-		vin-supply = <&vmmc2>;
++		vin-supply = <&vaux3>;
+ 	};
+ 
+ 	/* HS USB Host PHY on PORT 1 */
+@@ -82,6 +82,7 @@
+ 		twl_audio: audio {
+ 			compatible = "ti,twl4030-audio";
+ 			codec {
++				ti,hs_extmute_gpio = <&gpio2 25 GPIO_ACTIVE_HIGH>;
+ 			};
+ 		};
+ 	};
+@@ -195,6 +196,7 @@
+ 		pinctrl-single,pins = <
+ 			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)        /* i2c1_scl.i2c1_scl */
+ 			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)        /* i2c1_sda.i2c1_sda */
++			OMAP3_CORE1_IOPAD(0x20ba, PIN_OUTPUT | MUX_MODE4)        /* gpmc_ncs6.gpio_57 */
+ 		>;
+ 	};
+ };
+@@ -209,7 +211,7 @@
+ 	};
+ 	wl127x_gpio: pinmux_wl127x_gpio_pin {
+ 		pinctrl-single,pins = <
+-			OMAP3_WKUP_IOPAD(0x2a0c, PIN_INPUT | MUX_MODE4)		/* sys_boot0.gpio_2 */
++			OMAP3_WKUP_IOPAD(0x2a0a, PIN_INPUT | MUX_MODE4)		/* sys_boot0.gpio_2 */
+ 			OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4)	/* sys_boot1.gpio_3 */
+ 		>;
+ 	};
+@@ -244,6 +246,11 @@
+ #include "twl4030.dtsi"
+ #include "twl4030_omap3.dtsi"
+ 
++&vaux3 {
++	regulator-min-microvolt = <2800000>;
++	regulator-max-microvolt = <2800000>;
++};
++
+ &twl {
+ 	twl_power: power {
+ 		compatible = "ti,twl4030-power-idle-osc-off", "ti,twl4030-power-idle";
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 475904894b86..e554b6e039f3 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -163,10 +163,10 @@
+ 
+ 			cm2: cm2@8000 {
+ 				compatible = "ti,omap4-cm2", "simple-bus";
+-				reg = <0x8000 0x3000>;
++				reg = <0x8000 0x2000>;
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+-				ranges = <0 0x8000 0x3000>;
++				ranges = <0 0x8000 0x2000>;
+ 
+ 				cm2_clocks: clocks {
+ 					#address-cells = <1>;
+@@ -250,11 +250,11 @@
+ 
+ 				prm: prm@6000 {
+ 					compatible = "ti,omap4-prm";
+-					reg = <0x6000 0x3000>;
++					reg = <0x6000 0x2000>;
+ 					interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ 					#address-cells = <1>;
+ 					#size-cells = <1>;
+-					ranges = <0 0x6000 0x3000>;
++					ranges = <0 0x6000 0x2000>;
+ 
+ 					prm_clocks: clocks {
+ 						#address-cells = <1>;
+diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
+deleted file mode 100644
+index d0513880be21..000000000000
+--- a/arch/arm/include/uapi/asm/siginfo.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-#ifndef __ASM_SIGINFO_H
+-#define __ASM_SIGINFO_H
+-
+-#include <asm-generic/siginfo.h>
+-
+-/*
+- * SIGFPE si_codes
+- */
+-#ifdef __KERNEL__
+-#define FPE_FIXME	0	/* Broken dup of SI_USER */
+-#endif /* __KERNEL__ */
+-
+-#endif
+diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
+index 6b38d7a634c1..c15318431986 100644
+--- a/arch/arm/kernel/machine_kexec.c
++++ b/arch/arm/kernel/machine_kexec.c
+@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
+ 		cpu_relax();
+ }
+ 
++void crash_smp_send_stop(void)
++{
++	static int cpus_stopped;
++	unsigned long msecs;
++
++	if (cpus_stopped)
++		return;
++
++	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
++	smp_call_function(machine_crash_nonpanic_core, NULL, false);
++	msecs = 1000; /* Wait at most a second for the other cpus to stop */
++	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
++		mdelay(1);
++		msecs--;
++	}
++	if (atomic_read(&waiting_for_crash_ipi) > 0)
++		pr_warn("Non-crashing CPUs did not react to IPI\n");
++
++	cpus_stopped = 1;
++}
++
+ static void machine_kexec_mask_interrupts(void)
+ {
+ 	unsigned int i;
+@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
+ 
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+-	unsigned long msecs;
+-
+ 	local_irq_disable();
+-
+-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+-	smp_call_function(machine_crash_nonpanic_core, NULL, false);
+-	msecs = 1000; /* Wait at most a second for the other cpus to stop */
+-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+-		mdelay(1);
+-		msecs--;
+-	}
+-	if (atomic_read(&waiting_for_crash_ipi) > 0)
+-		pr_warn("Non-crashing CPUs did not react to IPI\n");
++	crash_smp_send_stop();
+ 
+ 	crash_save_cpu(regs, smp_processor_id());
+ 	machine_kexec_mask_interrupts();
+diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
+index f673cd7a6766..fb7c44cdadcb 100644
+--- a/arch/arm/mach-davinci/board-da830-evm.c
++++ b/arch/arm/mach-davinci/board-da830-evm.c
+@@ -205,12 +205,17 @@ static const short da830_evm_mmc_sd_pins[] = {
+ 	-1
+ };
+ 
++#define DA830_MMCSD_WP_PIN		GPIO_TO_PIN(2, 1)
++#define DA830_MMCSD_CD_PIN		GPIO_TO_PIN(2, 2)
++
+ static struct gpiod_lookup_table mmc_gpios_table = {
+ 	.dev_id = "da830-mmc.0",
+ 	.table = {
+ 		/* gpio chip 1 contains gpio range 32-63 */
+-		GPIO_LOOKUP("davinci_gpio.1", 2, "cd", GPIO_ACTIVE_LOW),
+-		GPIO_LOOKUP("davinci_gpio.1", 1, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd",
++			    GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp",
++			    GPIO_ACTIVE_LOW),
+ 	},
+ };
+ 
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index d898a94f6eae..631363293887 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -763,12 +763,17 @@ static const short da850_evm_mcasp_pins[] __initconst = {
+ 	-1
+ };
+ 
++#define DA850_MMCSD_CD_PIN		GPIO_TO_PIN(4, 0)
++#define DA850_MMCSD_WP_PIN		GPIO_TO_PIN(4, 1)
++
+ static struct gpiod_lookup_table mmc_gpios_table = {
+ 	.dev_id = "da830-mmc.0",
+ 	.table = {
+ 		/* gpio chip 2 contains gpio range 64-95 */
+-		GPIO_LOOKUP("davinci_gpio.2", 0, "cd", GPIO_ACTIVE_LOW),
+-		GPIO_LOOKUP("davinci_gpio.2", 1, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
++			    GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
++			    GPIO_ACTIVE_LOW),
+ 	},
+ };
+ 
+diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
+index d6b11907380c..9aedec083dbf 100644
+--- a/arch/arm/mach-davinci/board-dm355-evm.c
++++ b/arch/arm/mach-davinci/board-dm355-evm.c
+@@ -19,6 +19,7 @@
+ #include <linux/gpio.h>
+ #include <linux/gpio/machine.h>
+ #include <linux/clk.h>
++#include <linux/dm9000.h>
+ #include <linux/videodev2.h>
+ #include <media/i2c/tvp514x.h>
+ #include <linux/spi/spi.h>
+@@ -109,12 +110,15 @@ static struct platform_device davinci_nand_device = {
+ 	},
+ };
+ 
++#define DM355_I2C_SDA_PIN	GPIO_TO_PIN(0, 15)
++#define DM355_I2C_SCL_PIN	GPIO_TO_PIN(0, 14)
++
+ static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
+-	.dev_id = "i2c_davinci",
++	.dev_id = "i2c_davinci.1",
+ 	.table = {
+-		GPIO_LOOKUP("davinci_gpio", 15, "sda",
++		GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda",
+ 			    GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+-		GPIO_LOOKUP("davinci_gpio", 14, "scl",
++		GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl",
+ 			    GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ 	},
+ };
+@@ -179,11 +183,16 @@ static struct resource dm355evm_dm9000_rsrc[] = {
+ 	},
+ };
+ 
++static struct dm9000_plat_data dm335evm_dm9000_platdata;
++
+ static struct platform_device dm355evm_dm9000 = {
+ 	.name		= "dm9000",
+ 	.id		= -1,
+ 	.resource	= dm355evm_dm9000_rsrc,
+ 	.num_resources	= ARRAY_SIZE(dm355evm_dm9000_rsrc),
++	.dev		= {
++		.platform_data = &dm335evm_dm9000_platdata,
++	},
+ };
+ 
+ static struct tvp514x_platform_data tvp5146_pdata = {
+diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
+index 85e6fb33b1ee..50b246e315d1 100644
+--- a/arch/arm/mach-davinci/board-dm644x-evm.c
++++ b/arch/arm/mach-davinci/board-dm644x-evm.c
+@@ -17,6 +17,7 @@
+ #include <linux/i2c.h>
+ #include <linux/platform_data/pcf857x.h>
+ #include <linux/platform_data/at24.h>
++#include <linux/platform_data/gpio-davinci.h>
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/rawnand.h>
+ #include <linux/mtd/partitions.h>
+@@ -596,12 +597,15 @@ static struct i2c_board_info __initdata i2c_info[] =  {
+ 	},
+ };
+ 
++#define DM644X_I2C_SDA_PIN	GPIO_TO_PIN(2, 12)
++#define DM644X_I2C_SCL_PIN	GPIO_TO_PIN(2, 11)
++
+ static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
+-	.dev_id = "i2c_davinci",
++	.dev_id = "i2c_davinci.1",
+ 	.table = {
+-		GPIO_LOOKUP("davinci_gpio", 44, "sda",
++		GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda",
+ 			    GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+-		GPIO_LOOKUP("davinci_gpio", 43, "scl",
++		GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl",
+ 			    GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ 	},
+ };
+diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
+index cb0a41e83582..4c458f714101 100644
+--- a/arch/arm/mach-davinci/board-dm646x-evm.c
++++ b/arch/arm/mach-davinci/board-dm646x-evm.c
+@@ -534,11 +534,12 @@ static struct vpif_display_config dm646x_vpif_display_config = {
+ 	.set_clock	= set_vpif_clock,
+ 	.subdevinfo	= dm646x_vpif_subdev,
+ 	.subdev_count	= ARRAY_SIZE(dm646x_vpif_subdev),
++	.i2c_adapter_id = 1,
+ 	.chan_config[0] = {
+ 		.outputs = dm6467_ch0_outputs,
+ 		.output_count = ARRAY_SIZE(dm6467_ch0_outputs),
+ 	},
+-	.card_name	= "DM646x EVM",
++	.card_name	= "DM646x EVM Video Display",
+ };
+ 
+ /**
+@@ -676,6 +677,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
+ 	.setup_input_channel_mode = setup_vpif_input_channel_mode,
+ 	.subdev_info = vpif_capture_sdev_info,
+ 	.subdev_count = ARRAY_SIZE(vpif_capture_sdev_info),
++	.i2c_adapter_id = 1,
+ 	.chan_config[0] = {
+ 		.inputs = dm6467_ch0_inputs,
+ 		.input_count = ARRAY_SIZE(dm6467_ch0_inputs),
+@@ -696,6 +698,7 @@ static struct vpif_capture_config dm646x_vpif_capture_cfg = {
+ 			.fid_pol = 0,
+ 		},
+ 	},
++	.card_name = "DM646x EVM Video Capture",
+ };
+ 
+ static void __init evm_init_video(void)
+diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
+index 62eb7d668890..10a027253250 100644
+--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
++++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
+@@ -123,12 +123,16 @@ static const short hawk_mmcsd0_pins[] = {
+ 	-1
+ };
+ 
++#define DA850_HAWK_MMCSD_CD_PIN		GPIO_TO_PIN(3, 12)
++#define DA850_HAWK_MMCSD_WP_PIN		GPIO_TO_PIN(3, 13)
++
+ static struct gpiod_lookup_table mmc_gpios_table = {
+ 	.dev_id = "da830-mmc.0",
+ 	.table = {
+-		/* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
+-		GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
+-		GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd",
++			    GPIO_ACTIVE_LOW),
++		GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp",
++			    GPIO_ACTIVE_LOW),
+ 	},
+ };
+ 
+diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
+index 6fc06a6ad4f8..137227b33397 100644
+--- a/arch/arm/mach-davinci/dm646x.c
++++ b/arch/arm/mach-davinci/dm646x.c
+@@ -495,7 +495,8 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
+ 	[IRQ_DM646X_MCASP0TXINT]        = 7,
+ 	[IRQ_DM646X_MCASP0RXINT]        = 7,
+ 	[IRQ_DM646X_RESERVED_3]         = 7,
+-	[IRQ_DM646X_MCASP1TXINT]        = 7,    /* clockevent */
++	[IRQ_DM646X_MCASP1TXINT]        = 7,
++	[IRQ_TINT0_TINT12]              = 7,    /* clockevent */
+ 	[IRQ_TINT0_TINT34]              = 7,    /* clocksource */
+ 	[IRQ_TINT1_TINT12]              = 7,    /* DSP timer */
+ 	[IRQ_TINT1_TINT34]              = 7,    /* system tick */
+diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
+index fe57e2692629..abca83d22ff3 100644
+--- a/arch/arm/mach-keystone/pm_domain.c
++++ b/arch/arm/mach-keystone/pm_domain.c
+@@ -29,6 +29,7 @@ static struct dev_pm_domain keystone_pm_domain = {
+ 
+ static struct pm_clk_notifier_block platform_domain_notifier = {
+ 	.pm_domain = &keystone_pm_domain,
++	.con_ids = { NULL },
+ };
+ 
+ static const struct of_device_id of_keystone_table[] = {
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
+index 793a24a53c52..d7ca9e2b40d2 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq.c
++++ b/arch/arm/mach-omap1/ams-delta-fiq.c
+@@ -58,22 +58,24 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
+ 		irq_num = gpio_to_irq(gpio);
+ 		fiq_count = fiq_buffer[FIQ_CNT_INT_00 + gpio];
+ 
+-		while (irq_counter[gpio] < fiq_count) {
+-			if (gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
+-				struct irq_data *d = irq_get_irq_data(irq_num);
+-
+-				/*
+-				 * It looks like handle_edge_irq() that
+-				 * OMAP GPIO edge interrupts default to,
+-				 * expects interrupt already unmasked.
+-				 */
+-				if (irq_chip && irq_chip->irq_unmask)
++		if (irq_counter[gpio] < fiq_count &&
++				gpio != AMS_DELTA_GPIO_PIN_KEYBRD_CLK) {
++			struct irq_data *d = irq_get_irq_data(irq_num);
++
++			/*
++			 * handle_simple_irq() that OMAP GPIO edge
++			 * interrupts default to since commit 80ac93c27441
++			 * requires interrupt already acked and unmasked.
++			 */
++			if (irq_chip) {
++				if (irq_chip->irq_ack)
++					irq_chip->irq_ack(d);
++				if (irq_chip->irq_unmask)
+ 					irq_chip->irq_unmask(d);
+ 			}
+-			generic_handle_irq(irq_num);
+-
+-			irq_counter[gpio]++;
+ 		}
++		for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
++			generic_handle_irq(irq_num);
+ 	}
+ 	return IRQ_HANDLED;
+ }
+diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
+index 76eb6ec5f157..1e6a967cd2d5 100644
+--- a/arch/arm/mach-omap2/powerdomain.c
++++ b/arch/arm/mach-omap2/powerdomain.c
+@@ -188,7 +188,7 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
+ 				       ((prev & OMAP_POWERSTATE_MASK) << 0));
+ 			trace_power_domain_target_rcuidle(pwrdm->name,
+ 							  trace_state,
+-							  smp_processor_id());
++							  raw_smp_processor_id());
+ 		}
+ 		break;
+ 	default:
+@@ -518,7 +518,7 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+ 	if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
+ 		/* Trace the pwrdm desired target state */
+ 		trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+-						  smp_processor_id());
++						  raw_smp_processor_id());
+ 		/* Program the pwrdm desired target state */
+ 		ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
+ 	}
+diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
+index 4c375e11ae95..af4ee2cef2f9 100644
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
+ 
+ 	if (exceptions == VFP_EXCEPTION_ERROR) {
+ 		vfp_panic("unhandled bounce", inst);
+-		vfp_raise_sigfpe(FPE_FIXME, regs);
++		vfp_raise_sigfpe(FPE_FLTINV, regs);
+ 		return;
+ 	}
+ 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+index aeb6d21a3bec..afc4001689fd 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+@@ -248,3 +248,7 @@
+ 	pinctrl-0 = <&uart_ao_a_pins>;
+ 	pinctrl-names = "default";
+ };
++
++&usb0 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+index 9671f1e3c74a..40c674317987 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+@@ -271,3 +271,15 @@
+ 	pinctrl-0 = <&uart_ao_a_pins>;
+ 	pinctrl-names = "default";
+ };
++
++&usb0 {
++	status = "okay";
++};
++
++&usb2_phy0 {
++	/*
++	 * even though the schematics don't show it:
++	 * HDMI_5V is also used as supply for the USB VBUS.
++	 */
++	phy-supply = <&hdmi_5v>;
++};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+index 271f14279180..0fdebcc698a6 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+@@ -251,3 +251,7 @@
+ 	pinctrl-0 = <&uart_ao_a_pins>;
+ 	pinctrl-names = "default";
+ };
++
++&usb0 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+index 7005068346a0..26de81a24fd5 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+@@ -185,3 +185,7 @@
+ 	pinctrl-0 = <&uart_ao_a_pins>;
+ 	pinctrl-names = "default";
+ };
++
++&usb0 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index c8514110b9da..7f542992850f 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -57,6 +57,67 @@
+ 			no-map;
+ 		};
+ 	};
++
++	soc {
++		usb0: usb@c9000000 {
++			status = "disabled";
++			compatible = "amlogic,meson-gxl-dwc3";
++			#address-cells = <2>;
++			#size-cells = <2>;
++			ranges;
++
++			clocks = <&clkc CLKID_USB>;
++			clock-names = "usb_general";
++			resets = <&reset RESET_USB_OTG>;
++			reset-names = "usb_otg";
++
++			dwc3: dwc3@c9000000 {
++				compatible = "snps,dwc3";
++				reg = <0x0 0xc9000000 0x0 0x100000>;
++				interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
++				dr_mode = "host";
++				maximum-speed = "high-speed";
++				snps,dis_u2_susphy_quirk;
++				phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
++			};
++		};
++	};
++};
++
++&apb {
++	usb2_phy0: phy@78000 {
++		compatible = "amlogic,meson-gxl-usb2-phy";
++		#phy-cells = <0>;
++		reg = <0x0 0x78000 0x0 0x20>;
++		clocks = <&clkc CLKID_USB>;
++		clock-names = "phy";
++		resets = <&reset RESET_USB_OTG>;
++		reset-names = "phy";
++		status = "okay";
++	};
++
++	usb2_phy1: phy@78020 {
++		compatible = "amlogic,meson-gxl-usb2-phy";
++		#phy-cells = <0>;
++		reg = <0x0 0x78020 0x0 0x20>;
++		clocks = <&clkc CLKID_USB>;
++		clock-names = "phy";
++		resets = <&reset RESET_USB_OTG>;
++		reset-names = "phy";
++		status = "okay";
++	};
++
++	usb3_phy: phy@78080 {
++		compatible = "amlogic,meson-gxl-usb3-phy";
++		#phy-cells = <0>;
++		reg = <0x0 0x78080 0x0 0x20>;
++		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
++		clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
++		clock-names = "phy", "peripheral";
++		resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
++		reset-names = "phy", "peripheral";
++		status = "okay";
++	};
+ };
+ 
+ &ethmac {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+index 1448c3dba08e..572b01ae8de1 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+@@ -413,3 +413,7 @@
+ 	status = "okay";
+ 	vref-supply = <&vddio_ao18>;
+ };
++
++&usb0 {
++	status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+index 19a798d2ae2f..fc53ed7afc11 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+@@ -117,6 +117,19 @@
+ 	};
+ };
+ 
++&apb {
++	usb2_phy2: phy@78040 {
++		compatible = "amlogic,meson-gxl-usb2-phy";
++		#phy-cells = <0>;
++		reg = <0x0 0x78040 0x0 0x20>;
++		clocks = <&clkc CLKID_USB>;
++		clock-names = "phy";
++		resets = <&reset RESET_USB_OTG>;
++		reset-names = "phy";
++		status = "okay";
++	};
++};
++
+ &clkc_AO {
+ 	compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
+ };
+@@ -137,3 +150,7 @@
+ &hdmi_tx {
+ 	compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
+ };
++
++&dwc3 {
++	phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
++};
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+index 4b5465da81d8..8c68e0c26f1b 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
+@@ -36,11 +36,11 @@
+ 		#size-cells = <1>;
+ 		ranges = <0x0 0x0 0x67d00000 0x00800000>;
+ 
+-		sata0: ahci@210000 {
++		sata0: ahci@0 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00210000 0x1000>;
++			reg = <0x00000000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -52,9 +52,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy0: sata_phy@212100 {
++		sata_phy0: sata_phy@2100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00212100 0x1000>;
++			reg = <0x00002100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -66,11 +66,11 @@
+ 			};
+ 		};
+ 
+-		sata1: ahci@310000 {
++		sata1: ahci@10000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00310000 0x1000>;
++			reg = <0x00010000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -82,9 +82,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy1: sata_phy@312100 {
++		sata_phy1: sata_phy@12100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00312100 0x1000>;
++			reg = <0x00012100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -96,11 +96,11 @@
+ 			};
+ 		};
+ 
+-		sata2: ahci@120000 {
++		sata2: ahci@20000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00120000 0x1000>;
++			reg = <0x00020000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -112,9 +112,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy2: sata_phy@122100 {
++		sata_phy2: sata_phy@22100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00122100 0x1000>;
++			reg = <0x00022100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -126,11 +126,11 @@
+ 			};
+ 		};
+ 
+-		sata3: ahci@130000 {
++		sata3: ahci@30000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00130000 0x1000>;
++			reg = <0x00030000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -142,9 +142,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy3: sata_phy@132100 {
++		sata_phy3: sata_phy@32100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00132100 0x1000>;
++			reg = <0x00032100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -156,11 +156,11 @@
+ 			};
+ 		};
+ 
+-		sata4: ahci@330000 {
++		sata4: ahci@100000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00330000 0x1000>;
++			reg = <0x00100000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -172,9 +172,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy4: sata_phy@332100 {
++		sata_phy4: sata_phy@102100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00332100 0x1000>;
++			reg = <0x00102100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -186,11 +186,11 @@
+ 			};
+ 		};
+ 
+-		sata5: ahci@400000 {
++		sata5: ahci@110000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00400000 0x1000>;
++			reg = <0x00110000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -202,9 +202,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy5: sata_phy@402100 {
++		sata_phy5: sata_phy@112100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00402100 0x1000>;
++			reg = <0x00112100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -216,11 +216,11 @@
+ 			};
+ 		};
+ 
+-		sata6: ahci@410000 {
++		sata6: ahci@120000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00410000 0x1000>;
++			reg = <0x00120000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -232,9 +232,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy6: sata_phy@412100 {
++		sata_phy6: sata_phy@122100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00412100 0x1000>;
++			reg = <0x00122100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+@@ -246,11 +246,11 @@
+ 			};
+ 		};
+ 
+-		sata7: ahci@420000 {
++		sata7: ahci@130000 {
+ 			compatible = "brcm,iproc-ahci", "generic-ahci";
+-			reg = <0x00420000 0x1000>;
++			reg = <0x00130000 0x1000>;
+ 			reg-names = "ahci";
+-			interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+ 			status = "disabled";
+@@ -262,9 +262,9 @@
+ 			};
+ 		};
+ 
+-		sata_phy7: sata_phy@422100 {
++		sata_phy7: sata_phy@132100 {
+ 			compatible = "brcm,iproc-sr-sata-phy";
+-			reg = <0x00422100 0x1000>;
++			reg = <0x00132100 0x1000>;
+ 			reg-names = "phy";
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+index a8baad7b80df..13f57fff1477 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+@@ -46,7 +46,7 @@
+ 				compatible = "ethernet-phy-ieee802.3-c22";
+ 				reg = <0x0>;
+ 				interrupt-parent = <&gpio>;
+-				interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_HIGH>;
++				interrupts = <TEGRA_MAIN_GPIO(M, 5) IRQ_TYPE_LEVEL_LOW>;
+ 			};
+ 		};
+ 	};
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+index cd7c2d0a1f64..4939ab25b506 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi
+@@ -330,7 +330,7 @@
+ 			mmc-ddr-1_8v;
+ 			mmc-hs200-1_8v;
+ 			mmc-pwrseq = <&emmc_pwrseq>;
+-			cdns,phy-input-delay-legacy = <4>;
++			cdns,phy-input-delay-legacy = <9>;
+ 			cdns,phy-input-delay-mmc-highspeed = <2>;
+ 			cdns,phy-input-delay-mmc-ddr = <3>;
+ 			cdns,phy-dll-delay-sdclk = <21>;
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+index 8a3276ba2da1..ef8b9a4d8910 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+@@ -435,7 +435,7 @@
+ 			mmc-ddr-1_8v;
+ 			mmc-hs200-1_8v;
+ 			mmc-pwrseq = <&emmc_pwrseq>;
+-			cdns,phy-input-delay-legacy = <4>;
++			cdns,phy-input-delay-legacy = <9>;
+ 			cdns,phy-input-delay-mmc-highspeed = <2>;
+ 			cdns,phy-input-delay-mmc-ddr = <3>;
+ 			cdns,phy-dll-delay-sdclk = <21>;
+diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+index 234fc58cc599..a1724f7e70fa 100644
+--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+@@ -336,7 +336,7 @@
+ 			mmc-ddr-1_8v;
+ 			mmc-hs200-1_8v;
+ 			mmc-pwrseq = <&emmc_pwrseq>;
+-			cdns,phy-input-delay-legacy = <4>;
++			cdns,phy-input-delay-legacy = <9>;
+ 			cdns,phy-input-delay-mmc-highspeed = <2>;
+ 			cdns,phy-input-delay-mmc-ddr = <3>;
+ 			cdns,phy-dll-delay-sdclk = <21>;
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 8e32a6f28f00..be1e2174bb66 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -75,6 +75,7 @@
+ #define ARM_CPU_IMP_CAVIUM		0x43
+ #define ARM_CPU_IMP_BRCM		0x42
+ #define ARM_CPU_IMP_QCOM		0x51
++#define ARM_CPU_IMP_NVIDIA		0x4E
+ 
+ #define ARM_CPU_PART_AEM_V8		0xD0F
+ #define ARM_CPU_PART_FOUNDATION		0xD00
+@@ -98,6 +99,9 @@
+ #define QCOM_CPU_PART_FALKOR		0xC00
+ #define QCOM_CPU_PART_KRYO		0x200
+ 
++#define NVIDIA_CPU_PART_DENVER		0x003
++#define NVIDIA_CPU_PART_CARMEL		0x004
++
+ #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+ #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+@@ -112,6 +116,8 @@
+ #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+ #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
++#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
++#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 9ae31f7e2243..b3fb0ccd6010 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -25,6 +25,7 @@
+ #include <linux/sched/signal.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/mm.h>
++#include <linux/nospec.h>
+ #include <linux/smp.h>
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+@@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
+ 
+ 	switch (note_type) {
+ 	case NT_ARM_HW_BREAK:
+-		if (idx < ARM_MAX_BRP)
+-			bp = tsk->thread.debug.hbp_break[idx];
++		if (idx >= ARM_MAX_BRP)
++			goto out;
++		idx = array_index_nospec(idx, ARM_MAX_BRP);
++		bp = tsk->thread.debug.hbp_break[idx];
+ 		break;
+ 	case NT_ARM_HW_WATCH:
+-		if (idx < ARM_MAX_WRP)
+-			bp = tsk->thread.debug.hbp_watch[idx];
++		if (idx >= ARM_MAX_WRP)
++			goto out;
++		idx = array_index_nospec(idx, ARM_MAX_WRP);
++		bp = tsk->thread.debug.hbp_watch[idx];
+ 		break;
+ 	}
+ 
++out:
+ 	return bp;
+ }
+ 
+@@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
+ {
+ 	int ret;
+ 	u32 kdata;
+-	mm_segment_t old_fs = get_fs();
+ 
+-	set_fs(KERNEL_DS);
+ 	/* Watchpoint */
+ 	if (num < 0) {
+ 		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
+@@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
+ 	} else {
+ 		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
+ 	}
+-	set_fs(old_fs);
+ 
+ 	if (!ret)
+ 		ret = put_user(kdata, data);
+@@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
+ {
+ 	int ret;
+ 	u32 kdata = 0;
+-	mm_segment_t old_fs = get_fs();
+ 
+ 	if (num == 0)
+ 		return 0;
+@@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
+ 	if (ret)
+ 		return ret;
+ 
+-	set_fs(KERNEL_DS);
+ 	if (num < 0)
+ 		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
+ 	else
+ 		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
+-	set_fs(old_fs);
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index eb2d15147e8d..e904f4ed49ff 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -243,7 +243,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
+ 	 * If we were single stepping, we want to get the step exception after
+ 	 * we return from the trap.
+ 	 */
+-	user_fastforward_single_step(current);
++	if (user_mode(regs))
++		user_fastforward_single_step(current);
+ }
+ 
+ static LIST_HEAD(undef_hook);
+diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
+index dabfc1ecda3d..12145874c02b 100644
+--- a/arch/arm64/mm/kasan_init.c
++++ b/arch/arm64/mm/kasan_init.c
+@@ -204,7 +204,7 @@ void __init kasan_init(void)
+ 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ 
+ 	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
+-			   pfn_to_nid(virt_to_pfn(lm_alias(_text))));
++			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+ 
+ 	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+ 				   (void *)mod_shadow_start);
+@@ -224,7 +224,7 @@ void __init kasan_init(void)
+ 
+ 		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ 				   (unsigned long)kasan_mem_to_shadow(end),
+-				   pfn_to_nid(virt_to_pfn(start)));
++				   early_pfn_to_nid(virt_to_pfn(start)));
+ 	}
+ 
+ 	/*
+diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
+index 9e8621d94ee9..e17262ad125e 100644
+--- a/arch/hexagon/include/asm/io.h
++++ b/arch/hexagon/include/asm/io.h
+@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
+ 	memcpy((void *) dst, src, count);
+ }
+ 
++static inline void memset_io(volatile void __iomem *addr, int value,
++			     size_t size)
++{
++	memset((void __force *)addr, value, size);
++}
++
+ #define PCI_IO_ADDR	(volatile void __iomem *)
+ 
+ /*
+diff --git a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c
+index 617506d1a559..7cd0a2259269 100644
+--- a/arch/hexagon/lib/checksum.c
++++ b/arch/hexagon/lib/checksum.c
+@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+ 	memcpy(dst, src, len);
+ 	return csum_partial(dst, len, sum);
+ }
++EXPORT_SYMBOL(csum_partial_copy_nocheck);
+diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
+index 2cd49b60e030..f7aad80c69ab 100644
+--- a/arch/mips/boot/dts/img/boston.dts
++++ b/arch/mips/boot/dts/img/boston.dts
+@@ -51,6 +51,8 @@
+ 		ranges = <0x02000000 0 0x40000000
+ 			  0x40000000 0 0x40000000>;
+ 
++		bus-range = <0x00 0xff>;
++
+ 		interrupt-map-mask = <0 0 0 7>;
+ 		interrupt-map = <0 0 0 1 &pci0_intc 1>,
+ 				<0 0 0 2 &pci0_intc 2>,
+@@ -79,6 +81,8 @@
+ 		ranges = <0x02000000 0 0x20000000
+ 			  0x20000000 0 0x20000000>;
+ 
++		bus-range = <0x00 0xff>;
++
+ 		interrupt-map-mask = <0 0 0 7>;
+ 		interrupt-map = <0 0 0 1 &pci1_intc 1>,
+ 				<0 0 0 2 &pci1_intc 2>,
+@@ -107,6 +111,8 @@
+ 		ranges = <0x02000000 0 0x16000000
+ 			  0x16000000 0 0x100000>;
+ 
++		bus-range = <0x00 0xff>;
++
+ 		interrupt-map-mask = <0 0 0 7>;
+ 		interrupt-map = <0 0 0 1 &pci2_intc 1>,
+ 				<0 0 0 2 &pci2_intc 2>,
+diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
+index 0cbf3af37eca..a7d0b836f2f7 100644
+--- a/arch/mips/include/asm/io.h
++++ b/arch/mips/include/asm/io.h
+@@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
+ #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
+ #define war_io_reorder_wmb()		wmb()
+ #else
+-#define war_io_reorder_wmb()		do { } while (0)
++#define war_io_reorder_wmb()		barrier()
+ #endif
+ 
+ #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
+@@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
+ 		BUG();							\
+ 	}								\
+ 									\
++	/* prevent prefetching of coherent DMA data prematurely */	\
++	rmb();								\
+ 	return pfx##ioswab##bwlq(__mem, __val);				\
+ }
+ 
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index d4240aa7f8b1..0f9ccd76a8ea 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
+  * Checks all the children of @parent for a matching @id.  If none
+  * found, it allocates a new device and returns it.
+  */
+-static struct parisc_device * alloc_tree_node(struct device *parent, char id)
++static struct parisc_device * __init alloc_tree_node(
++			struct device *parent, char id)
+ {
+ 	struct match_id_data d = {
+ 		.id = id,
+@@ -825,8 +826,8 @@ void walk_lower_bus(struct parisc_device *dev)
+  * devices which are not physically connected (such as extra serial &
+  * keyboard ports).  This problem is not yet solved.
+  */
+-static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+-                            struct device *parent)
++static void __init walk_native_bus(unsigned long io_io_low,
++	unsigned long io_io_high, struct device *parent)
+ {
+ 	int i, devices_found = 0;
+ 	unsigned long hpa = io_io_low;
+diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
+index 4065b5e48c9d..5e26dbede5fc 100644
+--- a/arch/parisc/kernel/smp.c
++++ b/arch/parisc/kernel/smp.c
+@@ -423,8 +423,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ }
+ 
+ #ifdef CONFIG_PROC_FS
+-int __init
+-setup_profiling_timer(unsigned int multiplier)
++int setup_profiling_timer(unsigned int multiplier)
+ {
+ 	return -EINVAL;
+ }
+diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
+index f7e684560186..42a873226a04 100644
+--- a/arch/parisc/kernel/time.c
++++ b/arch/parisc/kernel/time.c
+@@ -205,7 +205,7 @@ static int __init rtc_init(void)
+ device_initcall(rtc_init);
+ #endif
+ 
+-void read_persistent_clock(struct timespec *ts)
++void read_persistent_clock64(struct timespec64 *ts)
+ {
+ 	static struct pdc_tod tod_data;
+ 	if (pdc_tod_read(&tod_data) == 0) {
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 9f421641a35c..16b077801a5f 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -91,6 +91,7 @@ extern int start_topology_update(void);
+ extern int stop_topology_update(void);
+ extern int prrn_is_enabled(void);
+ extern int find_and_online_cpu_nid(int cpu);
++extern int timed_topology_update(int nsecs);
+ #else
+ static inline int start_topology_update(void)
+ {
+@@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu)
+ {
+ 	return 0;
+ }
++static inline int timed_topology_update(int nsecs)
++{
++	return 0;
++}
+ #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+ 
+-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
+-#if defined(CONFIG_PPC_SPLPAR)
+-extern int timed_topology_update(int nsecs);
+-#else
+-#define	timed_topology_update(nsecs)
+-#endif /* CONFIG_PPC_SPLPAR */
+-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */
+-
+ #include <asm-generic/topology.h>
+ 
+ #ifdef CONFIG_SMP
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index c27557aff394..e96b8e1cbd8c 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -864,6 +864,17 @@ static void init_fallback_flush(void)
+ 	int cpu;
+ 
+ 	l1d_size = ppc64_caches.l1d.size;
++
++	/*
++	 * If there is no d-cache-size property in the device tree, l1d_size
++	 * could be zero. That leads to the loop in the asm wrapping around to
++	 * 2^64-1, and then walking off the end of the fallback area and
++	 * eventually causing a page fault which is fatal. Just default to
++	 * something vaguely sane.
++	 */
++	if (!l1d_size)
++		l1d_size = (64 * 1024);
++
+ 	limit = min(ppc64_bolted_size(), ppc64_rma_size);
+ 
+ 	/*
+diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
+index 6038e2e7aee0..876d4f294fdd 100644
+--- a/arch/powerpc/kvm/booke.c
++++ b/arch/powerpc/kvm/booke.c
+@@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+ 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+ }
+ 
++#ifdef CONFIG_ALTIVEC
++void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
++{
++	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
++}
++#endif
++
+ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
+ {
+ 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
+diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
+index 9033c8194eda..ccc421503363 100644
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
+ 		LOAD_INT(c), LOAD_FRAC(c),
+ 		count_active_contexts(),
+ 		atomic_read(&nr_spu_contexts),
+-		idr_get_cursor(&task_active_pid_ns(current)->idr));
++		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
+ 	return 0;
+ }
+ 
+diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
+index de470caf0784..fc222a0c2ac4 100644
+--- a/arch/powerpc/platforms/powernv/memtrace.c
++++ b/arch/powerpc/platforms/powernv/memtrace.c
+@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
+ 	.open	= simple_open,
+ };
+ 
+-static void flush_memory_region(u64 base, u64 size)
+-{
+-	unsigned long line_size = ppc64_caches.l1d.size;
+-	u64 end = base + size;
+-	u64 addr;
+-
+-	base = round_down(base, line_size);
+-	end = round_up(end, line_size);
+-
+-	for (addr = base; addr < end; addr += line_size)
+-		asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
+-}
+-
+ static int check_memblock_online(struct memory_block *mem, void *arg)
+ {
+ 	if (mem->state != MEM_ONLINE)
+@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
+ 	walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
+ 			  change_memblock_state);
+ 
+-	/* RCU grace period? */
+-	flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
+-			    nr_pages << PAGE_SHIFT);
+-
+ 	lock_device_hotplug();
+ 	remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ 	unlock_device_hotplug();
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index 04807c7f64cc..1225d9add766 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -11,6 +11,7 @@ config RISCV
+ 	select ARCH_WANT_FRAME_POINTERS
+ 	select CLONE_BACKWARDS
+ 	select COMMON_CLK
++	select DMA_DIRECT_OPS
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_CPU_DEVICES
+ 	select GENERIC_IRQ_SHOW
+@@ -88,9 +89,6 @@ config PGTABLE_LEVELS
+ config HAVE_KPROBES
+ 	def_bool n
+ 
+-config DMA_DIRECT_OPS
+-	def_bool y
+-
+ menu "Platform type"
+ 
+ choice
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 324568d33921..f6561b783b61 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
+ # Make sure only to export the intended __vdso_xxx symbol offsets.
+ quiet_cmd_vdsold = VDSOLD  $@
+-      cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
++      cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
+                            -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
+                    $(CROSS_COMPILE)objcopy \
+                            $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
+index 97fe29316476..1851eaeee131 100644
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -9,6 +9,7 @@ config SUPERH
+ 	select HAVE_IDE if HAS_IOPORT_MAP
+ 	select HAVE_MEMBLOCK
+ 	select HAVE_MEMBLOCK_NODE_MAP
++	select NO_BOOTMEM
+ 	select ARCH_DISCARD_MEMBLOCK
+ 	select HAVE_OPROFILE
+ 	select HAVE_GENERIC_DMA_COHERENT
+diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
+index 4205f6d42b69..a5bd03642678 100644
+--- a/arch/sh/kernel/cpu/sh2/probe.c
++++ b/arch/sh/kernel/cpu/sh2/probe.c
+@@ -43,7 +43,11 @@ void __ref cpu_probe(void)
+ #endif
+ 
+ #if defined(CONFIG_CPU_J2)
++#if defined(CONFIG_SMP)
+ 	unsigned cpu = hard_smp_processor_id();
++#else
++	unsigned cpu = 0;
++#endif
+ 	if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
+ 	if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
+ 	if (cpu != 0) return;
+diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
+index b95c411d0333..b075b030218a 100644
+--- a/arch/sh/kernel/setup.c
++++ b/arch/sh/kernel/setup.c
+@@ -11,7 +11,6 @@
+ #include <linux/ioport.h>
+ #include <linux/init.h>
+ #include <linux/initrd.h>
+-#include <linux/bootmem.h>
+ #include <linux/console.h>
+ #include <linux/root_dev.h>
+ #include <linux/utsname.h>
+diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
+index ce0bbaa7e404..4034035fbede 100644
+--- a/arch/sh/mm/init.c
++++ b/arch/sh/mm/init.c
+@@ -211,59 +211,15 @@ void __init allocate_pgdat(unsigned int nid)
+ 
+ 	NODE_DATA(nid) = __va(phys);
+ 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+-
+-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+ #endif
+ 
+ 	NODE_DATA(nid)->node_start_pfn = start_pfn;
+ 	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+ }
+ 
+-static void __init bootmem_init_one_node(unsigned int nid)
+-{
+-	unsigned long total_pages, paddr;
+-	unsigned long end_pfn;
+-	struct pglist_data *p;
+-
+-	p = NODE_DATA(nid);
+-
+-	/* Nothing to do.. */
+-	if (!p->node_spanned_pages)
+-		return;
+-
+-	end_pfn = pgdat_end_pfn(p);
+-
+-	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
+-
+-	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+-	if (!paddr)
+-		panic("Can't allocate bootmap for nid[%d]\n", nid);
+-
+-	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
+-
+-	free_bootmem_with_active_regions(nid, end_pfn);
+-
+-	/*
+-	 * XXX Handle initial reservations for the system memory node
+-	 * only for the moment, we'll refactor this later for handling
+-	 * reservations in other nodes.
+-	 */
+-	if (nid == 0) {
+-		struct memblock_region *reg;
+-
+-		/* Reserve the sections we're already using. */
+-		for_each_memblock(reserved, reg) {
+-			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+-		}
+-	}
+-
+-	sparse_memory_present_with_active_regions(nid);
+-}
+-
+ static void __init do_init_bootmem(void)
+ {
+ 	struct memblock_region *reg;
+-	int i;
+ 
+ 	/* Add active regions with valid PFNs. */
+ 	for_each_memblock(memory, reg) {
+@@ -279,9 +235,12 @@ static void __init do_init_bootmem(void)
+ 
+ 	plat_mem_setup();
+ 
+-	for_each_online_node(i)
+-		bootmem_init_one_node(i);
++	for_each_memblock(memory, reg) {
++		int nid = memblock_get_region_node(reg);
+ 
++		memory_present(nid, memblock_region_memory_base_pfn(reg),
++			memblock_region_memory_end_pfn(reg));
++	}
+ 	sparse_init();
+ }
+ 
+@@ -322,7 +281,6 @@ void __init paging_init(void)
+ {
+ 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+ 	unsigned long vaddr, end;
+-	int nid;
+ 
+ 	sh_mv.mv_mem_init();
+ 
+@@ -377,21 +335,7 @@ void __init paging_init(void)
+ 	kmap_coherent_init();
+ 
+ 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+-
+-	for_each_online_node(nid) {
+-		pg_data_t *pgdat = NODE_DATA(nid);
+-		unsigned long low, start_pfn;
+-
+-		start_pfn = pgdat->bdata->node_min_pfn;
+-		low = pgdat->bdata->node_low_pfn;
+-
+-		if (max_zone_pfns[ZONE_NORMAL] < low)
+-			max_zone_pfns[ZONE_NORMAL] = low;
+-
+-		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
+-		       nid, start_pfn, low);
+-	}
+-
++	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ 	free_area_init_nodes(max_zone_pfns);
+ }
+ 
+diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
+index 05713d190247..830e8b3684e4 100644
+--- a/arch/sh/mm/numa.c
++++ b/arch/sh/mm/numa.c
+@@ -8,7 +8,6 @@
+  * for more details.
+  */
+ #include <linux/module.h>
+-#include <linux/bootmem.h>
+ #include <linux/memblock.h>
+ #include <linux/mm.h>
+ #include <linux/numa.h>
+@@ -26,9 +25,7 @@ EXPORT_SYMBOL_GPL(node_data);
+  */
+ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+ {
+-	unsigned long bootmap_pages;
+ 	unsigned long start_pfn, end_pfn;
+-	unsigned long bootmem_paddr;
+ 
+ 	/* Don't allow bogus node assignment */
+ 	BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
+@@ -48,25 +45,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
+ 					     SMP_CACHE_BYTES, end));
+ 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+ 
+-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+ 	NODE_DATA(nid)->node_start_pfn = start_pfn;
+ 	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+ 
+-	/* Node-local bootmap */
+-	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+-	bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
+-				       PAGE_SIZE, end);
+-	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+-			  start_pfn, end_pfn);
+-
+-	free_bootmem_with_active_regions(nid, end_pfn);
+-
+-	/* Reserve the pgdat and bootmap space with the bootmem allocator */
+-	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
+-			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
+-	reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
+-			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
+-
+ 	/* It's up */
+ 	node_set_online(nid);
+ 
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 39cd0b36c790..9296b41ac342 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3331,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu)
+ 
+ 	cpuc->lbr_sel = NULL;
+ 
+-	flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
++	if (x86_pmu.version > 1)
++		flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+ 
+ 	if (!cpuc->shared_regs)
+ 		return;
+@@ -3494,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = {
+ 	.cpu_dying		= intel_pmu_cpu_dying,
+ };
+ 
++static struct attribute *intel_pmu_attrs[];
++
+ static __initconst const struct x86_pmu intel_pmu = {
+ 	.name			= "Intel",
+ 	.handle_irq		= intel_pmu_handle_irq,
+@@ -3524,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = {
+ 	.format_attrs		= intel_arch3_formats_attr,
+ 	.events_sysfs_show	= intel_event_sysfs_show,
+ 
++	.attrs			= intel_pmu_attrs,
++
+ 	.cpu_prepare		= intel_pmu_cpu_prepare,
+ 	.cpu_starting		= intel_pmu_cpu_starting,
+ 	.cpu_dying		= intel_pmu_cpu_dying,
+@@ -3902,8 +3907,6 @@ __init int intel_pmu_init(void)
+ 
+ 	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+ 
+-
+-	x86_pmu.attrs			= intel_pmu_attrs;
+ 	/*
+ 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
+ 	 * assume at least 3 events, when not running in a hypervisor:
+diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
+index b3e32b010ab1..c2c01f84df75 100644
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
+ 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+ 
++#define POP_SS_OPCODE 0x1f
++#define MOV_SREG_OPCODE 0x8e
++
++/*
++ * Intel SDM Vol.3A 6.8.3 states;
++ * "Any single-step trap that would be delivered following the MOV to SS
++ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
++ * suppressed."
++ * This function returns true if @insn is MOV SS or POP SS. On these
++ * instructions, single stepping is suppressed.
++ */
++static inline int insn_masking_exception(struct insn *insn)
++{
++	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
++		(insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
++		 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
++}
++
+ #endif /* _ASM_X86_INSN_H */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 4b0539a52c4c..e2201c9c3f20 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1019,6 +1019,7 @@ struct kvm_x86_ops {
+ 
+ 	bool (*has_wbinvd_exit)(void);
+ 
++	u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
+ 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ 
+ 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index c895f38a7a5e..0b2330e19169 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -751,6 +751,9 @@ static const struct _tlb_table intel_tlb_table[] = {
+ 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
+ 	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
+ 	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
++	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
++	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
++	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
+ 	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
+ 	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
+ 	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index fb095ba0c02f..f24cd9f1799a 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
+ 	 * little bit simple
+ 	 */
+ 	efi_map_sz = efi_get_runtime_map_size();
+-	efi_map_sz = ALIGN(efi_map_sz, 16);
+ 	params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
+ 				MAX_ELFCOREHDR_STR_LEN;
+ 	params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
+-	kbuf.bufsz = params_cmdline_sz + efi_map_sz +
++	kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
+ 				sizeof(struct setup_data) +
+ 				sizeof(struct efi_setup_data);
+ 
+@@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
+ 	if (!params)
+ 		return ERR_PTR(-ENOMEM);
+ 	efi_map_offset = params_cmdline_sz;
+-	efi_setup_data_offset = efi_map_offset + efi_map_sz;
++	efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
+ 
+ 	/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
+ 	setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 0715f827607c..6f4d42377fe5 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -370,6 +370,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
+ 	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+ 		return 0;
+ 
++	/* We should not singlestep on the exception masking instructions */
++	if (insn_masking_exception(insn))
++		return 0;
++
+ #ifdef CONFIG_X86_64
+ 	/* Only x86_64 has RIP relative instructions */
+ 	if (insn_rip_relative(insn)) {
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 85c7ef23d99f..c84bb5396958 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -299,6 +299,10 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
+ 	if (is_prefix_bad(insn))
+ 		return -ENOTSUPP;
+ 
++	/* We should not singlestep on the exception masking instructions */
++	if (insn_masking_exception(insn))
++		return -ENOTSUPP;
++
+ 	if (x86_64)
+ 		good_insns = good_insns_64;
+ 	else
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index dc97f2544b6f..5d13abecb384 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1223,7 +1223,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
+ 	struct kvm_run *run = vcpu->run;
+ 
+ 	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
+-	return 1;
++	return kvm_skip_emulated_instruction(vcpu);
+ }
+ 
+ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index dbbd762359a9..569aa55d0aba 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1313,12 +1313,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
+ 	seg->base = 0;
+ }
+ 
++static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
++{
++	struct vcpu_svm *svm = to_svm(vcpu);
++
++	if (is_guest_mode(vcpu))
++		return svm->nested.hsave->control.tsc_offset;
++
++	return vcpu->arch.tsc_offset;
++}
++
+ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 	u64 g_tsc_offset = 0;
+ 
+ 	if (is_guest_mode(vcpu)) {
++		/* Write L1's TSC offset.  */
+ 		g_tsc_offset = svm->vmcb->control.tsc_offset -
+ 			       svm->nested.hsave->control.tsc_offset;
+ 		svm->nested.hsave->control.tsc_offset = offset;
+@@ -3188,6 +3199,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
+ 	/* Restore the original control entries */
+ 	copy_vmcb_control_area(vmcb, hsave);
+ 
++	svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
+ 	kvm_clear_exception_queue(&svm->vcpu);
+ 	kvm_clear_interrupt_queue(&svm->vcpu);
+ 
+@@ -3348,10 +3360,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+ 	/* We don't want to see VMMCALLs from a nested guest */
+ 	clr_intercept(svm, INTERCEPT_VMMCALL);
+ 
++	svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
++	svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
++
+ 	svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
+ 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
+ 	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
+-	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
+ 	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
+ 	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+ 
+@@ -3901,12 +3915,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 	switch (msr_info->index) {
+-	case MSR_IA32_TSC: {
+-		msr_info->data = svm->vmcb->control.tsc_offset +
+-			kvm_scale_tsc(vcpu, rdtsc());
+-
+-		break;
+-	}
+ 	case MSR_STAR:
+ 		msr_info->data = svm->vmcb->save.star;
+ 		break;
+@@ -4066,9 +4074,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		svm->vmcb->save.g_pat = data;
+ 		mark_dirty(svm->vmcb, VMCB_NPT);
+ 		break;
+-	case MSR_IA32_TSC:
+-		kvm_write_tsc(vcpu, msr);
+-		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr->host_initiated &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+@@ -5142,9 +5147,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 		}
+ 
+ 		if (!ret && svm) {
+-			trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
+-						 host_irq, e->gsi,
+-						 vcpu_info.vector,
++			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
++						 e->gsi, vcpu_info.vector,
+ 						 vcpu_info.pi_desc_addr, set);
+ 		}
+ 
+@@ -6967,6 +6971,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ 
+ 	.has_wbinvd_exit = svm_has_wbinvd_exit,
+ 
++	.read_l1_tsc_offset = svm_read_l1_tsc_offset,
+ 	.write_tsc_offset = svm_write_tsc_offset,
+ 
+ 	.set_tdp_cr3 = set_tdp_cr3,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index e3b589e28264..c779f0970126 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2638,18 +2638,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+ 		vmx_update_msr_bitmap(&vmx->vcpu);
+ }
+ 
+-/*
+- * reads and returns guest's timestamp counter "register"
+- * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
+- * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
+- */
+-static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
++static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+ {
+-	u64 host_tsc, tsc_offset;
++	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 
+-	host_tsc = rdtsc();
+-	tsc_offset = vmcs_read64(TSC_OFFSET);
+-	return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
++	if (is_guest_mode(vcpu) &&
++	    (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
++		return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
++
++	return vcpu->arch.tsc_offset;
+ }
+ 
+ /*
+@@ -3272,9 +3269,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ #endif
+ 	case MSR_EFER:
+ 		return kvm_get_msr_common(vcpu, msr_info);
+-	case MSR_IA32_TSC:
+-		msr_info->data = guest_read_tsc(vcpu);
+-		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr_info->host_initiated &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+@@ -3392,9 +3386,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			return 1;
+ 		vmcs_write64(GUEST_BNDCFGS, data);
+ 		break;
+-	case MSR_IA32_TSC:
+-		kvm_write_tsc(vcpu, msr_info);
+-		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr_info->host_initiated &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+@@ -4281,12 +4272,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+ 	__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
+ }
+ 
+-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
+-{
+-	if (enable_ept)
+-		vmx_flush_tlb(vcpu, true);
+-}
+-
+ static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+ {
+ 	ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+@@ -9039,7 +9024,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+ 	} else {
+ 		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ 		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+-		vmx_flush_tlb_ept_only(vcpu);
++		vmx_flush_tlb(vcpu, true);
+ 	}
+ 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+ 
+@@ -9067,7 +9052,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+ 	    !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
+ 			     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ 		vmcs_write64(APIC_ACCESS_ADDR, hpa);
+-		vmx_flush_tlb_ept_only(vcpu);
++		vmx_flush_tlb(vcpu, true);
+ 	}
+ }
+ 
+@@ -10338,6 +10323,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ 	return true;
+ }
+ 
++static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
++					  struct vmcs12 *vmcs12)
++{
++	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
++	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
++		return -EINVAL;
++	else
++		return 0;
++}
++
+ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
+ 					   struct vmcs12 *vmcs12)
+ {
+@@ -10906,11 +10901,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+ 	}
+ 
+-	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+-		vmcs_write64(TSC_OFFSET,
+-			vcpu->arch.tsc_offset + vmcs12->tsc_offset);
+-	else
+-		vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
++	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
++
+ 	if (kvm_has_tsc_control)
+ 		decache_tsc_multiplier(vmx);
+ 
+@@ -10952,7 +10944,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ 		}
+ 	} else if (nested_cpu_has2(vmcs12,
+ 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+-		vmx_flush_tlb_ept_only(vcpu);
++		vmx_flush_tlb(vcpu, true);
+ 	}
+ 
+ 	/*
+@@ -11006,6 +10998,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+ 	if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
+ 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ 
++	if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
++		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
++
+ 	if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
+ 		return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ 
+@@ -11124,6 +11119,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+ 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ 	u32 msr_entry_idx;
+ 	u32 exit_qual;
++	int r;
+ 
+ 	enter_guest_mode(vcpu);
+ 
+@@ -11133,26 +11129,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+ 	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
+ 	vmx_segment_cache_clear(vmx);
+ 
+-	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
+-		leave_guest_mode(vcpu);
+-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+-		nested_vmx_entry_failure(vcpu, vmcs12,
+-					 EXIT_REASON_INVALID_STATE, exit_qual);
+-		return 1;
+-	}
++	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++		vcpu->arch.tsc_offset += vmcs12->tsc_offset;
++
++	r = EXIT_REASON_INVALID_STATE;
++	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
++		goto fail;
+ 
+ 	nested_get_vmcs12_pages(vcpu, vmcs12);
+ 
++	r = EXIT_REASON_MSR_LOAD_FAIL;
+ 	msr_entry_idx = nested_vmx_load_msr(vcpu,
+ 					    vmcs12->vm_entry_msr_load_addr,
+ 					    vmcs12->vm_entry_msr_load_count);
+-	if (msr_entry_idx) {
+-		leave_guest_mode(vcpu);
+-		vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+-		nested_vmx_entry_failure(vcpu, vmcs12,
+-				EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
+-		return 1;
+-	}
++	if (msr_entry_idx)
++		goto fail;
+ 
+ 	/*
+ 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
+@@ -11161,6 +11152,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+ 	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
+ 	 */
+ 	return 0;
++
++fail:
++	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
++	leave_guest_mode(vcpu);
++	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
++	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
++	return 1;
+ }
+ 
+ /*
+@@ -11732,6 +11731,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ 
+ 	leave_guest_mode(vcpu);
+ 
++	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
++		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
++
+ 	if (likely(!vmx->fail)) {
+ 		if (exit_reason == -1)
+ 			sync_vmcs12(vcpu, vmcs12);
+@@ -11769,7 +11771,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ 	} else if (!nested_cpu_has_ept(vmcs12) &&
+ 		   nested_cpu_has2(vmcs12,
+ 				   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+-		vmx_flush_tlb_ept_only(vcpu);
++		vmx_flush_tlb(vcpu, true);
+ 	}
+ 
+ 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
+@@ -12237,7 +12239,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ 		vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
+ 		vcpu_info.vector = irq.vector;
+ 
+-		trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
++		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
+ 				vcpu_info.vector, vcpu_info.pi_desc_addr, set);
+ 
+ 		if (set)
+@@ -12410,6 +12412,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ 
+ 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+ 
++	.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
+ 	.write_tsc_offset = vmx_write_tsc_offset,
+ 
+ 	.set_tdp_cr3 = vmx_set_cr3,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cf08ac8a910c..f3a1f9f3fb29 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -112,7 +112,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+ static bool __read_mostly report_ignored_msrs = true;
+ module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+ 
+-unsigned int min_timer_period_us = 500;
++unsigned int min_timer_period_us = 200;
+ module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+ 
+ static bool __read_mostly kvmclock_periodic_sync = true;
+@@ -1459,7 +1459,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+ 
+ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
+ {
+-	u64 curr_offset = vcpu->arch.tsc_offset;
++	u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+ 	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
+ }
+ 
+@@ -1501,7 +1501,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+ 
+ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+ {
+-	return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
++	u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
++
++	return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+ 
+@@ -2331,6 +2333,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			return 1;
+ 		vcpu->arch.smbase = data;
+ 		break;
++	case MSR_IA32_TSC:
++		kvm_write_tsc(vcpu, msr_info);
++		break;
+ 	case MSR_SMI_COUNT:
+ 		if (!msr_info->host_initiated)
+ 			return 1;
+@@ -2570,6 +2575,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 	case MSR_IA32_UCODE_REV:
+ 		msr_info->data = vcpu->arch.microcode_version;
+ 		break;
++	case MSR_IA32_TSC:
++		msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
++		break;
+ 	case MSR_MTRRcap:
+ 	case 0x200 ... 0x2ff:
+ 		return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
+@@ -6545,12 +6553,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
+ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ {
+ 	unsigned long nr, a0, a1, a2, a3, ret;
+-	int op_64_bit, r;
+-
+-	r = kvm_skip_emulated_instruction(vcpu);
++	int op_64_bit;
+ 
+-	if (kvm_hv_hypercall_enabled(vcpu->kvm))
+-		return kvm_hv_hypercall(vcpu);
++	if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
++		if (!kvm_hv_hypercall(vcpu))
++			return 0;
++		goto out;
++	}
+ 
+ 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
+@@ -6571,7 +6580,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ 
+ 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
+ 		ret = -KVM_EPERM;
+-		goto out;
++		goto out_error;
+ 	}
+ 
+ 	switch (nr) {
+@@ -6591,12 +6600,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
+ 		ret = -KVM_ENOSYS;
+ 		break;
+ 	}
+-out:
++out_error:
+ 	if (!op_64_bit)
+ 		ret = (u32)ret;
+ 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++
++out:
+ 	++vcpu->stat.hypercalls;
+-	return r;
++	return kvm_skip_emulated_instruction(vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+ 
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index ce5b2ebd5701..6609cb6c91af 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -992,7 +992,17 @@ xadd:			if (is_imm8(insn->off))
+ 			break;
+ 
+ 		case BPF_JMP | BPF_JA:
+-			jmp_offset = addrs[i + insn->off] - addrs[i];
++			if (insn->off == -1)
++				/* -1 jmp instructions will always jump
++				 * backwards two bytes. Explicitly handling
++				 * this case avoids wasting too many passes
++				 * when there are long sequences of replaced
++				 * dead code.
++				 */
++				jmp_offset = -2;
++			else
++				jmp_offset = addrs[i + insn->off] - addrs[i];
++
+ 			if (!jmp_offset)
+ 				/* optimize out nop jumps */
+ 				break;
+@@ -1191,6 +1201,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	for (pass = 0; pass < 20 || image; pass++) {
+ 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ 		if (proglen <= 0) {
++out_image:
+ 			image = NULL;
+ 			if (header)
+ 				bpf_jit_binary_free(header);
+@@ -1201,8 +1212,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 			if (proglen != oldproglen) {
+ 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+ 				       proglen, oldproglen);
+-				prog = orig_prog;
+-				goto out_addrs;
++				goto out_image;
+ 			}
+ 			break;
+ 		}
+@@ -1239,7 +1249,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 		prog = orig_prog;
+ 	}
+ 
+-	if (!prog->is_func || extra_pass) {
++	if (!image || !prog->is_func || extra_pass) {
+ out_addrs:
+ 		kfree(addrs);
+ 		kfree(jit_data);
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index 826898701045..19c1ff542387 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -65,6 +65,19 @@ static void __init xen_hvm_init_mem_mapping(void)
+ {
+ 	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
+ 	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
++
++	/*
++	 * The virtual address of the shared_info page has changed, so
++	 * the vcpu_info pointer for VCPU 0 is now stale.
++	 *
++	 * The prepare_boot_cpu callback will re-initialize it via
++	 * xen_vcpu_setup, but we can't rely on that to be called for
++	 * old Xen versions (xen_have_vector_callback == 0).
++	 *
++	 * It is, in any case, bad to have a stale vcpu_info pointer
++	 * so reset it now.
++	 */
++	xen_vcpu_info_reset(0);
+ }
+ 
+ static void __init init_hvm_pv_info(void)
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index c2033a232a44..58d030517b0f 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1142,18 +1142,16 @@ int blkcg_init_queue(struct request_queue *q)
+ 	rcu_read_lock();
+ 	spin_lock_irq(q->queue_lock);
+ 	blkg = blkg_create(&blkcg_root, q, new_blkg);
++	if (IS_ERR(blkg))
++		goto err_unlock;
++	q->root_blkg = blkg;
++	q->root_rl.blkg = blkg;
+ 	spin_unlock_irq(q->queue_lock);
+ 	rcu_read_unlock();
+ 
+ 	if (preloaded)
+ 		radix_tree_preload_end();
+ 
+-	if (IS_ERR(blkg))
+-		return PTR_ERR(blkg);
+-
+-	q->root_blkg = blkg;
+-	q->root_rl.blkg = blkg;
+-
+ 	ret = blk_throtl_init(q);
+ 	if (ret) {
+ 		spin_lock_irq(q->queue_lock);
+@@ -1161,6 +1159,13 @@ int blkcg_init_queue(struct request_queue *q)
+ 		spin_unlock_irq(q->queue_lock);
+ 	}
+ 	return ret;
++
++err_unlock:
++	spin_unlock_irq(q->queue_lock);
++	rcu_read_unlock();
++	if (preloaded)
++		radix_tree_preload_end();
++	return PTR_ERR(blkg);
+ }
+ 
+ /**
+@@ -1367,17 +1372,12 @@ void blkcg_deactivate_policy(struct request_queue *q,
+ 	__clear_bit(pol->plid, q->blkcg_pols);
+ 
+ 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
+-		/* grab blkcg lock too while removing @pd from @blkg */
+-		spin_lock(&blkg->blkcg->lock);
+-
+ 		if (blkg->pd[pol->plid]) {
+ 			if (pol->pd_offline_fn)
+ 				pol->pd_offline_fn(blkg->pd[pol->plid]);
+ 			pol->pd_free_fn(blkg->pd[pol->plid]);
+ 			blkg->pd[pol->plid] = NULL;
+ 		}
+-
+-		spin_unlock(&blkg->blkcg->lock);
+ 	}
+ 
+ 	spin_unlock_irq(q->queue_lock);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 96de7aa4f62a..00e16588b169 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -118,6 +118,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
+ 	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+ }
+ 
++static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
++				     struct request *rq, void *priv,
++				     bool reserved)
++{
++	struct mq_inflight *mi = priv;
++
++	if (rq->part == mi->part)
++		mi->inflight[rq_data_dir(rq)]++;
++}
++
++void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
++			 unsigned int inflight[2])
++{
++	struct mq_inflight mi = { .part = part, .inflight = inflight, };
++
++	inflight[0] = inflight[1] = 0;
++	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
++}
++
+ void blk_freeze_queue_start(struct request_queue *q)
+ {
+ 	int freeze_depth;
+diff --git a/block/blk-mq.h b/block/blk-mq.h
+index 88c558f71819..ecc86b6efdec 100644
+--- a/block/blk-mq.h
++++ b/block/blk-mq.h
+@@ -185,7 +185,9 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
+ }
+ 
+ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
+-			unsigned int inflight[2]);
++		      unsigned int inflight[2]);
++void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
++			 unsigned int inflight[2]);
+ 
+ static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/genhd.c b/block/genhd.c
+index 9656f9e9f99e..8f34897159f5 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
+ 	}
+ }
+ 
++void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
++		       unsigned int inflight[2])
++{
++	if (q->mq_ops) {
++		blk_mq_in_flight_rw(q, part, inflight);
++		return;
++	}
++
++	inflight[0] = atomic_read(&part->in_flight[0]);
++	inflight[1] = atomic_read(&part->in_flight[1]);
++}
++
+ struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
+ {
+ 	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 08dabcd8b6ae..db57cced9b98 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
+ 		jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+ }
+ 
+-ssize_t part_inflight_show(struct device *dev,
+-			struct device_attribute *attr, char *buf)
++ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
++			   char *buf)
+ {
+ 	struct hd_struct *p = dev_to_part(dev);
++	struct request_queue *q = part_to_disk(p)->queue;
++	unsigned int inflight[2];
+ 
+-	return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
+-		atomic_read(&p->in_flight[1]));
++	part_in_flight_rw(q, p, inflight);
++	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
+ }
+ 
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
+index ebb626ffb5fa..4bde16fb97d8 100644
+--- a/drivers/acpi/acpi_watchdog.c
++++ b/drivers/acpi/acpi_watchdog.c
+@@ -12,23 +12,64 @@
+ #define pr_fmt(fmt) "ACPI: watchdog: " fmt
+ 
+ #include <linux/acpi.h>
++#include <linux/dmi.h>
+ #include <linux/ioport.h>
+ #include <linux/platform_device.h>
+ 
+ #include "internal.h"
+ 
++static const struct dmi_system_id acpi_watchdog_skip[] = {
++	{
++		/*
++		 * On Lenovo Z50-70 there are two issues with the WDAT
++		 * table. First some of the instructions use RTC SRAM
++		 * to store persistent information. This does not work well
++		 * with Linux RTC driver. Second, more important thing is
++		 * that the instructions do not actually reset the system.
++		 *
++		 * On this particular system iTCO_wdt seems to work just
++		 * fine so we prefer that over WDAT for now.
++		 *
++		 * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
++		 */
++		.ident = "Lenovo Z50-70",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
++		},
++	},
++	{}
++};
++
++static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
++{
++	const struct acpi_table_wdat *wdat = NULL;
++	acpi_status status;
++
++	if (acpi_disabled)
++		return NULL;
++
++	if (dmi_check_system(acpi_watchdog_skip))
++		return NULL;
++
++	status = acpi_get_table(ACPI_SIG_WDAT, 0,
++				(struct acpi_table_header **)&wdat);
++	if (ACPI_FAILURE(status)) {
++		/* It is fine if there is no WDAT */
++		return NULL;
++	}
++
++	return wdat;
++}
++
+ /**
+  * Returns true if this system should prefer ACPI based watchdog instead of
+  * the native one (which are typically the same hardware).
+  */
+ bool acpi_has_watchdog(void)
+ {
+-	struct acpi_table_header hdr;
+-
+-	if (acpi_disabled)
+-		return false;
+-
+-	return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
++	return !!acpi_watchdog_get_wdat();
+ }
+ EXPORT_SYMBOL_GPL(acpi_has_watchdog);
+ 
+@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
+ 	struct platform_device *pdev;
+ 	struct resource *resources;
+ 	size_t nresources = 0;
+-	acpi_status status;
+ 	int i;
+ 
+-	status = acpi_get_table(ACPI_SIG_WDAT, 0,
+-				(struct acpi_table_header **)&wdat);
+-	if (ACPI_FAILURE(status)) {
++	wdat = acpi_watchdog_get_wdat();
++	if (!wdat) {
+ 		/* It is fine if there is no WDAT */
+ 		return;
+ 	}
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 8e63d937babb..807e1ae67b7c 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -2150,10 +2150,10 @@ int __init acpi_scan_init(void)
+ 	acpi_cmos_rtc_init();
+ 	acpi_container_init();
+ 	acpi_memory_hotplug_init();
++	acpi_watchdog_init();
+ 	acpi_pnp_init();
+ 	acpi_int340x_thermal_init();
+ 	acpi_amba_init();
+-	acpi_watchdog_init();
+ 	acpi_init_lpit();
+ 
+ 	acpi_scan_add_handler(&generic_device_handler);
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 46cde0912762..b7846d8d3e87 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ 		},
+ 	},
++	/*
++	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
++	 * the Low Power S0 Idle firmware interface (see
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
++	 */
++	{
++	.callback = init_no_lps0,
++	.ident = "ThinkPad X1 Tablet(2016)",
++	.matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
++		},
++	},
+ 	{},
+ };
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 1d396b6e6000..738fb22978dd 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -699,7 +699,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
+ 
+ 	DPRINTK("ENTER\n");
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
+ 				 deadline, &online, NULL);
+@@ -725,7 +725,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
+ 	bool online;
+ 	int rc;
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	/* clear D2H reception area to properly wait for D2H FIS */
+ 	ata_tf_init(link->device, &tf);
+@@ -789,7 +789,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+ 
+ 	DPRINTK("ENTER\n");
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	for (i = 0; i < 2; i++) {
+ 		u16 val;
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index a9d996e17d75..824bd399f02e 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -365,6 +365,13 @@ struct ahci_host_priv {
+ 	 * be overridden anytime before the host is activated.
+ 	 */
+ 	void			(*start_engine)(struct ata_port *ap);
++	/*
++	 * Optional ahci_stop_engine override, if not set this gets set to the
++	 * default ahci_stop_engine during ahci_save_initial_config, this can
++	 * be overridden anytime before the host is activated.
++	 */
++	int			(*stop_engine)(struct ata_port *ap);
++
+ 	irqreturn_t 		(*irq_handler)(int irq, void *dev_instance);
+ 
+ 	/* only required for per-port MSI(-X) support */
+diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
+index de7128d81e9c..0045dacd814b 100644
+--- a/drivers/ata/ahci_mvebu.c
++++ b/drivers/ata/ahci_mvebu.c
+@@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
+ 	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+ }
+ 
++/**
++ * ahci_mvebu_stop_engine
++ *
++ * @ap:	Target ata port
++ *
++ * Errata Ref#226 - SATA Disk HOT swap issue when connected through
++ * Port Multiplier in FIS-based Switching mode.
++ *
++ * To avoid the issue, according to design, the bits[11:8, 0] of
++ * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
++ * changes its value from 1 to 0, i.e. falling edge of Port
++ * Command and Status bit[0] sends PULSE that resets PxFBS
++ * bits[11:8; 0].
++ *
++ * This function is used to override function of "ahci_stop_engine"
++ * from libahci.c by adding the mvebu work around(WA) to save PxFBS
++ * value before the PxCMD ST write of 0, then restore PxFBS value.
++ *
++ * Return: 0 on success; Error code otherwise.
++ */
++int ahci_mvebu_stop_engine(struct ata_port *ap)
++{
++	void __iomem *port_mmio = ahci_port_base(ap);
++	u32 tmp, port_fbs;
++
++	tmp = readl(port_mmio + PORT_CMD);
++
++	/* check if the HBA is idle */
++	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
++		return 0;
++
++	/* save the port PxFBS register for later restore */
++	port_fbs = readl(port_mmio + PORT_FBS);
++
++	/* setting HBA to idle */
++	tmp &= ~PORT_CMD_START;
++	writel(tmp, port_mmio + PORT_CMD);
++
++	/*
++	 * bit #15 PxCMD signal doesn't clear PxFBS,
++	 * restore the PxFBS register right after clearing the PxCMD ST,
++	 * no need to wait for the PxCMD bit #15.
++	 */
++	writel(port_fbs, port_mmio + PORT_FBS);
++
++	/* wait for engine to stop. This could be as long as 500 msec */
++	tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
++				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
++	if (tmp & PORT_CMD_LIST_ON)
++		return -EIO;
++
++	return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
+ {
+@@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
+ 	if (rc)
+ 		return rc;
+ 
++	hpriv->stop_engine = ahci_mvebu_stop_engine;
++
+ 	if (of_device_is_compatible(pdev->dev.of_node,
+ 				    "marvell,armada-380-ahci")) {
+ 		dram = mv_mbus_dram_info();
+diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
+index 2685f28160f7..cfdef4d44ae9 100644
+--- a/drivers/ata/ahci_qoriq.c
++++ b/drivers/ata/ahci_qoriq.c
+@@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
+ 
+ 	DPRINTK("ENTER\n");
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	/*
+ 	 * There is a errata on ls1021a Rev1.0 and Rev2.0 which is:
+diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
+index c2b5941d9184..ad58da7c9aff 100644
+--- a/drivers/ata/ahci_xgene.c
++++ b/drivers/ata/ahci_xgene.c
+@@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
+ 				    PORT_CMD_ISSUE, 0x0, 1, 100))
+ 		  return -EBUSY;
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 	ahci_start_fis_rx(ap);
+ 
+ 	/*
+@@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+ 	portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+ 	portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	rc = xgene_ahci_do_hardreset(link, deadline, &online);
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 7adcf3caabd0..e5d90977caec 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 	if (!hpriv->start_engine)
+ 		hpriv->start_engine = ahci_start_engine;
+ 
++	if (!hpriv->stop_engine)
++		hpriv->stop_engine = ahci_stop_engine;
++
+ 	if (!hpriv->irq_handler)
+ 		hpriv->irq_handler = ahci_single_level_irq_intr;
+ }
+@@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap)
+ static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
+ {
+ 	int rc;
++	struct ahci_host_priv *hpriv = ap->host->private_data;
+ 
+ 	/* disable DMA */
+-	rc = ahci_stop_engine(ap);
++	rc = hpriv->stop_engine(ap);
+ 	if (rc) {
+ 		*emsg = "failed to stop engine";
+ 		return rc;
+@@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap)
+ 	int busy, rc;
+ 
+ 	/* stop engine */
+-	rc = ahci_stop_engine(ap);
++	rc = hpriv->stop_engine(ap);
+ 	if (rc)
+ 		goto out_restart;
+ 
+@@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
+ 
+ 	DPRINTK("ENTER\n");
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	/* clear D2H reception area to properly wait for D2H FIS */
+ 	ata_tf_init(link->device, &tf);
+@@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap)
+ 
+ 	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
+ 		/* restart engine */
+-		ahci_stop_engine(ap);
++		hpriv->stop_engine(ap);
+ 		hpriv->start_engine(ap);
+ 	}
+ 
+ 	sata_pmp_error_handler(ap);
+ 
+ 	if (!ata_dev_enabled(ap->link.device))
+-		ahci_stop_engine(ap);
++		hpriv->stop_engine(ap);
+ }
+ EXPORT_SYMBOL_GPL(ahci_error_handler);
+ 
+@@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
+ 		return;
+ 
+ 	/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
+-	rc = ahci_stop_engine(ap);
++	rc = hpriv->stop_engine(ap);
+ 	if (rc)
+ 		return;
+ 
+@@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
+ 		return;
+ 	}
+ 
+-	rc = ahci_stop_engine(ap);
++	rc = hpriv->stop_engine(ap);
+ 	if (rc)
+ 		return;
+ 
+@@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
+ 		return;
+ 	}
+ 
+-	rc = ahci_stop_engine(ap);
++	rc = hpriv->stop_engine(ap);
+ 	if (rc)
+ 		return;
+ 
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index c016829a38fd..513b260bcff1 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
+ { }
+ #endif /* CONFIG_PM */
+ 
+-static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
+-				 va_list args)
++static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
++				 const char *fmt, va_list args)
+ {
+ 	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
+ 				     ATA_EH_DESC_LEN - ehi->desc_len,
+diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
+index aafb8cc03523..e67815b896fc 100644
+--- a/drivers/ata/sata_highbank.c
++++ b/drivers/ata/sata_highbank.c
+@@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
+ 	int rc;
+ 	int retry = 100;
+ 
+-	ahci_stop_engine(ap);
++	hpriv->stop_engine(ap);
+ 
+ 	/* clear D2H reception area to properly wait for D2H FIS */
+ 	ata_tf_init(link->device, &tf);
+diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
+index c381c8e396fc..79d8c84693a1 100644
+--- a/drivers/char/agp/uninorth-agp.c
++++ b/drivers/char/agp/uninorth-agp.c
+@@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
+ 	return 0;
+ }
+ 
+-int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
++static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+ {
+ 	size_t i;
+ 	u32 *gp;
+@@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
+ 	return 0;
+ }
+ 
+-void null_cache_flush(void)
++static void null_cache_flush(void)
+ {
+ 	mb();
+ }
+diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
+index 39cabe157163..4f6a048aece6 100644
+--- a/drivers/clk/clk-mux.c
++++ b/drivers/clk/clk-mux.c
+@@ -101,10 +101,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+ 	return 0;
+ }
+ 
++static int clk_mux_determine_rate(struct clk_hw *hw,
++				  struct clk_rate_request *req)
++{
++	struct clk_mux *mux = to_clk_mux(hw);
++
++	return clk_mux_determine_rate_flags(hw, req, mux->flags);
++}
++
+ const struct clk_ops clk_mux_ops = {
+ 	.get_parent = clk_mux_get_parent,
+ 	.set_parent = clk_mux_set_parent,
+-	.determine_rate = __clk_mux_determine_rate,
++	.determine_rate = clk_mux_determine_rate,
+ };
+ EXPORT_SYMBOL_GPL(clk_mux_ops);
+ 
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 5698d2fac1af..665b64f0b0f8 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -426,9 +426,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+ 	return now <= rate && now > best;
+ }
+ 
+-static int
+-clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
+-			     unsigned long flags)
++int clk_mux_determine_rate_flags(struct clk_hw *hw,
++				 struct clk_rate_request *req,
++				 unsigned long flags)
+ {
+ 	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
+ 	int i, num_parents, ret;
+@@ -488,6 +488,7 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
+ 
+ struct clk *__clk_lookup(const char *name)
+ {
+diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
+index 85c118164469..c95034584747 100644
+--- a/drivers/clk/imx/clk-imx6ul.c
++++ b/drivers/clk/imx/clk-imx6ul.c
+@@ -461,7 +461,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
+ 	clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
+ 
+ 	/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
+-	clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
++	clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
+ 	clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
+ 	clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
+ 	clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index 557ed25b42e3..d175b9545581 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -20,6 +20,7 @@
+ #define TPM_SC				0x10
+ #define TPM_SC_CMOD_INC_PER_CNT		(0x1 << 3)
+ #define TPM_SC_CMOD_DIV_DEFAULT		0x3
++#define TPM_SC_TOF_MASK			(0x1 << 7)
+ #define TPM_CNT				0x14
+ #define TPM_MOD				0x18
+ #define TPM_STATUS			0x1c
+@@ -29,6 +30,7 @@
+ #define TPM_C0SC_MODE_SHIFT		2
+ #define TPM_C0SC_MODE_MASK		0x3c
+ #define TPM_C0SC_MODE_SW_COMPARE	0x4
++#define TPM_C0SC_CHF_MASK		(0x1 << 7)
+ #define TPM_C0V				0x24
+ 
+ static void __iomem *timer_base;
+@@ -205,9 +207,13 @@ static int __init tpm_timer_init(struct device_node *np)
+ 	 * 4) Channel0 disabled
+ 	 * 5) DMA transfers disabled
+ 	 */
++	/* make sure counter is disabled */
+ 	writel(0, timer_base + TPM_SC);
++	/* TOF is W1C */
++	writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
+ 	writel(0, timer_base + TPM_CNT);
+-	writel(0, timer_base + TPM_C0SC);
++	/* CHF is W1C */
++	writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
+ 
+ 	/* increase per cnt, div 8 by default */
+ 	writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index a8bec064d14a..ebde031ebd50 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -70,16 +70,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
+ 
+ 	  Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
+ 
+-config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+-	bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
+-	depends on ARM_BRCMSTB_AVS_CPUFREQ
+-	help
+-	  Enabling this option turns on debug support via sysfs under
+-	  /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
+-	  write some AVS mailbox registers through sysfs entries.
+-
+-	  If in doubt, say N.
+-
+ config ARM_EXYNOS5440_CPUFREQ
+ 	tristate "SAMSUNG EXYNOS5440"
+ 	depends on SOC_EXYNOS5440
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 7281a2c19c36..726fb4db139e 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -49,13 +49,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/semaphore.h>
+ 
+-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+-#include <linux/ctype.h>
+-#include <linux/debugfs.h>
+-#include <linux/slab.h>
+-#include <linux/uaccess.h>
+-#endif
+-
+ /* Max number of arguments AVS calls take */
+ #define AVS_MAX_CMD_ARGS	4
+ /*
+@@ -182,88 +175,11 @@ struct private_data {
+ 	void __iomem *base;
+ 	void __iomem *avs_intr_base;
+ 	struct device *dev;
+-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+-	struct dentry *debugfs;
+-#endif
+ 	struct completion done;
+ 	struct semaphore sem;
+ 	struct pmap pmap;
+ };
+ 
+-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+-
+-enum debugfs_format {
+-	DEBUGFS_NORMAL,
+-	DEBUGFS_FLOAT,
+-	DEBUGFS_REV,
+-};
+-
+-struct debugfs_data {
+-	struct debugfs_entry *entry;
+-	struct private_data *priv;
+-};
+-
+-struct debugfs_entry {
+-	char *name;
+-	u32 offset;
+-	fmode_t mode;
+-	enum debugfs_format format;
+-};
+-
+-#define DEBUGFS_ENTRY(name, mode, format)	{ \
+-	#name, AVS_MBOX_##name, mode, format \
+-}
+-
+-/*
+- * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
+- */
+-#define AVS_MBOX_PARAM1		AVS_MBOX_PARAM(0)
+-#define AVS_MBOX_PARAM2		AVS_MBOX_PARAM(1)
+-#define AVS_MBOX_PARAM3		AVS_MBOX_PARAM(2)
+-#define AVS_MBOX_PARAM4		AVS_MBOX_PARAM(3)
+-
+-/*
+- * This table stores the name, access permissions and offset for each hardware
+- * register and is used to generate debugfs entries.
+- */
+-static struct debugfs_entry debugfs_entries[] = {
+-	DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
+-	DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
+-	DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
+-	DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
+-};
+-
+-static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
+-
+-static char *__strtolower(char *s)
+-{
+-	char *p;
+-
+-	for (p = s; *p; p++)
+-		*p = tolower(*p);
+-
+-	return s;
+-}
+-
+-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+-
+ static void __iomem *__map_region(const char *name)
+ {
+ 	struct device_node *np;
+@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+ 	return table;
+ }
+ 
+-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
+-
+-#define MANT(x)	(unsigned int)(abs((x)) / 1000)
+-#define FRAC(x)	(unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
+-
+-static int brcm_avs_debug_show(struct seq_file *s, void *data)
+-{
+-	struct debugfs_data *dbgfs = s->private;
+-	void __iomem *base;
+-	u32 val, offset;
+-
+-	if (!dbgfs) {
+-		seq_puts(s, "No device pointer\n");
+-		return 0;
+-	}
+-
+-	base = dbgfs->priv->base;
+-	offset = dbgfs->entry->offset;
+-	val = readl(base + offset);
+-	switch (dbgfs->entry->format) {
+-	case DEBUGFS_NORMAL:
+-		seq_printf(s, "%u\n", val);
+-		break;
+-	case DEBUGFS_FLOAT:
+-		seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
+-		break;
+-	case DEBUGFS_REV:
+-		seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
+-			   (val >> 16 & 0xff), (val >> 8 & 0xff),
+-			   val & 0xff);
+-		break;
+-	}
+-	seq_printf(s, "0x%08x\n", val);
+-
+-	return 0;
+-}
+-
+-#undef MANT
+-#undef FRAC
+-
+-static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
+-				  size_t size, loff_t *ppos)
+-{
+-	struct seq_file *s = file->private_data;
+-	struct debugfs_data *dbgfs = s->private;
+-	struct private_data *priv = dbgfs->priv;
+-	void __iomem *base, *avs_intr_base;
+-	bool use_issue_command = false;
+-	unsigned long val, offset;
+-	char str[128];
+-	int ret;
+-	char *str_ptr = str;
+-
+-	if (size >= sizeof(str))
+-		return -E2BIG;
+-
+-	memset(str, 0, sizeof(str));
+-	ret = copy_from_user(str, buf, size);
+-	if (ret)
+-		return ret;
+-
+-	base = priv->base;
+-	avs_intr_base = priv->avs_intr_base;
+-	offset = dbgfs->entry->offset;
+-	/*
+-	 * Special case writing to "command" entry only: if the string starts
+-	 * with a 'c', we use the driver's __issue_avs_command() function.
+-	 * Otherwise, we perform a raw write. This should allow testing of raw
+-	 * access as well as using the higher level function. (Raw access
+-	 * doesn't clear the firmware return status after issuing the command.)
+-	 */
+-	if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
+-		use_issue_command = true;
+-		str_ptr++;
+-	}
+-	if (kstrtoul(str_ptr, 0, &val) != 0)
+-		return -EINVAL;
+-
+-	/*
+-	 * Setting the P-state is a special case. We need to update the CPU
+-	 * frequency we report.
+-	 */
+-	if (val == AVS_CMD_SET_PSTATE) {
+-		struct cpufreq_policy *policy;
+-		unsigned int pstate;
+-
+-		policy = cpufreq_cpu_get(smp_processor_id());
+-		/* Read back the P-state we are about to set */
+-		pstate = readl(base + AVS_MBOX_PARAM(0));
+-		if (use_issue_command) {
+-			ret = brcm_avs_target_index(policy, pstate);
+-			return ret ? ret : size;
+-		}
+-		policy->cur = policy->freq_table[pstate].frequency;
+-	}
+-
+-	if (use_issue_command) {
+-		ret = __issue_avs_command(priv, val, false, NULL);
+-	} else {
+-		/* Locking here is not perfect, but is only for debug. */
+-		ret = down_interruptible(&priv->sem);
+-		if (ret)
+-			return ret;
+-
+-		writel(val, base + offset);
+-		/* We have to wake up the firmware to process a command. */
+-		if (offset == AVS_MBOX_COMMAND)
+-			writel(AVS_CPU_L2_INT_MASK,
+-			       avs_intr_base + AVS_CPU_L2_SET0);
+-		up(&priv->sem);
+-	}
+-
+-	return ret ? ret : size;
+-}
+-
+-static struct debugfs_entry *__find_debugfs_entry(const char *name)
+-{
+-	int i;
+-
+-	for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
+-		if (strcasecmp(debugfs_entries[i].name, name) == 0)
+-			return &debugfs_entries[i];
+-
+-	return NULL;
+-}
+-
+-static int brcm_avs_debug_open(struct inode *inode, struct file *file)
+-{
+-	struct debugfs_data *data;
+-	fmode_t fmode;
+-	int ret;
+-
+-	/*
+-	 * seq_open(), which is called by single_open(), clears "write" access.
+-	 * We need write access to some files, so we preserve our access mode
+-	 * and restore it.
+-	 */
+-	fmode = file->f_mode;
+-	/*
+-	 * Check access permissions even for root. We don't want to be writing
+-	 * to read-only registers. Access for regular users has already been
+-	 * checked by the VFS layer.
+-	 */
+-	if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
+-		return -EACCES;
+-
+-	data = kmalloc(sizeof(*data), GFP_KERNEL);
+-	if (!data)
+-		return -ENOMEM;
+-	/*
+-	 * We use the same file system operations for all our debug files. To
+-	 * produce specific output, we look up the file name upon opening a
+-	 * debugfs entry and map it to a memory offset. This offset is then used
+-	 * in the generic "show" function to read a specific register.
+-	 */
+-	data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
+-	data->priv = inode->i_private;
+-
+-	ret = single_open(file, brcm_avs_debug_show, data);
+-	if (ret)
+-		kfree(data);
+-	file->f_mode = fmode;
+-
+-	return ret;
+-}
+-
+-static int brcm_avs_debug_release(struct inode *inode, struct file *file)
+-{
+-	struct seq_file *seq_priv = file->private_data;
+-	struct debugfs_data *data = seq_priv->private;
+-
+-	kfree(data);
+-	return single_release(inode, file);
+-}
+-
+-static const struct file_operations brcm_avs_debug_ops = {
+-	.open		= brcm_avs_debug_open,
+-	.read		= seq_read,
+-	.write		= brcm_avs_seq_write,
+-	.llseek		= seq_lseek,
+-	.release	= brcm_avs_debug_release,
+-};
+-
+-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
+-{
+-	struct private_data *priv = platform_get_drvdata(pdev);
+-	struct dentry *dir;
+-	int i;
+-
+-	if (!priv)
+-		return;
+-
+-	dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
+-	if (IS_ERR_OR_NULL(dir))
+-		return;
+-	priv->debugfs = dir;
+-
+-	for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
+-		/*
+-		 * The DEBUGFS_ENTRY macro generates uppercase strings. We
+-		 * convert them to lowercase before creating the debugfs
+-		 * entries.
+-		 */
+-		char *entry = __strtolower(debugfs_entries[i].name);
+-		fmode_t mode = debugfs_entries[i].mode;
+-
+-		if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
+-					 dir, priv, &brcm_avs_debug_ops)) {
+-			priv->debugfs = NULL;
+-			debugfs_remove_recursive(dir);
+-			break;
+-		}
+-	}
+-}
+-
+-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
+-{
+-	struct private_data *priv = platform_get_drvdata(pdev);
+-
+-	if (priv && priv->debugfs) {
+-		debugfs_remove_recursive(priv->debugfs);
+-		priv->debugfs = NULL;
+-	}
+-}
+-
+-#else
+-
+-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
+-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
+-
+-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
+-
+ /*
+  * To ensure the right firmware is running we need to
+  *    - check the MAGIC matches what we expect
+@@ -1020,11 +704,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	brcm_avs_driver.driver_data = pdev;
+-	ret = cpufreq_register_driver(&brcm_avs_driver);
+-	if (!ret)
+-		brcm_avs_cpufreq_debug_init(pdev);
+ 
+-	return ret;
++	return cpufreq_register_driver(&brcm_avs_driver);
+ }
+ 
+ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+@@ -1036,8 +717,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	brcm_avs_cpufreq_debug_exit(pdev);
+-
+ 	priv = platform_get_drvdata(pdev);
+ 	iounmap(priv->base);
+ 	iounmap(priv->avs_intr_base);
+diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
+index b9bd827caa22..1b4d465cc5d9 100644
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -97,6 +97,16 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+ 		u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
+ 			     (phys_seed >> 32) & mask : TEXT_OFFSET;
+ 
++		/*
++		 * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
++		 * be a multiple of EFI_KIMG_ALIGN, and we must ensure that
++		 * we preserve the misalignment of 'offset' relative to
++		 * EFI_KIMG_ALIGN so that statically allocated objects whose
++		 * alignment exceeds PAGE_SIZE appear correctly aligned in
++		 * memory.
++		 */
++		offset |= TEXT_OFFSET % EFI_KIMG_ALIGN;
++
+ 		/*
+ 		 * If KASLR is enabled, and we have some randomness available,
+ 		 * locate the kernel at a randomized offset in physical memory.
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 09d35051fdd6..3fabf9f97022 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+ 
+ 	if (other) {
+ 		signed long r;
+-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
++		r = dma_fence_wait(other, true);
+ 		if (r < 0) {
+-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++			if (r != -ERESTARTSYS)
++				DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++
+ 			return r;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 62c3d9cd6ef1..0492aff87382 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -748,12 +748,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
+ 	struct timespec64 time;
+ 
+ 	dev = kfd_device_by_id(args->gpu_id);
+-	if (dev == NULL)
+-		return -EINVAL;
+-
+-	/* Reading GPU clock counter from KGD */
+-	args->gpu_clock_counter =
+-		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
++	if (dev)
++		/* Reading GPU clock counter from KGD */
++		args->gpu_clock_counter =
++			dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
++	else
++		/* Node without GPU resource */
++		args->gpu_clock_counter = 0;
+ 
+ 	/* No access to rdtsc. Using raw monotonic time */
+ 	getrawmonotonic64(&time);
+diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
+index 39ac15ce4702..9e2ae02f31e0 100644
+--- a/drivers/gpu/drm/drm_dumb_buffers.c
++++ b/drivers/gpu/drm/drm_dumb_buffers.c
+@@ -65,12 +65,13 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ 		return -EINVAL;
+ 
+ 	/* overflow checks for 32bit size calculations */
+-	/* NOTE: DIV_ROUND_UP() can overflow */
++	if (args->bpp > U32_MAX - 8)
++		return -EINVAL;
+ 	cpp = DIV_ROUND_UP(args->bpp, 8);
+-	if (!cpp || cpp > 0xffffffffU / args->width)
++	if (cpp > U32_MAX / args->width)
+ 		return -EINVAL;
+ 	stride = cpp * args->width;
+-	if (args->height > 0xffffffffU / stride)
++	if (args->height > U32_MAX / stride)
+ 		return -EINVAL;
+ 
+ 	/* test for wrap-around */
+diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
+index dc5d79465f9b..66b7cc2128e7 100644
+--- a/drivers/gpu/drm/exynos/exynos_mixer.c
++++ b/drivers/gpu/drm/exynos/exynos_mixer.c
+@@ -485,7 +485,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ 			chroma_addr[1] = chroma_addr[0] + 0x40;
+ 		} else {
+ 			luma_addr[1] = luma_addr[0] + fb->pitches[0];
+-			chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
++			chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
+ 		}
+ 	} else {
+ 		luma_addr[1] = 0;
+@@ -494,6 +494,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ 
+ 	spin_lock_irqsave(&ctx->reg_slock, flags);
+ 
++	vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
+ 	/* interlace or progressive scan mode */
+ 	val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
+ 	vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
+@@ -507,21 +508,23 @@ static void vp_video_buffer(struct mixer_context *ctx,
+ 	vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
+ 		VP_IMG_VSIZE(fb->height));
+ 	/* chroma plane for NV12/NV21 is half the height of the luma plane */
+-	vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
++	vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
+ 		VP_IMG_VSIZE(fb->height / 2));
+ 
+ 	vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
+-	vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
+ 	vp_reg_write(ctx, VP_SRC_H_POSITION,
+ 			VP_SRC_H_POSITION_VAL(state->src.x));
+-	vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
+-
+ 	vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
+ 	vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
++
+ 	if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
++		vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
++		vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
+ 		vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
+ 		vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
+ 	} else {
++		vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
++		vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
+ 		vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
+ 		vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
+ 	}
+@@ -711,6 +714,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
+ 
+ 		/* interlace scan need to check shadow register */
+ 		if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
++			if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
++			    vp_reg_read(ctx, VP_SHADOW_UPDATE))
++				goto out;
++
++			base = mixer_reg_read(ctx, MXR_CFG);
++			shadow = mixer_reg_read(ctx, MXR_CFG_S);
++			if (base != shadow)
++				goto out;
++
+ 			base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
+ 			shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
+ 			if (base != shadow)
+diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
+index c311f571bdf9..189cfa2470a8 100644
+--- a/drivers/gpu/drm/exynos/regs-mixer.h
++++ b/drivers/gpu/drm/exynos/regs-mixer.h
+@@ -47,6 +47,7 @@
+ #define MXR_MO				0x0304
+ #define MXR_RESOLUTION			0x0310
+ 
++#define MXR_CFG_S			0x2004
+ #define MXR_GRAPHIC0_BASE_S		0x2024
+ #define MXR_GRAPHIC1_BASE_S		0x2044
+ 
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 0f7324a686ca..d729b2b4b66d 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -740,7 +740,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
+ 	switch (mipi_fmt) {
+ 	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
+ 	case MIPI_DSI_FMT_RGB666_PACKED:
+-	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666;
++	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
+ 	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
+ 	default:			return CMD_DST_FORMAT_RGB888;
+ 	}
+diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
+index c178563fcd4d..456622b46335 100644
+--- a/drivers/gpu/drm/msm/msm_fbdev.c
++++ b/drivers/gpu/drm/msm/msm_fbdev.c
+@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
+ 
+ 	if (IS_ERR(fb)) {
+ 		dev_err(dev->dev, "failed to allocate fb\n");
+-		ret = PTR_ERR(fb);
+-		goto fail;
++		return PTR_ERR(fb);
+ 	}
+ 
+ 	bo = msm_framebuffer_bo(fb, 0);
+@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
+ 
+ fail_unlock:
+ 	mutex_unlock(&dev->struct_mutex);
+-fail:
+-
+-	if (ret) {
+-		if (fb)
+-			drm_framebuffer_remove(fb);
+-	}
+-
++	drm_framebuffer_remove(fb);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 07376de9ff4c..37ec3411297b 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
+ 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ 
+ 	if (msm_obj->pages) {
+-		/* For non-cached buffers, ensure the new pages are clean
+-		 * because display controller, GPU, etc. are not coherent:
+-		 */
+-		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+-			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++		if (msm_obj->sgt) {
++			/* For non-cached buffers, ensure the new
++			 * pages are clean because display controller,
++			 * GPU, etc. are not coherent:
++			 */
++			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
++				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
++					     msm_obj->sgt->nents,
++					     DMA_BIDIRECTIONAL);
+ 
+-		if (msm_obj->sgt)
+ 			sg_free_table(msm_obj->sgt);
+-
+-		kfree(msm_obj->sgt);
++			kfree(msm_obj->sgt);
++		}
+ 
+ 		if (use_pages(obj))
+ 			drm_gem_put_pages(obj, msm_obj->pages, true, false);
+diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+index bf914f2ac99e..f3d7decbbe24 100644
+--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
++++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+@@ -665,7 +665,7 @@ static int hdmi_audio_config(struct device *dev,
+ 			     struct omap_dss_audio *dss_audio)
+ {
+ 	struct omap_hdmi *hd = dev_get_drvdata(dev);
+-	int ret;
++	int ret = 0;
+ 
+ 	mutex_lock(&hd->lock);
+ 
+diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+index 35ed2add6189..813ba42f2753 100644
+--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
++++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+@@ -922,8 +922,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
+ {
+ 	const struct hdmi4_features *features;
+ 	struct resource *res;
++	const struct soc_device_attribute *soc;
+ 
+-	features = soc_device_match(hdmi4_soc_devices)->data;
++	soc = soc_device_match(hdmi4_soc_devices);
++	if (!soc)
++		return -ENODEV;
++
++	features = soc->data;
+ 	core->cts_swmode = features->cts_swmode;
+ 	core->audio_use_mclk = features->audio_use_mclk;
+ 
+diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+index 689cda41858b..dc36274bdc15 100644
+--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
++++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+@@ -660,7 +660,7 @@ static int hdmi_audio_config(struct device *dev,
+ 			     struct omap_dss_audio *dss_audio)
+ {
+ 	struct omap_hdmi *hd = dev_get_drvdata(dev);
+-	int ret;
++	int ret = 0;
+ 
+ 	mutex_lock(&hd->lock);
+ 
+diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
+index a0d7b1d905e8..5cde26ac937b 100644
+--- a/drivers/gpu/drm/omapdrm/omap_connector.c
++++ b/drivers/gpu/drm/omapdrm/omap_connector.c
+@@ -121,6 +121,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
+ 	if (dssdrv->read_edid) {
+ 		void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+ 
++		if (!edid)
++			return 0;
++
+ 		if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+ 				drm_edid_is_valid(edid)) {
+ 			drm_mode_connector_update_edid_property(
+@@ -139,6 +142,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
+ 		struct drm_display_mode *mode = drm_mode_create(dev);
+ 		struct videomode vm = {0};
+ 
++		if (!mode)
++			return 0;
++
+ 		dssdrv->get_timings(dssdev, &vm);
+ 
+ 		drm_display_mode_from_videomode(&vm, mode);
+@@ -200,6 +206,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
+ 	if (!r) {
+ 		/* check if vrefresh is still valid */
+ 		new_mode = drm_mode_duplicate(dev, mode);
++
++		if (!new_mode)
++			return MODE_BAD;
++
+ 		new_mode->clock = vm.pixelclock / 1000;
+ 		new_mode->vrefresh = 0;
+ 		if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 4be0c94673f5..17d1baee522b 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -401,12 +401,16 @@ int tiler_unpin(struct tiler_block *block)
+ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+ 		uint16_t h, uint16_t align)
+ {
+-	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
++	struct tiler_block *block;
+ 	u32 min_align = 128;
+ 	int ret;
+ 	unsigned long flags;
+ 	u32 slot_bytes;
+ 
++	block = kzalloc(sizeof(*block), GFP_KERNEL);
++	if (!block)
++		return ERR_PTR(-ENOMEM);
++
+ 	BUG_ON(!validfmt(fmt));
+ 
+ 	/* convert width/height to slots */
+diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
+index 661362d072f7..ebfdb38b4616 100644
+--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
++++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
+@@ -90,7 +90,7 @@ static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
+ {
+ 	int i;
+ 	unsigned long index;
+-	bool area_free;
++	bool area_free = false;
+ 	unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
+ 	unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
+ 	unsigned long curr_bit = bit_offset;
+diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
+index 72c9dbd81d7f..f185812970da 100644
+--- a/drivers/gpu/drm/vc4/vc4_dpi.c
++++ b/drivers/gpu/drm/vc4/vc4_dpi.c
+@@ -96,7 +96,6 @@ struct vc4_dpi {
+ 	struct platform_device *pdev;
+ 
+ 	struct drm_encoder *encoder;
+-	struct drm_connector *connector;
+ 
+ 	void __iomem *regs;
+ 
+@@ -164,14 +163,31 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
+ 
+ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+ {
++	struct drm_device *dev = encoder->dev;
+ 	struct drm_display_mode *mode = &encoder->crtc->mode;
+ 	struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+ 	struct vc4_dpi *dpi = vc4_encoder->dpi;
++	struct drm_connector_list_iter conn_iter;
++	struct drm_connector *connector = NULL, *connector_scan;
+ 	u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
+ 	int ret;
+ 
+-	if (dpi->connector->display_info.num_bus_formats) {
+-		u32 bus_format = dpi->connector->display_info.bus_formats[0];
++	/* Look up the connector attached to DPI so we can get the
++	 * bus_format.  Ideally the bridge would tell us the
++	 * bus_format we want, but it doesn't yet, so assume that it's
++	 * uniform throughout the bridge chain.
++	 */
++	drm_connector_list_iter_begin(dev, &conn_iter);
++	drm_for_each_connector_iter(connector_scan, &conn_iter) {
++		if (connector_scan->encoder == encoder) {
++			connector = connector_scan;
++			break;
++		}
++	}
++	drm_connector_list_iter_end(&conn_iter);
++
++	if (connector && connector->display_info.num_bus_formats) {
++		u32 bus_format = connector->display_info.bus_formats[0];
+ 
+ 		switch (bus_format) {
+ 		case MEDIA_BUS_FMT_RGB888_1X24:
+@@ -199,6 +215,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+ 			DRM_ERROR("Unknown media bus format %d\n", bus_format);
+ 			break;
+ 		}
++	} else {
++		/* Default to 24bit if no connector found. */
++		dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
+ 	}
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index 19c499f5623d..84ace3b62bb0 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -448,10 +448,11 @@ config HID_LENOVO
+ 	select NEW_LEDS
+ 	select LEDS_CLASS
+ 	---help---
+-	Support for Lenovo devices that are not fully compliant with HID standard.
++	Support for IBM/Lenovo devices that are not fully compliant with HID standard.
+ 
+-	Say Y if you want support for the non-compliant features of the Lenovo
+-	Thinkpad standalone keyboards, e.g:
++	Say Y if you want support for horizontal scrolling of the IBM/Lenovo
++	Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
++	standalone keyboards, e.g:
+ 	- ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
+ 	  configuration)
+ 	- ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index c631d2c8988d..a026cc76f4f1 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -546,6 +546,13 @@
+ #define USB_VENDOR_ID_HUION		0x256c
+ #define USB_DEVICE_ID_HUION_TABLET	0x006e
+ 
++#define USB_VENDOR_ID_IBM					0x04b3
++#define USB_DEVICE_ID_IBM_SCROLLPOINT_III			0x3100
++#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO			0x3103
++#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL			0x3105
++#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL		0x3108
++#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO	0x3109
++
+ #define USB_VENDOR_ID_IDEACOM		0x1cb6
+ #define USB_DEVICE_ID_IDEACOM_IDC6650	0x6650
+ #define USB_DEVICE_ID_IDEACOM_IDC6651	0x6651
+@@ -678,6 +685,7 @@
+ #define USB_DEVICE_ID_LENOVO_TPKBD	0x6009
+ #define USB_DEVICE_ID_LENOVO_CUSBKBD	0x6047
+ #define USB_DEVICE_ID_LENOVO_CBTKBD	0x6048
++#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL	0x6049
+ #define USB_DEVICE_ID_LENOVO_TPPRODOCK	0x6067
+ #define USB_DEVICE_ID_LENOVO_X1_COVER	0x6085
+ #define USB_DEVICE_ID_LENOVO_X1_TAB	0x60a3
+@@ -958,6 +966,7 @@
+ #define USB_DEVICE_ID_SIS817_TOUCH	0x0817
+ #define USB_DEVICE_ID_SIS_TS		0x1013
+ #define USB_DEVICE_ID_SIS1030_TOUCH	0x1030
++#define USB_DEVICE_ID_SIS10FB_TOUCH	0x10fb
+ 
+ #define USB_VENDOR_ID_SKYCABLE			0x1223
+ #define	USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07
+diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
+index 1ac4ff4d57a6..643b6eb54442 100644
+--- a/drivers/hid/hid-lenovo.c
++++ b/drivers/hid/hid-lenovo.c
+@@ -6,6 +6,17 @@
+  *
+  *  Copyright (c) 2012 Bernhard Seibold
+  *  Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
++ *
++ * Linux IBM/Lenovo Scrollpoint mouse driver:
++ * - IBM Scrollpoint III
++ * - IBM Scrollpoint Pro
++ * - IBM Scrollpoint Optical
++ * - IBM Scrollpoint Optical 800dpi
++ * - IBM Scrollpoint Optical 800dpi Pro
++ * - Lenovo Scrollpoint Optical
++ *
++ *  Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
++ *  Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
+  */
+ 
+ /*
+@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
+ 	return 0;
+ }
+ 
++static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
++		struct hid_input *hi, struct hid_field *field,
++		struct hid_usage *usage, unsigned long **bit, int *max)
++{
++	if (usage->hid == HID_GD_Z) {
++		hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
++		return 1;
++	}
++	return 0;
++}
++
+ static int lenovo_input_mapping(struct hid_device *hdev,
+ 		struct hid_input *hi, struct hid_field *field,
+ 		struct hid_usage *usage, unsigned long **bit, int *max)
+@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
+ 	case USB_DEVICE_ID_LENOVO_CBTKBD:
+ 		return lenovo_input_mapping_cptkbd(hdev, hi, field,
+ 							usage, bit, max);
++	case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
++	case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
++	case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
++	case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
++	case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
++	case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
++		return lenovo_input_mapping_scrollpoint(hdev, hi, field,
++							usage, bit, max);
+ 	default:
+ 		return 0;
+ 	}
+@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index fd9f70a8b813..4c0d2491db08 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks {
+ 		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
+ 	{ I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
+ 		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
++	{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
++		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ 	{ 0, 0 }
+ };
+ 
+diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
+index f272cdd9bd55..2623a567ffba 100644
+--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
++++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
+@@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+ 		list_del(&device->device_link);
+ 		spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ 		dev_err(dev->devc, "Failed to register ISHTP client device\n");
+-		kfree(device);
++		put_device(&device->dev);
+ 		return NULL;
+ 	}
+ 
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index b54ef1ffcbec..ee7a37eb159a 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -1213,8 +1213,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
+ 	devres->root = root;
+ 
+ 	error = sysfs_create_group(devres->root, group);
+-	if (error)
++	if (error) {
++		devres_free(devres);
+ 		return error;
++	}
+ 
+ 	devres_add(&wacom->hdev->dev, devres);
+ 
+diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
+index 2aa0e83174c5..dae8ac618a52 100644
+--- a/drivers/i2c/busses/i2c-pmcmsp.c
++++ b/drivers/i2c/busses/i2c-pmcmsp.c
+@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
+ 		 * TODO: We could potentially loop and retry in the case
+ 		 * of MSP_TWI_XFER_TIMEOUT.
+ 		 */
+-		return -1;
++		return -EIO;
+ 	}
+ 
+-	return 0;
++	return num;
+ }
+ 
+ static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
+diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
+index 25fcc3c1e32b..4053259bccb8 100644
+--- a/drivers/i2c/busses/i2c-sprd.c
++++ b/drivers/i2c/busses/i2c-sprd.c
+@@ -86,6 +86,7 @@ struct sprd_i2c {
+ 	u32 count;
+ 	int irq;
+ 	int err;
++	bool is_suspended;
+ };
+ 
+ static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
+@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
+ 	struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ 	int im, ret;
+ 
++	if (i2c_dev->is_suspended)
++		return -EBUSY;
++
+ 	ret = pm_runtime_get_sync(i2c_dev->dev);
+ 	if (ret < 0)
+ 		return ret;
+@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
+ 	struct sprd_i2c *i2c_dev = dev_id;
+ 	struct i2c_msg *msg = i2c_dev->msg;
+ 	bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
+-	u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
+ 	u32 i2c_tran;
+ 
+ 	if (msg->flags & I2C_M_RD)
+ 		i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
+ 	else
+-		i2c_tran = i2c_count;
++		i2c_tran = i2c_dev->count;
+ 
+ 	/*
+ 	 * If we got one ACK from slave when writing data, and we did not
+@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
+ {
+ 	struct sprd_i2c *i2c_dev = dev_id;
+ 	struct i2c_msg *msg = i2c_dev->msg;
+-	u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
+ 	bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
+ 	u32 i2c_tran;
+ 
+ 	if (msg->flags & I2C_M_RD)
+ 		i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
+ 	else
+-		i2c_tran = i2c_count;
++		i2c_tran = i2c_dev->count;
+ 
+ 	/*
+ 	 * If we did not get one ACK from slave when writing data, then we
+@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
+ 
+ static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
+ {
++	struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
++
++	i2c_lock_adapter(&i2c_dev->adap);
++	i2c_dev->is_suspended = true;
++	i2c_unlock_adapter(&i2c_dev->adap);
++
+ 	return pm_runtime_force_suspend(pdev);
+ }
+ 
+ static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
+ {
++	struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
++
++	i2c_lock_adapter(&i2c_dev->adap);
++	i2c_dev->is_suspended = false;
++	i2c_unlock_adapter(&i2c_dev->adap);
++
+ 	return pm_runtime_force_resume(pdev);
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
+index e4be86b3de9a..7235c7302bb7 100644
+--- a/drivers/i2c/busses/i2c-viperboard.c
++++ b/drivers/i2c/busses/i2c-viperboard.c
+@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
+ 		}
+ 		mutex_unlock(&vb->lock);
+ 	}
+-	return 0;
++	return num;
+ error:
+ 	mutex_unlock(&vb->lock);
+ 	return error;
+diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
+index 8517d6ea91a6..6154da184fc1 100644
+--- a/drivers/infiniband/Kconfig
++++ b/drivers/infiniband/Kconfig
+@@ -62,9 +62,12 @@ config INFINIBAND_ON_DEMAND_PAGING
+ 	  pages on demand instead.
+ 
+ config INFINIBAND_ADDR_TRANS
+-	bool
++	bool "RDMA/CM"
+ 	depends on INFINIBAND
+ 	default y
++	---help---
++	  Support for RDMA communication manager (CM).
++	  This allows for a generic connection abstraction over RDMA.
+ 
+ config INFINIBAND_ADDR_TRANS_CONFIGFS
+ 	bool
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index a5367c5efbe7..b5a7897eb180 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -420,6 +420,8 @@ struct cma_hdr {
+ #define CMA_VERSION 0x00
+ 
+ struct cma_req_info {
++	struct sockaddr_storage listen_addr_storage;
++	struct sockaddr_storage src_addr_storage;
+ 	struct ib_device *device;
+ 	int port;
+ 	union ib_gid local_gid;
+@@ -899,7 +901,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
+ {
+ 	struct ib_qp_attr qp_attr;
+ 	int qp_attr_mask, ret;
+-	union ib_gid sgid;
+ 
+ 	mutex_lock(&id_priv->qp_mutex);
+ 	if (!id_priv->id.qp) {
+@@ -922,12 +923,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
+ 	if (ret)
+ 		goto out;
+ 
+-	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+-			   rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
+-			   &sgid, NULL);
+-	if (ret)
+-		goto out;
+-
+ 	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
+ 
+ 	if (conn_param)
+@@ -1373,11 +1368,11 @@ static bool validate_net_dev(struct net_device *net_dev,
+ }
+ 
+ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+-					  const struct cma_req_info *req)
++					  struct cma_req_info *req)
+ {
+-	struct sockaddr_storage listen_addr_storage, src_addr_storage;
+-	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
+-			*src_addr = (struct sockaddr *)&src_addr_storage;
++	struct sockaddr *listen_addr =
++			(struct sockaddr *)&req->listen_addr_storage;
++	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
+ 	struct net_device *net_dev;
+ 	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
+ 	int err;
+@@ -1392,11 +1387,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+ 	if (!net_dev)
+ 		return ERR_PTR(-ENODEV);
+ 
+-	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
+-		dev_put(net_dev);
+-		return ERR_PTR(-EHOSTUNREACH);
+-	}
+-
+ 	return net_dev;
+ }
+ 
+@@ -1532,15 +1522,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
+ 		}
+ 	}
+ 
++	/*
++	 * Net namespace might be getting deleted while route lookup,
++	 * cm_id lookup is in progress. Therefore, perform netdevice
++	 * validation, cm_id lookup under rcu lock.
++	 * RCU lock along with netdevice state check, synchronizes with
++	 * netdevice migrating to different net namespace and also avoids
++	 * case where net namespace doesn't get deleted while lookup is in
++	 * progress.
++	 * If the device state is not IFF_UP, its properties such as ifindex
++	 * and nd_net cannot be trusted to remain valid without rcu lock.
++	 * net/core/dev.c change_net_namespace() ensures to synchronize with
++	 * ongoing operations on net device after device is closed using
++	 * synchronize_net().
++	 */
++	rcu_read_lock();
++	if (*net_dev) {
++		/*
++		 * If netdevice is down, it is likely that it is administratively
++		 * down or it might be migrating to different namespace.
++		 * In that case avoid further processing, as the net namespace
++		 * or ifindex may change.
++		 */
++		if (((*net_dev)->flags & IFF_UP) == 0) {
++			id_priv = ERR_PTR(-EHOSTUNREACH);
++			goto err;
++		}
++
++		if (!validate_net_dev(*net_dev,
++				 (struct sockaddr *)&req.listen_addr_storage,
++				 (struct sockaddr *)&req.src_addr_storage)) {
++			id_priv = ERR_PTR(-EHOSTUNREACH);
++			goto err;
++		}
++	}
++
+ 	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
+ 				rdma_ps_from_service_id(req.service_id),
+ 				cma_port_from_service_id(req.service_id));
+ 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
++err:
++	rcu_read_unlock();
+ 	if (IS_ERR(id_priv) && *net_dev) {
+ 		dev_put(*net_dev);
+ 		*net_dev = NULL;
+ 	}
+-
+ 	return id_priv;
+ }
+ 
+diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
+index 81528f64061a..cb0fecc958b5 100644
+--- a/drivers/infiniband/core/iwpm_util.c
++++ b/drivers/infiniband/core/iwpm_util.c
+@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
+ 			struct sockaddr_storage *mapped_sockaddr,
+ 			u8 nl_client)
+ {
+-	struct hlist_head *hash_bucket_head;
++	struct hlist_head *hash_bucket_head = NULL;
+ 	struct iwpm_mapping_info *map_info;
+ 	unsigned long flags;
+ 	int ret = -EINVAL;
+@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
++
++	if (!hash_bucket_head)
++		kfree(map_info);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index c50596f7f98a..b28452a55a08 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+ MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+ 
+ static struct list_head ib_mad_port_list;
+-static u32 ib_mad_client_id = 0;
++static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
+ 
+ /* Port list lock */
+ static DEFINE_SPINLOCK(ib_mad_port_list_lock);
+@@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
+ 	}
+ 
+ 	spin_lock_irqsave(&port_priv->reg_lock, flags);
+-	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
++	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
+ 
+ 	/*
+ 	 * Make sure MAD registration (if supplied)
+diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
+index 339b85145044..6a71cdf1fe33 100644
+--- a/drivers/infiniband/core/uverbs_ioctl.c
++++ b/drivers/infiniband/core/uverbs_ioctl.c
+@@ -191,6 +191,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met
+ 			return -EINVAL;
+ 	}
+ 
++	for (; i < method_spec->num_buckets; i++) {
++		struct uverbs_attr_spec_hash *attr_spec_bucket =
++			method_spec->attr_buckets[i];
++
++		if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
++				  attr_spec_bucket->num_attrs))
++			return -EINVAL;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index a97055dd4fbd..b5fab55cc275 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
+ static int get_irq_affinity(struct hfi1_devdata *dd,
+ 			    struct hfi1_msix_entry *msix)
+ {
+-	int ret;
+ 	cpumask_var_t diff;
+ 	struct hfi1_affinity_node *entry;
+ 	struct cpu_mask_set *set = NULL;
+@@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
+ 	extra[0] = '\0';
+ 	cpumask_clear(&msix->mask);
+ 
+-	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
+-	if (!ret)
+-		return -ENOMEM;
+-
+ 	entry = node_affinity_lookup(dd->node);
+ 
+ 	switch (msix->type) {
+@@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
+ 	 * finds its CPU here.
+ 	 */
+ 	if (cpu == -1 && set) {
++		if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
++			return -ENOMEM;
++
+ 		if (cpumask_equal(&set->mask, &set->used)) {
+ 			/*
+ 			 * We've used up all the CPUs, bump up the generation
+@@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
+ 		cpumask_andnot(diff, &set->mask, &set->used);
+ 		cpu = cpumask_first(diff);
+ 		cpumask_set_cpu(cpu, &set->used);
++
++		free_cpumask_var(diff);
+ 	}
+ 
+ 	cpumask_set_cpu(cpu, &msix->mask);
+@@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
+ 		hfi1_setup_sdma_notifier(msix);
+ 	}
+ 
+-	free_cpumask_var(diff);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index b27fe75c7102..6309edf811df 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -88,9 +88,9 @@
+  * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
+  */
+ int num_user_contexts = -1;
+-module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
++module_param_named(num_user_contexts, num_user_contexts, int, 0444);
+ MODULE_PARM_DESC(
+-	num_user_contexts, "Set max number of user contexts to use");
++	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
+ 
+ uint krcvqs[RXE_NUM_DATA_VL];
+ int krcvqsset;
+@@ -1209,30 +1209,49 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
+ 	kfree(ad);
+ }
+ 
+-static void __hfi1_free_devdata(struct kobject *kobj)
++/**
++ * hfi1_clean_devdata - cleans up per-unit data structure
++ * @dd: pointer to a valid devdata structure
++ *
++ * It cleans up all data structures set up by
++ * by hfi1_alloc_devdata().
++ */
++static void hfi1_clean_devdata(struct hfi1_devdata *dd)
+ {
+-	struct hfi1_devdata *dd =
+-		container_of(kobj, struct hfi1_devdata, kobj);
+ 	struct hfi1_asic_data *ad;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&hfi1_devs_lock, flags);
+-	idr_remove(&hfi1_unit_table, dd->unit);
+-	list_del(&dd->list);
++	if (!list_empty(&dd->list)) {
++		idr_remove(&hfi1_unit_table, dd->unit);
++		list_del_init(&dd->list);
++	}
+ 	ad = release_asic_data(dd);
+ 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+-	if (ad)
+-		finalize_asic_data(dd, ad);
++
++	finalize_asic_data(dd, ad);
+ 	free_platform_config(dd);
+ 	rcu_barrier(); /* wait for rcu callbacks to complete */
+ 	free_percpu(dd->int_counter);
+ 	free_percpu(dd->rcv_limit);
+ 	free_percpu(dd->send_schedule);
+ 	free_percpu(dd->tx_opstats);
++	dd->int_counter   = NULL;
++	dd->rcv_limit     = NULL;
++	dd->send_schedule = NULL;
++	dd->tx_opstats    = NULL;
+ 	sdma_clean(dd, dd->num_sdma);
+ 	rvt_dealloc_device(&dd->verbs_dev.rdi);
+ }
+ 
++static void __hfi1_free_devdata(struct kobject *kobj)
++{
++	struct hfi1_devdata *dd =
++		container_of(kobj, struct hfi1_devdata, kobj);
++
++	hfi1_clean_devdata(dd);
++}
++
+ static struct kobj_type hfi1_devdata_type = {
+ 	.release = __hfi1_free_devdata,
+ };
+@@ -1333,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+ 	return dd;
+ 
+ bail:
+-	if (!list_empty(&dd->list))
+-		list_del_init(&dd->list);
+-	rvt_dealloc_device(&dd->verbs_dev.rdi);
++	hfi1_clean_devdata(dd);
+ 	return ERR_PTR(ret);
+ }
+ 
+diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
+index d486355880cb..cbf7faa5038c 100644
+--- a/drivers/infiniband/hw/hfi1/platform.c
++++ b/drivers/infiniband/hw/hfi1/platform.c
+@@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd)
+ {
+ 	/* Release memory allocated for eprom or fallback file read. */
+ 	kfree(dd->platform_config.data);
++	dd->platform_config.data = NULL;
+ }
+ 
+ void get_port_type(struct hfi1_pportdata *ppd)
+diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
+index 1869f639c3ae..b5966991d647 100644
+--- a/drivers/infiniband/hw/hfi1/qsfp.c
++++ b/drivers/infiniband/hw/hfi1/qsfp.c
+@@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
+ 
+ void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
+ {
++	if (!ad)
++		return;
+ 	clean_i2c_bus(ad->i2c_bus0);
+ 	ad->i2c_bus0 = NULL;
+ 	clean_i2c_bus(ad->i2c_bus1);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index 0eeabfbee192..0d8c113083ad 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ 		obj_per_chunk = buf_chunk_size / obj_size;
+ 		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
+ 		bt_chunk_num = bt_chunk_size / 8;
+-		if (table->type >= HEM_TYPE_MTT)
++		if (type >= HEM_TYPE_MTT)
+ 			num_bt_l0 = bt_chunk_num;
+ 
+ 		table->hem = kcalloc(num_hem, sizeof(*table->hem),
+@@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ 		if (!table->hem)
+ 			goto err_kcalloc_hem_buf;
+ 
+-		if (check_whether_bt_num_3(table->type, hop_num)) {
++		if (check_whether_bt_num_3(type, hop_num)) {
+ 			unsigned long num_bt_l1;
+ 
+ 			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
+@@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
+ 				goto err_kcalloc_l1_dma;
+ 		}
+ 
+-		if (check_whether_bt_num_2(table->type, hop_num) ||
+-			check_whether_bt_num_3(table->type, hop_num)) {
++		if (check_whether_bt_num_2(type, hop_num) ||
++			check_whether_bt_num_3(type, hop_num)) {
+ 			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
+ 					       GFP_KERNEL);
+ 			if (!table->bt_l0)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index ec638778661c..3d056c67a339 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ 			return -EINVAL;
+ 		}
+ 
++		if (wr->opcode == IB_WR_RDMA_READ) {
++			dev_err(hr_dev->dev, "Not support inline data!\n");
++			return -EINVAL;
++		}
++
+ 		for (i = 0; i < wr->num_sge; i++) {
+ 			memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ 			       wr->sg_list[i].length);
+@@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ 		     ibqp->qp_type != IB_QPT_GSI &&
+ 		     ibqp->qp_type != IB_QPT_UD)) {
+ 		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
+-		*bad_wr = NULL;
++		*bad_wr = wr;
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+@@ -456,6 +461,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ 		} else {
+ 			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+ 			spin_unlock_irqrestore(&qp->sq.lock, flags);
++			*bad_wr = wr;
+ 			return -EOPNOTSUPP;
+ 		}
+ 	}
+@@ -3161,7 +3167,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ 		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
+ 		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
+ 		   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
+-		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
++		   (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
++		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
+ 		/* Nothing */
+ 		;
+ 	} else {
+diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
+index 4975f3e6596e..32fafa7700e3 100644
+--- a/drivers/infiniband/hw/mlx4/mr.c
++++ b/drivers/infiniband/hw/mlx4/mr.c
+@@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ 	/* Add to the first block the misalignment that it suffers from. */
+ 	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
+ 	last_block_end = current_block_start + current_block_len;
+-	last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
++	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
+ 	total_len += (last_block_aligned_end - last_block_end);
+ 
+ 	if (total_len & ((1ULL << block_shift) - 1ULL))
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c14ed9cc9c9e..cf7b4bda8597 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4833,9 +4833,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+ static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+ {
+ 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+-	if (!dev->mdev->priv.uar)
+-		return -ENOMEM;
+-	return 0;
++	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
+ }
+ 
+ static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
+index 61927c165b59..4cf11063e0b5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
++++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
+@@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
+ 		.name	= "IB_OPCODE_RC_SEND_ONLY_INV",
+ 		.mask	= RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
+ 				| RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
+-				| RXE_END_MASK,
++				| RXE_END_MASK  | RXE_START_MASK,
+ 		.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
+ 		.offset = {
+ 			[RXE_BTH]	= 0,
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 7bdaf71b8221..785199990457 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -728,7 +728,6 @@ int rxe_requester(void *arg)
+ 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
+ 
+ 		if (ret == -EAGAIN) {
+-			kfree_skb(skb);
+ 			rxe_run_task(&qp->req.task, 1);
+ 			goto exit;
+ 		}
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index d37bb9b97569..e319bd904d30 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
+ 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+ 	if (err) {
+ 		pr_err("Failed sending RDMA reply.\n");
+-		kfree_skb(skb);
+ 		return RESPST_ERR_RNR;
+ 	}
+ 
+@@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ 	}
+ 
+ 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
+-	if (err) {
++	if (err)
+ 		pr_err_ratelimited("Failed sending ack\n");
+-		kfree_skb(skb);
+-	}
+ 
+ err1:
+ 	return err;
+@@ -1150,7 +1147,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
+ 			if (rc) {
+ 				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
+ 				rxe_drop_ref(qp);
+-				kfree_skb(skb_copy);
+ 				rc = RESPST_CLEANUP;
+ 				goto out;
+ 			}
+diff --git a/drivers/infiniband/ulp/srp/Kconfig b/drivers/infiniband/ulp/srp/Kconfig
+index c74ee9633041..99db8fe5173a 100644
+--- a/drivers/infiniband/ulp/srp/Kconfig
++++ b/drivers/infiniband/ulp/srp/Kconfig
+@@ -1,6 +1,6 @@
+ config INFINIBAND_SRP
+ 	tristate "InfiniBand SCSI RDMA Protocol"
+-	depends on SCSI
++	depends on SCSI && INFINIBAND_ADDR_TRANS
+ 	select SCSI_SRP_ATTRS
+ 	---help---
+ 	  Support for the SCSI RDMA Protocol over InfiniBand.  This
+diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
+index 31ee83d528d9..fb8b7182f05e 100644
+--- a/drivers/infiniband/ulp/srpt/Kconfig
++++ b/drivers/infiniband/ulp/srpt/Kconfig
+@@ -1,6 +1,6 @@
+ config INFINIBAND_SRPT
+ 	tristate "InfiniBand SCSI RDMA Protocol target support"
+-	depends on INFINIBAND && TARGET_CORE
++	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
+ 	---help---
+ 
+ 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The
+diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
+index 76edbf2c1bce..082defc329a8 100644
+--- a/drivers/input/rmi4/rmi_spi.c
++++ b/drivers/input/rmi4/rmi_spi.c
+@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
+ 	if (len > RMI_SPI_XFER_SIZE_LIMIT)
+ 		return -EINVAL;
+ 
+-	if (rmi_spi->xfer_buf_size < len)
+-		rmi_spi_manage_pools(rmi_spi, len);
++	if (rmi_spi->xfer_buf_size < len) {
++		ret = rmi_spi_manage_pools(rmi_spi, len);
++		if (ret < 0)
++			return ret;
++	}
+ 
+ 	if (addr == 0)
+ 		/*
+diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
+index 429b694405c7..fc149ea64be7 100644
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -275,7 +275,8 @@ struct mxt_data {
+ 	char phys[64];		/* device physical location */
+ 	const struct mxt_platform_data *pdata;
+ 	struct mxt_object *object_table;
+-	struct mxt_info info;
++	struct mxt_info *info;
++	void *raw_info_block;
+ 	unsigned int irq;
+ 	unsigned int max_x;
+ 	unsigned int max_y;
+@@ -450,12 +451,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
+ {
+ 	u8 appmode = data->client->addr;
+ 	u8 bootloader;
++	u8 family_id = data->info ? data->info->family_id : 0;
+ 
+ 	switch (appmode) {
+ 	case 0x4a:
+ 	case 0x4b:
+ 		/* Chips after 1664S use different scheme */
+-		if (retry || data->info.family_id >= 0xa2) {
++		if (retry || family_id >= 0xa2) {
+ 			bootloader = appmode - 0x24;
+ 			break;
+ 		}
+@@ -682,7 +684,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
+ 	struct mxt_object *object;
+ 	int i;
+ 
+-	for (i = 0; i < data->info.object_num; i++) {
++	for (i = 0; i < data->info->object_num; i++) {
+ 		object = data->object_table + i;
+ 		if (object->type == type)
+ 			return object;
+@@ -1453,12 +1455,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
+ 		data_pos += offset;
+ 	}
+ 
+-	if (cfg_info.family_id != data->info.family_id) {
++	if (cfg_info.family_id != data->info->family_id) {
+ 		dev_err(dev, "Family ID mismatch!\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	if (cfg_info.variant_id != data->info.variant_id) {
++	if (cfg_info.variant_id != data->info->variant_id) {
+ 		dev_err(dev, "Variant ID mismatch!\n");
+ 		return -EINVAL;
+ 	}
+@@ -1503,7 +1505,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
+ 
+ 	/* Malloc memory to store configuration */
+ 	cfg_start_ofs = MXT_OBJECT_START +
+-			data->info.object_num * sizeof(struct mxt_object) +
++			data->info->object_num * sizeof(struct mxt_object) +
+ 			MXT_INFO_CHECKSUM_SIZE;
+ 	config_mem_size = data->mem_size - cfg_start_ofs;
+ 	config_mem = kzalloc(config_mem_size, GFP_KERNEL);
+@@ -1554,20 +1556,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
+ 	return ret;
+ }
+ 
+-static int mxt_get_info(struct mxt_data *data)
+-{
+-	struct i2c_client *client = data->client;
+-	struct mxt_info *info = &data->info;
+-	int error;
+-
+-	/* Read 7-byte info block starting at address 0 */
+-	error = __mxt_read_reg(client, 0, sizeof(*info), info);
+-	if (error)
+-		return error;
+-
+-	return 0;
+-}
+-
+ static void mxt_free_input_device(struct mxt_data *data)
+ {
+ 	if (data->input_dev) {
+@@ -1582,9 +1570,10 @@ static void mxt_free_object_table(struct mxt_data *data)
+ 	video_unregister_device(&data->dbg.vdev);
+ 	v4l2_device_unregister(&data->dbg.v4l2);
+ #endif
+-
+-	kfree(data->object_table);
+ 	data->object_table = NULL;
++	data->info = NULL;
++	kfree(data->raw_info_block);
++	data->raw_info_block = NULL;
+ 	kfree(data->msg_buf);
+ 	data->msg_buf = NULL;
+ 	data->T5_address = 0;
+@@ -1600,34 +1589,18 @@ static void mxt_free_object_table(struct mxt_data *data)
+ 	data->max_reportid = 0;
+ }
+ 
+-static int mxt_get_object_table(struct mxt_data *data)
++static int mxt_parse_object_table(struct mxt_data *data,
++				  struct mxt_object *object_table)
+ {
+ 	struct i2c_client *client = data->client;
+-	size_t table_size;
+-	struct mxt_object *object_table;
+-	int error;
+ 	int i;
+ 	u8 reportid;
+ 	u16 end_address;
+ 
+-	table_size = data->info.object_num * sizeof(struct mxt_object);
+-	object_table = kzalloc(table_size, GFP_KERNEL);
+-	if (!object_table) {
+-		dev_err(&data->client->dev, "Failed to allocate memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
+-			object_table);
+-	if (error) {
+-		kfree(object_table);
+-		return error;
+-	}
+-
+ 	/* Valid Report IDs start counting from 1 */
+ 	reportid = 1;
+ 	data->mem_size = 0;
+-	for (i = 0; i < data->info.object_num; i++) {
++	for (i = 0; i < data->info->object_num; i++) {
+ 		struct mxt_object *object = object_table + i;
+ 		u8 min_id, max_id;
+ 
+@@ -1651,8 +1624,8 @@ static int mxt_get_object_table(struct mxt_data *data)
+ 
+ 		switch (object->type) {
+ 		case MXT_GEN_MESSAGE_T5:
+-			if (data->info.family_id == 0x80 &&
+-			    data->info.version < 0x20) {
++			if (data->info->family_id == 0x80 &&
++			    data->info->version < 0x20) {
+ 				/*
+ 				 * On mXT224 firmware versions prior to V2.0
+ 				 * read and discard unused CRC byte otherwise
+@@ -1707,24 +1680,102 @@ static int mxt_get_object_table(struct mxt_data *data)
+ 	/* If T44 exists, T5 position has to be directly after */
+ 	if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
+ 		dev_err(&client->dev, "Invalid T44 position\n");
+-		error = -EINVAL;
+-		goto free_object_table;
++		return -EINVAL;
+ 	}
+ 
+ 	data->msg_buf = kcalloc(data->max_reportid,
+ 				data->T5_msg_size, GFP_KERNEL);
+-	if (!data->msg_buf) {
+-		dev_err(&client->dev, "Failed to allocate message buffer\n");
++	if (!data->msg_buf)
++		return -ENOMEM;
++
++	return 0;
++}
++
++static int mxt_read_info_block(struct mxt_data *data)
++{
++	struct i2c_client *client = data->client;
++	int error;
++	size_t size;
++	void *id_buf, *buf;
++	uint8_t num_objects;
++	u32 calculated_crc;
++	u8 *crc_ptr;
++
++	/* If info block already allocated, free it */
++	if (data->raw_info_block)
++		mxt_free_object_table(data);
++
++	/* Read 7-byte ID information block starting at address 0 */
++	size = sizeof(struct mxt_info);
++	id_buf = kzalloc(size, GFP_KERNEL);
++	if (!id_buf)
++		return -ENOMEM;
++
++	error = __mxt_read_reg(client, 0, size, id_buf);
++	if (error)
++		goto err_free_mem;
++
++	/* Resize buffer to give space for rest of info block */
++	num_objects = ((struct mxt_info *)id_buf)->object_num;
++	size += (num_objects * sizeof(struct mxt_object))
++		+ MXT_INFO_CHECKSUM_SIZE;
++
++	buf = krealloc(id_buf, size, GFP_KERNEL);
++	if (!buf) {
+ 		error = -ENOMEM;
+-		goto free_object_table;
++		goto err_free_mem;
++	}
++	id_buf = buf;
++
++	/* Read rest of info block */
++	error = __mxt_read_reg(client, MXT_OBJECT_START,
++			       size - MXT_OBJECT_START,
++			       id_buf + MXT_OBJECT_START);
++	if (error)
++		goto err_free_mem;
++
++	/* Extract & calculate checksum */
++	crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
++	data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
++
++	calculated_crc = mxt_calculate_crc(id_buf, 0,
++					   size - MXT_INFO_CHECKSUM_SIZE);
++
++	/*
++	 * CRC mismatch can be caused by data corruption due to I2C comms
++	 * issue or else device is not using Object Based Protocol (eg i2c-hid)
++	 */
++	if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
++		dev_err(&client->dev,
++			"Info Block CRC error calculated=0x%06X read=0x%06X\n",
++			calculated_crc, data->info_crc);
++		error = -EIO;
++		goto err_free_mem;
++	}
++
++	data->raw_info_block = id_buf;
++	data->info = (struct mxt_info *)id_buf;
++
++	dev_info(&client->dev,
++		 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
++		 data->info->family_id, data->info->variant_id,
++		 data->info->version >> 4, data->info->version & 0xf,
++		 data->info->build, data->info->object_num);
++
++	/* Parse object table information */
++	error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
++	if (error) {
++		dev_err(&client->dev, "Error %d parsing object table\n", error);
++		mxt_free_object_table(data);
++		goto err_free_mem;
+ 	}
+ 
+-	data->object_table = object_table;
++	data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
+ 
+ 	return 0;
+ 
+-free_object_table:
+-	mxt_free_object_table(data);
++err_free_mem:
++	kfree(id_buf);
+ 	return error;
+ }
+ 
+@@ -2039,7 +2090,7 @@ static int mxt_initialize(struct mxt_data *data)
+ 	int error;
+ 
+ 	while (1) {
+-		error = mxt_get_info(data);
++		error = mxt_read_info_block(data);
+ 		if (!error)
+ 			break;
+ 
+@@ -2070,16 +2121,9 @@ static int mxt_initialize(struct mxt_data *data)
+ 		msleep(MXT_FW_RESET_TIME);
+ 	}
+ 
+-	/* Get object table information */
+-	error = mxt_get_object_table(data);
+-	if (error) {
+-		dev_err(&client->dev, "Error %d reading object table\n", error);
+-		return error;
+-	}
+-
+ 	error = mxt_acquire_irq(data);
+ 	if (error)
+-		goto err_free_object_table;
++		return error;
+ 
+ 	error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
+ 					&client->dev, GFP_KERNEL, data,
+@@ -2087,14 +2131,10 @@ static int mxt_initialize(struct mxt_data *data)
+ 	if (error) {
+ 		dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
+ 			error);
+-		goto err_free_object_table;
++		return error;
+ 	}
+ 
+ 	return 0;
+-
+-err_free_object_table:
+-	mxt_free_object_table(data);
+-	return error;
+ }
+ 
+ static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+@@ -2155,7 +2195,7 @@ static int mxt_init_t7_power_cfg(struct mxt_data *data)
+ static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
+ 			       unsigned int y)
+ {
+-	struct mxt_info *info = &data->info;
++	struct mxt_info *info = data->info;
+ 	struct mxt_dbg *dbg = &data->dbg;
+ 	unsigned int ofs, page;
+ 	unsigned int col = 0;
+@@ -2483,7 +2523,7 @@ static const struct video_device mxt_video_device = {
+ 
+ static void mxt_debug_init(struct mxt_data *data)
+ {
+-	struct mxt_info *info = &data->info;
++	struct mxt_info *info = data->info;
+ 	struct mxt_dbg *dbg = &data->dbg;
+ 	struct mxt_object *object;
+ 	int error;
+@@ -2569,7 +2609,6 @@ static int mxt_configure_objects(struct mxt_data *data,
+ 				 const struct firmware *cfg)
+ {
+ 	struct device *dev = &data->client->dev;
+-	struct mxt_info *info = &data->info;
+ 	int error;
+ 
+ 	error = mxt_init_t7_power_cfg(data);
+@@ -2594,11 +2633,6 @@ static int mxt_configure_objects(struct mxt_data *data,
+ 
+ 	mxt_debug_init(data);
+ 
+-	dev_info(dev,
+-		 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+-		 info->family_id, info->variant_id, info->version >> 4,
+-		 info->version & 0xf, info->build, info->object_num);
+-
+ 	return 0;
+ }
+ 
+@@ -2607,7 +2641,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
+ 				   struct device_attribute *attr, char *buf)
+ {
+ 	struct mxt_data *data = dev_get_drvdata(dev);
+-	struct mxt_info *info = &data->info;
++	struct mxt_info *info = data->info;
+ 	return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
+ 			 info->version >> 4, info->version & 0xf, info->build);
+ }
+@@ -2617,7 +2651,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
+ 				   struct device_attribute *attr, char *buf)
+ {
+ 	struct mxt_data *data = dev_get_drvdata(dev);
+-	struct mxt_info *info = &data->info;
++	struct mxt_info *info = data->info;
+ 	return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
+ 			 info->family_id, info->variant_id);
+ }
+@@ -2656,7 +2690,7 @@ static ssize_t mxt_object_show(struct device *dev,
+ 		return -ENOMEM;
+ 
+ 	error = 0;
+-	for (i = 0; i < data->info.object_num; i++) {
++	for (i = 0; i < data->info->object_num; i++) {
+ 		object = data->object_table + i;
+ 
+ 		if (!mxt_object_readable(object->type))
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 9a7ffd13c7f0..4e3e3d2f51c8 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ 	struct qi_desc desc;
+ 
+ 	if (mask) {
+-		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
++		BUG_ON(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
+ 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ 		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ 	} else
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 66f69af2c219..3062a154a9fb 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
+ 	irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+ 
+ 	/* Update the hardware only if the interrupt is in remapped mode. */
+-	if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
++	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+ 		modify_irte(&ir_data->irq_2_iommu, irte);
+ }
+ 
+diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
+index 87c34f607a75..f47678be6383 100644
+--- a/drivers/mtd/onenand/omap2.c
++++ b/drivers/mtd/onenand/omap2.c
+@@ -377,56 +377,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
+ {
+ 	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ 	struct onenand_chip *this = mtd->priv;
+-	dma_addr_t dma_src, dma_dst;
+-	int bram_offset;
++	struct device *dev = &c->pdev->dev;
+ 	void *buf = (void *)buffer;
++	dma_addr_t dma_src, dma_dst;
++	int bram_offset, err;
+ 	size_t xtra;
+-	int ret;
+ 
+ 	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+-	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+-		goto out_copy;
+-
+-	/* panic_write() may be in an interrupt context */
+-	if (in_interrupt() || oops_in_progress)
++	/*
++	 * If the buffer address is not DMA-able, len is not long enough to make
++	 * DMA transfers profitable or panic_write() may be in an interrupt
++	 * context fallback to PIO mode.
++	 */
++	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
++	    count < 384 || in_interrupt() || oops_in_progress )
+ 		goto out_copy;
+ 
+-	if (buf >= high_memory) {
+-		struct page *p1;
+-
+-		if (((size_t)buf & PAGE_MASK) !=
+-		    ((size_t)(buf + count - 1) & PAGE_MASK))
+-			goto out_copy;
+-		p1 = vmalloc_to_page(buf);
+-		if (!p1)
+-			goto out_copy;
+-		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+-	}
+-
+ 	xtra = count & 3;
+ 	if (xtra) {
+ 		count -= xtra;
+ 		memcpy(buf + count, this->base + bram_offset + count, xtra);
+ 	}
+ 
++	dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
+ 	dma_src = c->phys_base + bram_offset;
+-	dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
+-		dev_err(&c->pdev->dev,
+-			"Couldn't DMA map a %d byte buffer\n",
+-			count);
+-		goto out_copy;
+-	}
+ 
+-	ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+-	dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
+-
+-	if (ret) {
+-		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
++	if (dma_mapping_error(dev, dma_dst)) {
++		dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
+ 		goto out_copy;
+ 	}
+ 
+-	return 0;
++	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
++	dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
++	if (!err)
++		return 0;
++
++	dev_err(dev, "timeout waiting for DMA\n");
+ 
+ out_copy:
+ 	memcpy(buf, this->base + bram_offset, count);
+@@ -439,49 +425,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
+ {
+ 	struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
+ 	struct onenand_chip *this = mtd->priv;
+-	dma_addr_t dma_src, dma_dst;
+-	int bram_offset;
++	struct device *dev = &c->pdev->dev;
+ 	void *buf = (void *)buffer;
+-	int ret;
++	dma_addr_t dma_src, dma_dst;
++	int bram_offset, err;
+ 
+ 	bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
+-	if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
+-		goto out_copy;
+-
+-	/* panic_write() may be in an interrupt context */
+-	if (in_interrupt() || oops_in_progress)
++	/*
++	 * If the buffer address is not DMA-able, len is not long enough to make
++	 * DMA transfers profitable or panic_write() may be in an interrupt
++	 * context fallback to PIO mode.
++	 */
++	if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
++	    count < 384 || in_interrupt() || oops_in_progress )
+ 		goto out_copy;
+ 
+-	if (buf >= high_memory) {
+-		struct page *p1;
+-
+-		if (((size_t)buf & PAGE_MASK) !=
+-		    ((size_t)(buf + count - 1) & PAGE_MASK))
+-			goto out_copy;
+-		p1 = vmalloc_to_page(buf);
+-		if (!p1)
+-			goto out_copy;
+-		buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
+-	}
+-
+-	dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
++	dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
+ 	dma_dst = c->phys_base + bram_offset;
+-	if (dma_mapping_error(&c->pdev->dev, dma_src)) {
+-		dev_err(&c->pdev->dev,
+-			"Couldn't DMA map a %d byte buffer\n",
+-			count);
+-		return -1;
+-	}
+-
+-	ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+-	dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
+-
+-	if (ret) {
+-		dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
++	if (dma_mapping_error(dev, dma_src)) {
++		dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
+ 		goto out_copy;
+ 	}
+ 
+-	return 0;
++	err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
++	dma_unmap_page(dev, dma_src, count, DMA_TO_DEVICE);
++	if (!err)
++		return 0;
++
++	dev_err(dev, "timeout waiting for DMA\n");
+ 
+ out_copy:
+ 	memcpy(this->base + bram_offset, buf, count);
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index b1779566c5bb..3c71f1cb205f 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -605,7 +605,7 @@ void can_bus_off(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	netdev_dbg(dev, "bus-off\n");
++	netdev_info(dev, "bus-off\n");
+ 
+ 	netif_carrier_off(dev);
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 32f6d2e24d66..1a1a6380c128 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -95,6 +95,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
+ 	/*rss rings */
+ 	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
+ 	cfg->vecs = min(cfg->vecs, num_online_cpus());
++	cfg->vecs = min(cfg->vecs, self->irqvecs);
+ 	/* cfg->vecs should be power of 2 for RSS */
+ 	if (cfg->vecs >= 8U)
+ 		cfg->vecs = 8U;
+@@ -246,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
+ 
+ 	self->ndev->hw_features |= aq_hw_caps->hw_features;
+ 	self->ndev->features = aq_hw_caps->hw_features;
++	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
++				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
+ 	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+ 	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+index 219b550d1665..faa533a0ec47 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+@@ -80,6 +80,7 @@ struct aq_nic_s {
+ 
+ 	struct pci_dev *pdev;
+ 	unsigned int msix_entry_mask;
++	u32 irqvecs;
+ };
+ 
+ static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+index ecc6306f940f..750007513f9d 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+@@ -267,16 +267,15 @@ static int aq_pci_probe(struct pci_dev *pdev,
+ 	numvecs = min(numvecs, num_online_cpus());
+ 	/*enable interrupts */
+ #if !AQ_CFG_FORCE_LEGACY_INT
+-	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
+-				    PCI_IRQ_MSIX);
+-
+-	if (err < 0) {
+-		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+-					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+-		if (err < 0)
+-			goto err_hwinit;
+-	}
++	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
++				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
++				    PCI_IRQ_LEGACY);
++
++	if (err < 0)
++		goto err_hwinit;
++	numvecs = err;
+ #endif
++	self->irqvecs = numvecs;
+ 
+ 	/* net device init */
+ 	aq_nic_cfg_start(self);
+@@ -298,9 +297,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
+ 	kfree(self->aq_hw);
+ err_ioremap:
+ 	free_netdev(ndev);
+-err_pci_func:
+-	pci_release_regions(pdev);
+ err_ndev:
++	pci_release_regions(pdev);
++err_pci_func:
+ 	pci_disable_device(pdev);
+ 	return err;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 57dcb957f27c..e95fb6b43187 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5191,6 +5191,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	}
+ 	spin_lock_init(&adapter->mbox_lock);
+ 	INIT_LIST_HEAD(&adapter->mlist.list);
++	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+ 	pci_set_drvdata(pdev, adapter);
+ 
+ 	if (func != ent->driver_data) {
+@@ -5225,8 +5226,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto out_free_adapter;
+ 	}
+ 
+-	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+-
+ 	/* PCI device has been enabled */
+ 	adapter->flags |= DEV_ENABLED;
+ 	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
+index 3e62692af011..fa5b30f547f6 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
+@@ -87,7 +87,7 @@ do { \
+ 
+ #define HNAE_AE_REGISTER 0x1
+ 
+-#define RCB_RING_NAME_LEN 16
++#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
+ 
+ #define HNAE_LOWEST_LATENCY_COAL_PARAM	30
+ #define HNAE_LOW_LATENCY_COAL_PARAM	80
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index fd8e6937ee00..cd6d08399970 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1711,7 +1711,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 	for (i = 0; i < adapter->req_rx_queues; i++)
+ 		napi_schedule(&adapter->napi[i]);
+ 
+-	if (adapter->reset_reason != VNIC_RESET_FAILOVER)
++	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
++	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
+ 		netdev_notify_peers(netdev);
+ 
+ 	netif_carrier_on(netdev);
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b88fae785369..33a052174c0f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1698,7 +1698,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
+ 	WARN_ON(hw->mac.type != e1000_i210);
+ 	WARN_ON(queue < 0 || queue > 1);
+ 
+-	if (enable) {
++	if (enable || queue == 0) {
++		/* i210 does not allow the queue 0 to be in the Strict
++		 * Priority mode while the Qav mode is enabled, so,
++		 * instead of disabling strict priority mode, we give
++		 * queue 0 the maximum of credits possible.
++		 *
++		 * See section 8.12.19 of the i210 datasheet, "Note:
++		 * Queue0 QueueMode must be set to 1b when
++		 * TransmitMode is set to Qav."
++		 */
++		if (queue == 0 && !enable) {
++			/* max "linkspeed" idleslope in kbps */
++			idleslope = 1000000;
++			hicredit = ETH_FRAME_LEN;
++		}
++
+ 		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
+ 		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index 93eacddb6704..336562a0685d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -918,8 +918,8 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
+ 	kfree(ipsec->ip_tbl);
+ 	kfree(ipsec->rx_tbl);
+ 	kfree(ipsec->tx_tbl);
++	kfree(ipsec);
+ err1:
+-	kfree(adapter->ipsec);
+ 	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index f470d0204771..14e3a801390b 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -3427,6 +3427,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
+ 		hw->phy.sfp_setup_needed = false;
+ 	}
+ 
++	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
++		return status;
++
+ 	/* Reset PHY */
+ 	if (!hw->phy.reset_disable && hw->phy.ops.reset)
+ 		hw->phy.ops.reset(hw);
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 7f1083ce23da..7f5b9b6bf007 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -8332,12 +8332,12 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 		if (IS_ERR(priv->axi_clk)) {
+ 			err = PTR_ERR(priv->axi_clk);
+ 			if (err == -EPROBE_DEFER)
+-				goto err_gop_clk;
++				goto err_mg_clk;
+ 			priv->axi_clk = NULL;
+ 		} else {
+ 			err = clk_prepare_enable(priv->axi_clk);
+ 			if (err < 0)
+-				goto err_gop_clk;
++				goto err_mg_clk;
+ 		}
+ 
+ 		/* Get system's tclk rate */
+@@ -8351,7 +8351,7 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 	if (priv->hw_version == MVPP22) {
+ 		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
+ 		if (err)
+-			goto err_mg_clk;
++			goto err_axi_clk;
+ 		/* Sadly, the BM pools all share the same register to
+ 		 * store the high 32 bits of their address. So they
+ 		 * must all have the same high 32 bits, which forces
+@@ -8359,14 +8359,14 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 		 */
+ 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ 		if (err)
+-			goto err_mg_clk;
++			goto err_axi_clk;
+ 	}
+ 
+ 	/* Initialize network controller */
+ 	err = mvpp2_init(pdev, priv);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to initialize controller\n");
+-		goto err_mg_clk;
++		goto err_axi_clk;
+ 	}
+ 
+ 	/* Initialize ports */
+@@ -8379,7 +8379,7 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 	if (priv->port_count == 0) {
+ 		dev_err(&pdev->dev, "no ports enabled\n");
+ 		err = -ENODEV;
+-		goto err_mg_clk;
++		goto err_axi_clk;
+ 	}
+ 
+ 	/* Statistics must be gathered regularly because some of them (like
+@@ -8407,8 +8407,9 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 			mvpp2_port_remove(priv->port_list[i]);
+ 		i++;
+ 	}
+-err_mg_clk:
++err_axi_clk:
+ 	clk_disable_unprepare(priv->axi_clk);
++err_mg_clk:
+ 	if (priv->hw_version == MVPP22)
+ 		clk_disable_unprepare(priv->mg_clk);
+ err_gop_clk:
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+index baaea6f1a9d8..6409957e1657 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+@@ -242,18 +242,49 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
+ 
+ void nfp_flower_cmsg_process_rx(struct work_struct *work)
+ {
++	struct sk_buff_head cmsg_joined;
+ 	struct nfp_flower_priv *priv;
+ 	struct sk_buff *skb;
+ 
+ 	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
++	skb_queue_head_init(&cmsg_joined);
+ 
+-	while ((skb = skb_dequeue(&priv->cmsg_skbs)))
++	spin_lock_bh(&priv->cmsg_skbs_high.lock);
++	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
++	spin_unlock_bh(&priv->cmsg_skbs_high.lock);
++
++	spin_lock_bh(&priv->cmsg_skbs_low.lock);
++	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
++	spin_unlock_bh(&priv->cmsg_skbs_low.lock);
++
++	while ((skb = __skb_dequeue(&cmsg_joined)))
+ 		nfp_flower_cmsg_process_one_rx(priv->app, skb);
+ }
+ 
+-void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
++static void
++nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
+ {
+ 	struct nfp_flower_priv *priv = app->priv;
++	struct sk_buff_head *skb_head;
++
++	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
++	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
++		skb_head = &priv->cmsg_skbs_high;
++	else
++		skb_head = &priv->cmsg_skbs_low;
++
++	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
++		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
++		dev_kfree_skb_any(skb);
++		return;
++	}
++
++	skb_queue_tail(skb_head, skb);
++	schedule_work(&priv->cmsg_work);
++}
++
++void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
++{
+ 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
+ 
+ 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
+@@ -270,7 +301,6 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
+ 		nfp_flower_rx_flow_stats(app, skb);
+ 		dev_consume_skb_any(skb);
+ 	} else {
+-		skb_queue_tail(&priv->cmsg_skbs, skb);
+-		schedule_work(&priv->cmsg_work);
++		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
+ 	}
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+index 329a9b6d453a..343f9117fb57 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+@@ -98,6 +98,8 @@
+ #define NFP_FL_IPV4_TUNNEL_TYPE		GENMASK(7, 4)
+ #define NFP_FL_IPV4_PRE_TUN_INDEX	GENMASK(2, 0)
+ 
++#define NFP_FLOWER_WORKQ_MAX_SKBS	30000
++
+ #define nfp_flower_cmsg_warn(app, fmt, args...)                         \
+ 	do {                                                            \
+ 		if (net_ratelimit())                                    \
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
+index 742d6f1575b5..646fc97f1f0b 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
+@@ -358,7 +358,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ 		}
+ 
+ 		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
+-		nfp_net_get_mac_addr(app->pf, port);
++		nfp_net_get_mac_addr(app->pf, repr, port);
+ 
+ 		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
+ 		err = nfp_repr_init(app, repr,
+@@ -517,7 +517,8 @@ static int nfp_flower_init(struct nfp_app *app)
+ 
+ 	app->priv = app_priv;
+ 	app_priv->app = app;
+-	skb_queue_head_init(&app_priv->cmsg_skbs);
++	skb_queue_head_init(&app_priv->cmsg_skbs_high);
++	skb_queue_head_init(&app_priv->cmsg_skbs_low);
+ 	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
+ 	init_waitqueue_head(&app_priv->reify_wait_queue);
+ 
+@@ -544,7 +545,8 @@ static void nfp_flower_clean(struct nfp_app *app)
+ {
+ 	struct nfp_flower_priv *app_priv = app->priv;
+ 
+-	skb_queue_purge(&app_priv->cmsg_skbs);
++	skb_queue_purge(&app_priv->cmsg_skbs_high);
++	skb_queue_purge(&app_priv->cmsg_skbs_low);
+ 	flush_work(&app_priv->cmsg_work);
+ 
+ 	nfp_flower_metadata_cleanup(app);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
+index 332ff0fdc038..1eca582c5846 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
+@@ -89,7 +89,10 @@ struct nfp_fl_stats_id {
+  * @mask_table:		Hash table used to store masks
+  * @flow_table:		Hash table used to store flower rules
+  * @cmsg_work:		Workqueue for control messages processing
+- * @cmsg_skbs:		List of skbs for control message processing
++ * @cmsg_skbs_high:	List of higher priority skbs for control message
++ *			processing
++ * @cmsg_skbs_low:	List of lower priority skbs for control message
++ *			processing
+  * @nfp_mac_off_list:	List of MAC addresses to offload
+  * @nfp_mac_index_list:	List of unique 8-bit indexes for non NFP netdevs
+  * @nfp_ipv4_off_list:	List of IPv4 addresses to offload
+@@ -117,7 +120,8 @@ struct nfp_flower_priv {
+ 	DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS);
+ 	DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS);
+ 	struct work_struct cmsg_work;
+-	struct sk_buff_head cmsg_skbs;
++	struct sk_buff_head cmsg_skbs_high;
++	struct sk_buff_head cmsg_skbs_low;
+ 	struct list_head nfp_mac_off_list;
+ 	struct list_head nfp_mac_index_list;
+ 	struct list_head nfp_ipv4_off_list;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+index 2a2f2fbc8850..b9618c37403f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
+@@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
+ 	if (err)
+ 		return err < 0 ? err : 0;
+ 
+-	nfp_net_get_mac_addr(app->pf, nn->port);
++	nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+index add46e28212b..42211083b51f 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
+@@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf);
+ int nfp_hwmon_register(struct nfp_pf *pf);
+ void nfp_hwmon_unregister(struct nfp_pf *pf);
+ 
+-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
++void
++nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
++		     struct nfp_port *port);
+ 
+ bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+index 15fa47f622aa..45cd2092e498 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+@@ -67,23 +67,26 @@
+ /**
+  * nfp_net_get_mac_addr() - Get the MAC address.
+  * @pf:       NFP PF handle
++ * @netdev:   net_device to set MAC address on
+  * @port:     NFP port structure
+  *
+  * First try to get the MAC address from NSP ETH table. If that
+  * fails generate a random address.
+  */
+-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
++void
++nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
++		     struct nfp_port *port)
+ {
+ 	struct nfp_eth_table_port *eth_port;
+ 
+ 	eth_port = __nfp_port_get_eth_port(port);
+ 	if (!eth_port) {
+-		eth_hw_addr_random(port->netdev);
++		eth_hw_addr_random(netdev);
+ 		return;
+ 	}
+ 
+-	ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
+-	ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
++	ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
++	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
+ }
+ 
+ static struct nfp_eth_table_port *
+@@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
+ 		return PTR_ERR(mem);
+ 	}
+ 
+-	min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
+-	pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
+-					  "net.macstats", min_size,
+-					  &pf->mac_stats_bar);
+-	if (IS_ERR(pf->mac_stats_mem)) {
+-		if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
+-			err = PTR_ERR(pf->mac_stats_mem);
+-			goto err_unmap_ctrl;
++	if (pf->eth_tbl) {
++		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
++		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
++						  "net.macstats", min_size,
++						  &pf->mac_stats_bar);
++		if (IS_ERR(pf->mac_stats_mem)) {
++			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
++				err = PTR_ERR(pf->mac_stats_mem);
++				goto err_unmap_ctrl;
++			}
++			pf->mac_stats_mem = NULL;
+ 		}
+-		pf->mac_stats_mem = NULL;
+ 	}
+ 
+ 	pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+index 99bb679a9801..2abee0fe3a7c 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+@@ -281,8 +281,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
+ 		if ((*reg & mask) == val)
+ 			return 0;
+ 
+-		if (msleep_interruptible(25))
+-			return -ERESTARTSYS;
++		msleep(25);
+ 
+ 		if (time_after(start_time, wait_until))
+ 			return -ETIMEDOUT;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 893ef08a4b39..eaf50e6af6b3 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
+ 
+ void qed_l2_setup(struct qed_hwfn *p_hwfn)
+ {
+-	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+-	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
++	if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ 		return;
+ 
+ 	mutex_init(&p_hwfn->p_l2_info->lock);
+@@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn)
+ {
+ 	u32 i;
+ 
+-	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
+-	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
++	if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ 		return;
+ 
+ 	if (!p_hwfn->p_l2_info)
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+index 50b142fad6b8..1900bf7e67d1 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+@@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev)
+ 	}
+ 
+ 	if (!found) {
+-		event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
++		event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
+ 		if (!event_node) {
+ 			DP_NOTICE(edev,
+ 				  "qedr: Could not allocate memory for rdma work\n");
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 3bb6b66dc7bf..f9c25912eb98 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -720,6 +720,15 @@ static struct phy_driver broadcom_drivers[] = {
+ 	.get_strings	= bcm_phy_get_strings,
+ 	.get_stats	= bcm53xx_phy_get_stats,
+ 	.probe		= bcm53xx_phy_probe,
++}, {
++	.phy_id         = PHY_ID_BCM89610,
++	.phy_id_mask    = 0xfffffff0,
++	.name           = "Broadcom BCM89610",
++	.features       = PHY_GBIT_FEATURES,
++	.flags          = PHY_HAS_INTERRUPT,
++	.config_init    = bcm54xx_config_init,
++	.ack_interrupt  = bcm_phy_ack_intr,
++	.config_intr    = bcm_phy_config_intr,
+ } };
+ 
+ module_phy_driver(broadcom_drivers);
+@@ -741,6 +750,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ 	{ PHY_ID_BCMAC131, 0xfffffff0 },
+ 	{ PHY_ID_BCM5241, 0xfffffff0 },
+ 	{ PHY_ID_BCM5395, 0xfffffff0 },
++	{ PHY_ID_BCM89610, 0xfffffff0 },
+ 	{ }
+ };
+ 
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 0e0978d8a0eb..febbeeecb078 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -1377,6 +1377,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
+ 		if (err < 0)
+ 			goto error;
+ 
++		/* If WOL event happened once, the LED[2] interrupt pin
++		 * will not be cleared unless we reading the interrupt status
++		 * register. If interrupts are in use, the normal interrupt
++		 * handling will clear the WOL event. Clear the WOL event
++		 * before enabling it if !phy_interrupt_is_valid()
++		 */
++		if (!phy_interrupt_is_valid(phydev))
++			phy_read(phydev, MII_M1011_IEVENT);
++
+ 		/* Enable the WOL interrupt */
+ 		err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
+ 				   MII_88E1318S_PHY_CSIER_WOL_EIE);
+diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
+index 0f293ef28935..a97ac8c12c4c 100644
+--- a/drivers/net/phy/microchip.c
++++ b/drivers/net/phy/microchip.c
+@@ -20,6 +20,7 @@
+ #include <linux/ethtool.h>
+ #include <linux/phy.h>
+ #include <linux/microchipphy.h>
++#include <linux/delay.h>
+ 
+ #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
+ #define DRIVER_DESC	"Microchip LAN88XX PHY driver"
+@@ -30,6 +31,16 @@ struct lan88xx_priv {
+ 	__u32	wolopts;
+ };
+ 
++static int lan88xx_read_page(struct phy_device *phydev)
++{
++	return __phy_read(phydev, LAN88XX_EXT_PAGE_ACCESS);
++}
++
++static int lan88xx_write_page(struct phy_device *phydev, int page)
++{
++	return __phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, page);
++}
++
+ static int lan88xx_phy_config_intr(struct phy_device *phydev)
+ {
+ 	int rc;
+@@ -66,6 +77,150 @@ static int lan88xx_suspend(struct phy_device *phydev)
+ 	return 0;
+ }
+ 
++static int lan88xx_TR_reg_set(struct phy_device *phydev, u16 regaddr,
++			      u32 data)
++{
++	int val, save_page, ret = 0;
++	u16 buf;
++
++	/* Save current page */
++	save_page = phy_save_page(phydev);
++	if (save_page < 0) {
++		pr_warn("Failed to get current page\n");
++		goto err;
++	}
++
++	/* Switch to TR page */
++	lan88xx_write_page(phydev, LAN88XX_EXT_PAGE_ACCESS_TR);
++
++	ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_LOW_DATA,
++			  (data & 0xFFFF));
++	if (ret < 0) {
++		pr_warn("Failed to write TR low data\n");
++		goto err;
++	}
++
++	ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_HIGH_DATA,
++			  (data & 0x00FF0000) >> 16);
++	if (ret < 0) {
++		pr_warn("Failed to write TR high data\n");
++		goto err;
++	}
++
++	/* Config control bits [15:13] of register */
++	buf = (regaddr & ~(0x3 << 13));/* Clr [14:13] to write data in reg */
++	buf |= 0x8000; /* Set [15] to Packet transmit */
++
++	ret = __phy_write(phydev, LAN88XX_EXT_PAGE_TR_CR, buf);
++	if (ret < 0) {
++		pr_warn("Failed to write data in reg\n");
++		goto err;
++	}
++
++	usleep_range(1000, 2000);/* Wait for Data to be written */
++	val = __phy_read(phydev, LAN88XX_EXT_PAGE_TR_CR);
++	if (!(val & 0x8000))
++		pr_warn("TR Register[0x%X] configuration failed\n", regaddr);
++err:
++	return phy_restore_page(phydev, save_page, ret);
++}
++
++static void lan88xx_config_TR_regs(struct phy_device *phydev)
++{
++	int err;
++
++	/* Get access to Channel 0x1, Node 0xF , Register 0x01.
++	 * Write 24-bit value 0x12B00A to register. Setting MrvlTrFix1000Kf,
++	 * MrvlTrFix1000Kp, MasterEnableTR bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x0F82, 0x12B00A);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x0F82]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x06.
++	 * Write 24-bit value 0xD2C46F to register. Setting SSTrKf1000Slv,
++	 * SSTrKp1000Mas bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x168C, 0xD2C46F);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x168C]\n");
++
++	/* Get access to Channel b'10, Node b'1111, Register 0x11.
++	 * Write 24-bit value 0x620 to register. Setting rem_upd_done_thresh
++	 * bits
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x17A2, 0x620);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x17A2]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x10.
++	 * Write 24-bit value 0xEEFFDD to register. Setting
++	 * eee_TrKp1Long_1000, eee_TrKp2Long_1000, eee_TrKp3Long_1000,
++	 * eee_TrKp1Short_1000,eee_TrKp2Short_1000, eee_TrKp3Short_1000 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x16A0, 0xEEFFDD);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x16A0]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x13.
++	 * Write 24-bit value 0x071448 to register. Setting
++	 * slv_lpi_tr_tmr_val1, slv_lpi_tr_tmr_val2 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x16A6, 0x071448);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x16A6]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x12.
++	 * Write 24-bit value 0x13132F to register. Setting
++	 * slv_sigdet_timer_val1, slv_sigdet_timer_val2 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x16A4, 0x13132F);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x16A4]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x14.
++	 * Write 24-bit value 0x0 to register. Setting eee_3level_delay,
++	 * eee_TrKf_freeze_delay bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x16A8, 0x0);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x16A8]\n");
++
++	/* Get access to Channel b'01, Node b'1111, Register 0x34.
++	 * Write 24-bit value 0x91B06C to register. Setting
++	 * FastMseSearchThreshLong1000, FastMseSearchThreshShort1000,
++	 * FastMseSearchUpdGain1000 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x0FE8, 0x91B06C);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x0FE8]\n");
++
++	/* Get access to Channel b'01, Node b'1111, Register 0x3E.
++	 * Write 24-bit value 0xC0A028 to register. Setting
++	 * FastMseKp2ThreshLong1000, FastMseKp2ThreshShort1000,
++	 * FastMseKp2UpdGain1000, FastMseKp2ExitEn1000 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x0FFC, 0xC0A028);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x0FFC]\n");
++
++	/* Get access to Channel b'01, Node b'1111, Register 0x35.
++	 * Write 24-bit value 0x041600 to register. Setting
++	 * FastMseSearchPhShNum1000, FastMseSearchClksPerPh1000,
++	 * FastMsePhChangeDelay1000 bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x0FEA, 0x041600);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x0FEA]\n");
++
++	/* Get access to Channel b'10, Node b'1101, Register 0x03.
++	 * Write 24-bit value 0x000004 to register. Setting TrFreeze bits.
++	 */
++	err = lan88xx_TR_reg_set(phydev, 0x1686, 0x000004);
++	if (err < 0)
++		pr_warn("Failed to Set Register[0x1686]\n");
++}
++
+ static int lan88xx_probe(struct phy_device *phydev)
+ {
+ 	struct device *dev = &phydev->mdio.dev;
+@@ -132,6 +287,25 @@ static void lan88xx_set_mdix(struct phy_device *phydev)
+ 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
+ }
+ 
++static int lan88xx_config_init(struct phy_device *phydev)
++{
++	int val;
++
++	genphy_config_init(phydev);
++	/*Zerodetect delay enable */
++	val = phy_read_mmd(phydev, MDIO_MMD_PCS,
++			   PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
++	val |= PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_;
++
++	phy_write_mmd(phydev, MDIO_MMD_PCS, PHY_ARDENNES_MMD_DEV_3_PHY_CFG,
++		      val);
++
++	/* Config DSP registers */
++	lan88xx_config_TR_regs(phydev);
++
++	return 0;
++}
++
+ static int lan88xx_config_aneg(struct phy_device *phydev)
+ {
+ 	lan88xx_set_mdix(phydev);
+@@ -151,7 +325,7 @@ static struct phy_driver microchip_phy_driver[] = {
+ 	.probe		= lan88xx_probe,
+ 	.remove		= lan88xx_remove,
+ 
+-	.config_init	= genphy_config_init,
++	.config_init	= lan88xx_config_init,
+ 	.config_aneg	= lan88xx_config_aneg,
+ 
+ 	.ack_interrupt	= lan88xx_phy_ack_interrupt,
+@@ -160,6 +334,8 @@ static struct phy_driver microchip_phy_driver[] = {
+ 	.suspend	= lan88xx_suspend,
+ 	.resume		= genphy_resume,
+ 	.set_wol	= lan88xx_set_wol,
++	.read_page	= lan88xx_read_page,
++	.write_page	= lan88xx_write_page,
+ } };
+ 
+ module_phy_driver(microchip_phy_driver);
+diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
+index b979cf3bce65..88a8b5916624 100644
+--- a/drivers/nvme/host/Kconfig
++++ b/drivers/nvme/host/Kconfig
+@@ -27,7 +27,7 @@ config NVME_FABRICS
+ 
+ config NVME_RDMA
+ 	tristate "NVM Express over Fabrics RDMA host driver"
+-	depends on INFINIBAND && BLOCK
++	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
+ 	select NVME_CORE
+ 	select NVME_FABRICS
+ 	select SG_POOL
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index df3d5051539d..4ae5be34131c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -99,6 +99,7 @@ static struct class *nvme_subsys_class;
+ 
+ static void nvme_ns_remove(struct nvme_ns *ns);
+ static int nvme_revalidate_disk(struct gendisk *disk);
++static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+ 
+ static __le32 nvme_get_log_dw10(u8 lid, size_t size)
+ {
+@@ -353,6 +354,7 @@ static void nvme_free_ns_head(struct kref *ref)
+ 	ida_simple_remove(&head->subsys->ns_ida, head->instance);
+ 	list_del_init(&head->entry);
+ 	cleanup_srcu_struct(&head->srcu);
++	nvme_put_subsystem(head->subsys);
+ 	kfree(head);
+ }
+ 
+@@ -767,6 +769,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ 				ret = PTR_ERR(meta);
+ 				goto out_unmap;
+ 			}
++			req->cmd_flags |= REQ_INTEGRITY;
+ 		}
+ 	}
+ 
+@@ -2842,6 +2845,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
+ 		goto out_cleanup_srcu;
+ 
+ 	list_add_tail(&head->entry, &ctrl->subsys->nsheads);
++
++	kref_get(&ctrl->subsys->ref);
++
+ 	return head;
+ out_cleanup_srcu:
+ 	cleanup_srcu_struct(&head->srcu);
+@@ -2978,31 +2984,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ 	if (nvme_init_ns_head(ns, nsid, id))
+ 		goto out_free_id;
+ 	nvme_setup_streams_ns(ctrl, ns);
+-	
+-#ifdef CONFIG_NVME_MULTIPATH
+-	/*
+-	 * If multipathing is enabled we need to always use the subsystem
+-	 * instance number for numbering our devices to avoid conflicts
+-	 * between subsystems that have multiple controllers and thus use
+-	 * the multipath-aware subsystem node and those that have a single
+-	 * controller and use the controller node directly.
+-	 */
+-	if (ns->head->disk) {
+-		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+-				ctrl->cntlid, ns->head->instance);
+-		flags = GENHD_FL_HIDDEN;
+-	} else {
+-		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+-				ns->head->instance);
+-	}
+-#else
+-	/*
+-	 * But without the multipath code enabled, multiple controller per
+-	 * subsystems are visible as devices and thus we cannot use the
+-	 * subsystem instance.
+-	 */
+-	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+-#endif
++	nvme_set_disk_name(disk_name, ns, ctrl, &flags);
+ 
+ 	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
+ 		if (nvme_nvm_register(ns, disk_name, node)) {
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 124c458806df..7ae732a77fe8 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++			kfree(opts->transport);
+ 			opts->transport = p;
+ 			break;
+ 		case NVMF_OPT_NQN:
+@@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++			kfree(opts->subsysnqn);
+ 			opts->subsysnqn = p;
+ 			nqnlen = strlen(opts->subsysnqn);
+ 			if (nqnlen >= NVMF_NQN_SIZE) {
+@@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++			kfree(opts->traddr);
+ 			opts->traddr = p;
+ 			break;
+ 		case NVMF_OPT_TRSVCID:
+@@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++			kfree(opts->trsvcid);
+ 			opts->trsvcid = p;
+ 			break;
+ 		case NVMF_OPT_QUEUE_SIZE:
+@@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -EINVAL;
+ 				goto out;
+ 			}
++			nvmf_host_put(opts->host);
+ 			opts->host = nvmf_host_add(p);
+ 			kfree(p);
+ 			if (!opts->host) {
+@@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 				ret = -ENOMEM;
+ 				goto out;
+ 			}
++			kfree(opts->host_traddr);
+ 			opts->host_traddr = p;
+ 			break;
+ 		case NVMF_OPT_HOST_ID:
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 060f69e03427..0949633ac87c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -15,10 +15,32 @@
+ #include "nvme.h"
+ 
+ static bool multipath = true;
+-module_param(multipath, bool, 0644);
++module_param(multipath, bool, 0444);
+ MODULE_PARM_DESC(multipath,
+ 	"turn on native support for multiple controllers per subsystem");
+ 
++/*
++ * If multipathing is enabled we need to always use the subsystem instance
++ * number for numbering our devices to avoid conflicts between subsystems that
++ * have multiple controllers and thus use the multipath-aware subsystem node
++ * and those that have a single controller and use the controller node
++ * directly.
++ */
++void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
++			struct nvme_ctrl *ctrl, int *flags)
++{
++	if (!multipath) {
++		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
++	} else if (ns->head->disk) {
++		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
++				ctrl->cntlid, ns->head->instance);
++		*flags = GENHD_FL_HIDDEN;
++	} else {
++		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
++				ns->head->instance);
++	}
++}
++
+ void nvme_failover_req(struct request *req)
+ {
+ 	struct nvme_ns *ns = req->q->queuedata;
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 0133f3d2ce94..011d67ba11d5 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -411,6 +411,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
+ extern const struct block_device_operations nvme_ns_head_ops;
+ 
+ #ifdef CONFIG_NVME_MULTIPATH
++void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
++			struct nvme_ctrl *ctrl, int *flags);
+ void nvme_failover_req(struct request *req);
+ bool nvme_req_needs_failover(struct request *req, blk_status_t error);
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+@@ -436,6 +438,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+ }
+ 
+ #else
++/*
++ * Without the multipath code enabled, multiple controller per subsystems are
++ * visible as devices and thus we cannot use the subsystem instance.
++ */
++static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
++				      struct nvme_ctrl *ctrl, int *flags)
++{
++	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
++}
++
+ static inline void nvme_failover_req(struct request *req)
+ {
+ }
+diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
+index 5f4f8b16685f..3c7b61ddb0d1 100644
+--- a/drivers/nvme/target/Kconfig
++++ b/drivers/nvme/target/Kconfig
+@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
+ 
+ config NVME_TARGET_RDMA
+ 	tristate "NVMe over Fabrics RDMA target support"
+-	depends on INFINIBAND
++	depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+ 	depends on NVME_TARGET
+ 	select SGL_ALLOC
+ 	help
+diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
+index 13d839bd6160..c1b396a36a20 100644
+--- a/drivers/pci/dwc/pcie-kirin.c
++++ b/drivers/pci/dwc/pcie-kirin.c
+@@ -487,7 +487,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
+ 		return ret;
+ 
+ 	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
+-						      "reset-gpio", 0);
++						      "reset-gpios", 0);
+ 	if (kirin_pcie->gpio_id_reset < 0)
+ 		return -ENODEV;
+ 
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index b1ae1618fefe..fee9225ca559 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1622,22 +1622,30 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
+ 
+ 	if (!need_valid_mask) {
+ 		irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
+-						chip->ngpio, NUMA_NO_NODE);
++						community->npins, NUMA_NO_NODE);
+ 		if (irq_base < 0) {
+ 			dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ 			return irq_base;
+ 		}
+-	} else {
+-		irq_base = 0;
+ 	}
+ 
+-	ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
++	ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
+ 				   handle_bad_irq, IRQ_TYPE_NONE);
+ 	if (ret) {
+ 		dev_err(pctrl->dev, "failed to add IRQ chip\n");
+ 		return ret;
+ 	}
+ 
++	if (!need_valid_mask) {
++		for (i = 0; i < community->ngpio_ranges; i++) {
++			range = &community->gpio_ranges[i];
++
++			irq_domain_associate_many(chip->irq.domain, irq_base,
++						  range->base, range->npins);
++			irq_base += range->npins;
++		}
++	}
++
+ 	gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
+ 				     chv_gpio_irq_handler);
+ 	return 0;
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-axg.c b/drivers/pinctrl/meson/pinctrl-meson-axg.c
+index 4b91ff74779b..99a6ceac8e53 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-axg.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-axg.c
+@@ -898,7 +898,7 @@ static struct meson_bank meson_axg_periphs_banks[] = {
+ 
+ static struct meson_bank meson_axg_aobus_banks[] = {
+ 	/*   name    first      last      irq	pullen  pull    dir     out     in  */
+-	BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 13, 0,  16,  0, 0,  0,  0,  0, 16,  1,  0),
++	BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  16,  0, 0,  0,  0,  0, 16,  1,  0),
+ };
+ 
+ static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 51a1b49760ea..6bfb47c18a15 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -168,8 +168,8 @@ config DELL_WMI
+ 	depends on DMI
+ 	depends on INPUT
+ 	depends on ACPI_VIDEO || ACPI_VIDEO = n
++	depends on DELL_SMBIOS
+ 	select DELL_WMI_DESCRIPTOR
+-	select DELL_SMBIOS
+ 	select INPUT_SPARSEKMAP
+ 	---help---
+ 	  Say Y here if you want to support WMI-based hotkeys on Dell laptops.
+diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
+index b4e5e725848d..5f5b57fcf792 100644
+--- a/drivers/remoteproc/qcom_q6v5_pil.c
++++ b/drivers/remoteproc/qcom_q6v5_pil.c
+@@ -1088,6 +1088,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ 		dev_err(qproc->dev, "unable to resolve mba region\n");
+ 		return ret;
+ 	}
++	of_node_put(node);
+ 
+ 	qproc->mba_phys = r.start;
+ 	qproc->mba_size = resource_size(&r);
+@@ -1105,6 +1106,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+ 		dev_err(qproc->dev, "unable to resolve mpss region\n");
+ 		return ret;
+ 	}
++	of_node_put(node);
+ 
+ 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
+ 	qproc->mpss_size = resource_size(&r);
+diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
+index e8bb023ff15e..3e3417c8bb9e 100644
+--- a/drivers/reset/reset-uniphier.c
++++ b/drivers/reset/reset-uniphier.c
+@@ -107,7 +107,7 @@ static const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = {
+ 	UNIPHIER_RESETX(4, 0x200c, 2),		/* eMMC */
+ 	UNIPHIER_RESETX(6, 0x200c, 6),		/* Ether */
+ 	UNIPHIER_RESETX(8, 0x200c, 8),		/* STDMAC (HSC) */
+-	UNIPHIER_RESETX(12, 0x200c, 5),		/* GIO (PCIe, USB3) */
++	UNIPHIER_RESETX(14, 0x200c, 5),		/* USB30 */
+ 	UNIPHIER_RESETX(16, 0x200c, 12),	/* USB30-PHY0 */
+ 	UNIPHIER_RESETX(17, 0x200c, 13),	/* USB30-PHY1 */
+ 	UNIPHIER_RESETX(18, 0x200c, 14),	/* USB30-PHY2 */
+@@ -122,8 +122,8 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
+ 	UNIPHIER_RESETX(2, 0x200c, 0),		/* NAND */
+ 	UNIPHIER_RESETX(4, 0x200c, 2),		/* eMMC */
+ 	UNIPHIER_RESETX(8, 0x200c, 12),		/* STDMAC */
+-	UNIPHIER_RESETX(12, 0x200c, 4),		/* USB30 link (GIO0) */
+-	UNIPHIER_RESETX(13, 0x200c, 5),		/* USB31 link (GIO1) */
++	UNIPHIER_RESETX(12, 0x200c, 4),		/* USB30 link */
++	UNIPHIER_RESETX(13, 0x200c, 5),		/* USB31 link */
+ 	UNIPHIER_RESETX(16, 0x200c, 16),	/* USB30-PHY0 */
+ 	UNIPHIER_RESETX(17, 0x200c, 18),	/* USB30-PHY1 */
+ 	UNIPHIER_RESETX(18, 0x200c, 20),	/* USB30-PHY2 */
+diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
+index 64b6de9763ee..1efdf9ff8679 100644
+--- a/drivers/rpmsg/rpmsg_char.c
++++ b/drivers/rpmsg/rpmsg_char.c
+@@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
+ 	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
+ }
+ module_exit(rpmsg_chrdev_exit);
++
++MODULE_ALIAS("rpmsg:rpmsg_chrdev");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index c11a083cd956..086f172d404c 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
+ 		qeth_put_reply(reply);
+ 	}
+ 	spin_unlock_irqrestore(&card->lock, flags);
+-	atomic_set(&card->write.irq_pending, 0);
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
+ 
+@@ -1101,14 +1100,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ {
+ 	int rc;
+ 	int cstat, dstat;
++	struct qeth_cmd_buffer *iob = NULL;
+ 	struct qeth_channel *channel;
+ 	struct qeth_card *card;
+-	struct qeth_cmd_buffer *iob;
+-
+-	if (__qeth_check_irb_error(cdev, intparm, irb))
+-		return;
+-	cstat = irb->scsw.cmd.cstat;
+-	dstat = irb->scsw.cmd.dstat;
+ 
+ 	card = CARD_FROM_CDEV(cdev);
+ 	if (!card)
+@@ -1126,6 +1120,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		channel = &card->data;
+ 		QETH_CARD_TEXT(card, 5, "data");
+ 	}
++
++	if (qeth_intparm_is_iob(intparm))
++		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
++
++	if (__qeth_check_irb_error(cdev, intparm, irb)) {
++		/* IO was terminated, free its resources. */
++		if (iob)
++			qeth_release_buffer(iob->channel, iob);
++		atomic_set(&channel->irq_pending, 0);
++		wake_up(&card->wait_q);
++		return;
++	}
++
+ 	atomic_set(&channel->irq_pending, 0);
+ 
+ 	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+@@ -1149,6 +1156,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		/* we don't have to handle this further */
+ 		intparm = 0;
+ 	}
++
++	cstat = irb->scsw.cmd.cstat;
++	dstat = irb->scsw.cmd.dstat;
++
+ 	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
+ 	    (dstat & DEV_STAT_UNIT_CHECK) ||
+ 	    (cstat)) {
+@@ -1187,11 +1198,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 	    channel->state == CH_STATE_UP)
+ 		__qeth_issue_next_read(card);
+ 
+-	if (intparm) {
+-		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+-		if (iob->callback)
+-			iob->callback(iob->channel, iob);
+-	}
++	if (iob && iob->callback)
++		iob->callback(iob->channel, iob);
+ 
+ out:
+ 	wake_up(&card->wait_q);
+@@ -1862,8 +1870,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
+ 		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+ 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+ 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+-	rc = ccw_device_start(channel->ccwdev,
+-			      &channel->ccw, (addr_t) iob, 0, 0);
++	rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
++				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ 
+ 	if (rc) {
+@@ -1880,7 +1888,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
+ 	if (channel->state != CH_STATE_UP) {
+ 		rc = -ETIME;
+ 		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+-		qeth_clear_cmd_buffers(channel);
+ 	} else
+ 		rc = 0;
+ 	return rc;
+@@ -1934,8 +1941,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
+ 		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
+ 	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
+ 	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+-	rc = ccw_device_start(channel->ccwdev,
+-			      &channel->ccw, (addr_t) iob, 0, 0);
++	rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
++				      (addr_t) iob, 0, 0, QETH_TIMEOUT);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ 
+ 	if (rc) {
+@@ -1956,7 +1963,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
+ 		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
+ 			dev_name(&channel->ccwdev->dev));
+ 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
+-		qeth_clear_cmd_buffers(channel);
+ 		return -ETIME;
+ 	}
+ 	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
+@@ -2158,8 +2164,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 
+ 	QETH_CARD_TEXT(card, 6, "noirqpnd");
+ 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+-	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+-			      (addr_t) iob, 0, 0);
++	rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
++				      (addr_t) iob, 0, 0, event_timeout);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ 	if (rc) {
+ 		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
+@@ -2191,8 +2197,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 		}
+ 	}
+ 
+-	if (reply->rc == -EIO)
+-		goto error;
+ 	rc = reply->rc;
+ 	qeth_put_reply(reply);
+ 	return rc;
+@@ -2203,9 +2207,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 	list_del_init(&reply->list);
+ 	spin_unlock_irqrestore(&reply->card->lock, flags);
+ 	atomic_inc(&reply->received);
+-error:
+-	atomic_set(&card->write.irq_pending, 0);
+-	qeth_release_buffer(iob->channel, iob);
+ 	rc = reply->rc;
+ 	qeth_put_reply(reply);
+ 	return rc;
+diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
+index 619f897b4bb0..f4d1ec0b8f5a 100644
+--- a/drivers/s390/net/qeth_core_mpc.h
++++ b/drivers/s390/net/qeth_core_mpc.h
+@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
+ #define QETH_HALT_CHANNEL_PARM	-11
+ #define QETH_RCD_PARM -12
+ 
++static inline bool qeth_intparm_is_iob(unsigned long intparm)
++{
++	switch (intparm) {
++	case QETH_CLEAR_CHANNEL_PARM:
++	case QETH_HALT_CHANNEL_PARM:
++	case QETH_RCD_PARM:
++	case 0:
++		return false;
++	}
++	return true;
++}
++
+ /*****************************************************************************/
+ /* IP Assist related definitions                                             */
+ /*****************************************************************************/
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 5ef4c978ad19..eb5ca4701cec 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1339,8 +1339,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
+ 	qeth_prepare_control_data(card, len, iob);
+ 	QETH_CARD_TEXT(card, 6, "osnoirqp");
+ 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
+-	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
+-			      (addr_t) iob, 0, 0);
++	rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
++				      (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
+ 	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
+ 	if (rc) {
+ 		QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
+diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
+index 3b0c8b8a7634..066b5c3aaae6 100644
+--- a/drivers/s390/net/smsgiucv.c
++++ b/drivers/s390/net/smsgiucv.c
+@@ -176,7 +176,7 @@ static struct device_driver smsg_driver = {
+ 
+ static void __exit smsg_exit(void)
+ {
+-	cpcmd("SET SMSG IUCV", NULL, 0, NULL);
++	cpcmd("SET SMSG OFF", NULL, 0, NULL);
+ 	device_unregister(smsg_dev);
+ 	iucv_unregister(&smsg_handler, 1);
+ 	driver_unregister(&smsg_driver);
+diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
+index edb7be786c65..9e8de1462593 100644
+--- a/drivers/scsi/isci/port_config.c
++++ b/drivers/scsi/isci/port_config.c
+@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ 		 * Note: We have not moved the current phy_index so we will actually
+ 		 *       compare the startting phy with itself.
+ 		 *       This is expected and required to add the phy to the port. */
+-		while (phy_index < SCI_MAX_PHYS) {
++		for (; phy_index < SCI_MAX_PHYS; phy_index++) {
+ 			if ((phy_mask & (1 << phy_index)) == 0)
+ 				continue;
+ 			sci_phy_get_sas_address(&ihost->phys[phy_index],
+@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ 					      &ihost->phys[phy_index]);
+ 
+ 			assigned_phy_mask |= (1 << phy_index);
+-			phy_index++;
+ 		}
+ 
+ 	}
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 5ec3b74e8aed..2834171b5012 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
+ 		goto fail_fw_init;
+ 	}
+ 
+-	ret = 0;
++	return 0;
+ 
+ fail_fw_init:
+ 	dev_err(&instance->pdev->dev,
+-		"Init cmd return status %s for SCSI host %d\n",
+-		ret ? "FAILED" : "SUCCESS", instance->host->host_no);
++		"Init cmd return status FAILED for SCSI host %d\n",
++		instance->host->host_no);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index f4b52b44b966..65f6c94f2e9b 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
+ 	return nlmsg_multicast(nls, skb, 0, group, gfp);
+ }
+ 
++static int
++iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
++{
++	return nlmsg_unicast(nls, skb, portid);
++}
++
+ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
+ 		   char *data, uint32_t data_size)
+ {
+@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
+ EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
+ 
+ static int
+-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+-		    void *payload, int size)
++iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
+ {
+ 	struct sk_buff	*skb;
+ 	struct nlmsghdr	*nlh;
+ 	int len = nlmsg_total_size(size);
+-	int flags = multi ? NLM_F_MULTI : 0;
+-	int t = done ? NLMSG_DONE : type;
+ 
+ 	skb = alloc_skb(len, GFP_ATOMIC);
+ 	if (!skb) {
+@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+ 		return -ENOMEM;
+ 	}
+ 
+-	nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
+-	nlh->nlmsg_flags = flags;
++	nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
+ 	memcpy(nlmsg_data(nlh), payload, size);
+-	return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
++	return iscsi_unicast_skb(skb, portid);
+ }
+ 
+ static int
+@@ -3470,6 +3472,7 @@ static int
+ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ {
+ 	int err = 0;
++	u32 portid;
+ 	struct iscsi_uevent *ev = nlmsg_data(nlh);
+ 	struct iscsi_transport *transport = NULL;
+ 	struct iscsi_internal *priv;
+@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 	if (!try_module_get(transport->owner))
+ 		return -EINVAL;
+ 
++	portid = NETLINK_CB(skb).portid;
++
+ 	switch (nlh->nlmsg_type) {
+ 	case ISCSI_UEVENT_CREATE_SESSION:
+ 		err = iscsi_if_create_session(priv, ep, ev,
+-					      NETLINK_CB(skb).portid,
++					      portid,
+ 					      ev->u.c_session.initial_cmdsn,
+ 					      ev->u.c_session.cmds_max,
+ 					      ev->u.c_session.queue_depth);
+@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ 		}
+ 
+ 		err = iscsi_if_create_session(priv, ep, ev,
+-					NETLINK_CB(skb).portid,
++					portid,
+ 					ev->u.c_bound_session.initial_cmdsn,
+ 					ev->u.c_bound_session.cmds_max,
+ 					ev->u.c_bound_session.queue_depth);
+@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+ static void
+ iscsi_if_rx(struct sk_buff *skb)
+ {
++	u32 portid = NETLINK_CB(skb).portid;
++
+ 	mutex_lock(&rx_queue_mutex);
+ 	while (skb->len >= NLMSG_HDRLEN) {
+ 		int err;
+@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
+ 				break;
+ 			if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+ 				break;
+-			err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
+-				nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
++			err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
++						  ev, sizeof(*ev));
+ 		} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
+ 		skb_pull(skb, rlen);
+ 	}
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 8c51d628b52e..a2ec0bc9e9fa 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1722,11 +1722,14 @@ static int storvsc_probe(struct hv_device *device,
+ 		max_targets = STORVSC_MAX_TARGETS;
+ 		max_channels = STORVSC_MAX_CHANNELS;
+ 		/*
+-		 * On Windows8 and above, we support sub-channels for storage.
++		 * On Windows8 and above, we support sub-channels for storage
++		 * on SCSI and FC controllers.
+ 		 * The number of sub-channels offerred is based on the number of
+ 		 * VCPUs in the guest.
+ 		 */
+-		max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
++		if (!dev_is_ide)
++			max_sub_channels =
++				(num_cpus - 1) / storvsc_vcpus_per_sub_channel;
+ 	}
+ 
+ 	scsi_driver.can_queue = (max_outstanding_req_per_channel *
+diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
+index c374e3b5c678..777e5f1e52d1 100644
+--- a/drivers/scsi/vmw_pvscsi.c
++++ b/drivers/scsi/vmw_pvscsi.c
+@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
+ 			break;
+ 
+ 		case BTSTAT_ABORTQUEUE:
+-			cmd->result = (DID_ABORT << 16);
++			cmd->result = (DID_BUS_BUSY << 16);
+ 			break;
+ 
+ 		case BTSTAT_SCSIPARITY:
+diff --git a/drivers/soc/bcm/raspberrypi-power.c b/drivers/soc/bcm/raspberrypi-power.c
+index fe96a8b956fb..f7ed1187518b 100644
+--- a/drivers/soc/bcm/raspberrypi-power.c
++++ b/drivers/soc/bcm/raspberrypi-power.c
+@@ -45,7 +45,7 @@ struct rpi_power_domains {
+ struct rpi_power_domain_packet {
+ 	u32 domain;
+ 	u32 on;
+-} __packet;
++};
+ 
+ /*
+  * Asks the firmware to enable or disable power on a specific power
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index 7428091d3f5b..bd00b7cc8b78 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -184,6 +184,11 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
+ 	struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
+ 	irqreturn_t ret = IRQ_NONE;
+ 
++	/* IRQ may be shared, so return if our interrupts are disabled */
++	if (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_CNTL1) &
++	      (BCM2835_AUX_SPI_CNTL1_TXEMPTY | BCM2835_AUX_SPI_CNTL1_IDLE)))
++		return ret;
++
+ 	/* check if we have data to read */
+ 	while (bs->rx_len &&
+ 	       (!(bcm2835aux_rd(bs, BCM2835_AUX_SPI_STAT) &
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index 5c9516ae4942..4a001634023e 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -313,6 +313,14 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+ 
+ 	while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
+ 	       (xspi->tx_bytes > 0)) {
++
++		/* When xspi in busy condition, bytes may send failed,
++		 * then spi control did't work thoroughly, add one byte delay
++		 */
++		if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
++		    CDNS_SPI_IXR_TXFULL)
++			usleep_range(10, 20);
++
+ 		if (xspi->txbuf)
+ 			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+ 		else
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index c5dcfb434a49..584118ed12eb 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -283,6 +283,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
+ 	}
+ 
+ 	k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
++	brps = min_t(int, brps, 32);
+ 
+ 	scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+ 	sh_msiof_write(p, TSCR, scr);
+diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
+index 0d99b242e82e..6cb933ecc084 100644
+--- a/drivers/target/target_core_pscsi.c
++++ b/drivers/target/target_core_pscsi.c
+@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 			bytes = min(bytes, data_len);
+ 
+ 			if (!bio) {
++new_bio:
+ 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+ 				nr_pages -= nr_vecs;
+ 				/*
+@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
+ 				 * be allocated with pscsi_get_bio() above.
+ 				 */
+ 				bio = NULL;
++				goto new_bio;
+ 			}
+ 
+ 			data_len -= bytes;
+diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
+index 6c4b200a4560..9dbbb3c3bf35 100644
+--- a/drivers/tee/tee_core.c
++++ b/drivers/tee/tee_core.c
+@@ -238,6 +238,17 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params,
+ 			if (IS_ERR(shm))
+ 				return PTR_ERR(shm);
+ 
++			/*
++			 * Ensure offset + size does not overflow offset
++			 * and does not overflow the size of the referred
++			 * shared memory object.
++			 */
++			if ((ip.a + ip.b) < ip.a ||
++			    (ip.a + ip.b) > shm->size) {
++				tee_shm_put(shm);
++				return -EINVAL;
++			}
++
+ 			params[n].u.memref.shm_offs = ip.a;
+ 			params[n].u.memref.size = ip.b;
+ 			params[n].u.memref.shm = shm;
+diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
+index 8a7f24dd9315..0c19fcd56a0d 100644
+--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
++++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
+@@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
+ 		return -EFAULT;
+ 	}
+ 
++	priv->priv = obj;
+ 	obj->max_state = p->package.count - 1;
+ 	obj->cdev =
+ 		thermal_cooling_device_register(acpi_device_bid(priv->adev),
+@@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv)
+ 	if (IS_ERR(obj->cdev))
+ 		result = PTR_ERR(obj->cdev);
+ 
+-	priv->priv = obj;
+-
+ 	kfree(buf.pointer);
+ 	/* TODO: add ACPI notification support */
+ 
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 0ee0c6d7f194..f4c42ac62789 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -2530,8 +2530,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
+ {
+ 	struct musb	*musb = hcd_to_musb(hcd);
+ 	u8		devctl;
++	int		ret;
+ 
+-	musb_port_suspend(musb, true);
++	ret = musb_port_suspend(musb, true);
++	if (ret)
++		return ret;
+ 
+ 	if (!is_host_active(musb))
+ 		return 0;
+diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
+index 72392bbcd0a4..2999845632ce 100644
+--- a/drivers/usb/musb/musb_host.h
++++ b/drivers/usb/musb/musb_host.h
+@@ -67,7 +67,7 @@ extern void musb_host_rx(struct musb *, u8);
+ extern void musb_root_disconnect(struct musb *musb);
+ extern void musb_host_resume_root_hub(struct musb *musb);
+ extern void musb_host_poke_root_hub(struct musb *musb);
+-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
++extern int musb_port_suspend(struct musb *musb, bool do_suspend);
+ extern void musb_port_reset(struct musb *musb, bool do_reset);
+ extern void musb_host_finish_resume(struct work_struct *work);
+ #else
+@@ -99,7 +99,10 @@ static inline void musb_root_disconnect(struct musb *musb)	{}
+ static inline void musb_host_resume_root_hub(struct musb *musb)	{}
+ static inline void musb_host_poll_rh_status(struct musb *musb)	{}
+ static inline void musb_host_poke_root_hub(struct musb *musb)	{}
+-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
++static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
++{
++	return 0;
++}
+ static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
+ static inline void musb_host_finish_resume(struct work_struct *work) {}
+ #endif
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 5165d2b07ade..2f8dd9826e94 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -48,14 +48,14 @@ void musb_host_finish_resume(struct work_struct *work)
+ 	spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ 
+-void musb_port_suspend(struct musb *musb, bool do_suspend)
++int musb_port_suspend(struct musb *musb, bool do_suspend)
+ {
+ 	struct usb_otg	*otg = musb->xceiv->otg;
+ 	u8		power;
+ 	void __iomem	*mbase = musb->mregs;
+ 
+ 	if (!is_host_active(musb))
+-		return;
++		return 0;
+ 
+ 	/* NOTE:  this doesn't necessarily put PHY into low power mode,
+ 	 * turning off its clock; that's a function of PHY integration and
+@@ -66,16 +66,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
+ 	if (do_suspend) {
+ 		int retries = 10000;
+ 
+-		power &= ~MUSB_POWER_RESUME;
+-		power |= MUSB_POWER_SUSPENDM;
+-		musb_writeb(mbase, MUSB_POWER, power);
++		if (power & MUSB_POWER_RESUME)
++			return -EBUSY;
+ 
+-		/* Needed for OPT A tests */
+-		power = musb_readb(mbase, MUSB_POWER);
+-		while (power & MUSB_POWER_SUSPENDM) {
++		if (!(power & MUSB_POWER_SUSPENDM)) {
++			power |= MUSB_POWER_SUSPENDM;
++			musb_writeb(mbase, MUSB_POWER, power);
++
++			/* Needed for OPT A tests */
+ 			power = musb_readb(mbase, MUSB_POWER);
+-			if (retries-- < 1)
+-				break;
++			while (power & MUSB_POWER_SUSPENDM) {
++				power = musb_readb(mbase, MUSB_POWER);
++				if (retries-- < 1)
++					break;
++			}
+ 		}
+ 
+ 		musb_dbg(musb, "Root port suspended, power %02x", power);
+@@ -111,6 +115,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
+ 		schedule_delayed_work(&musb->finish_resume_work,
+ 				      msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ 	}
++	return 0;
+ }
+ 
+ void musb_port_reset(struct musb *musb, bool do_reset)
+diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
+index 2719f5d382f7..7b01648c85ca 100644
+--- a/drivers/usb/typec/tps6598x.c
++++ b/drivers/usb/typec/tps6598x.c
+@@ -73,6 +73,7 @@ struct tps6598x {
+ 	struct device *dev;
+ 	struct regmap *regmap;
+ 	struct mutex lock; /* device lock */
++	u8 i2c_protocol:1;
+ 
+ 	struct typec_port *port;
+ 	struct typec_partner *partner;
+@@ -80,19 +81,39 @@ struct tps6598x {
+ 	struct typec_capability typec_cap;
+ };
+ 
++static int
++tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
++{
++	u8 data[len + 1];
++	int ret;
++
++	if (!tps->i2c_protocol)
++		return regmap_raw_read(tps->regmap, reg, val, len);
++
++	ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data));
++	if (ret)
++		return ret;
++
++	if (data[0] < len)
++		return -EIO;
++
++	memcpy(val, &data[1], len);
++	return 0;
++}
++
+ static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
+ {
+-	return regmap_raw_read(tps->regmap, reg, val, sizeof(u16));
++	return tps6598x_block_read(tps, reg, val, sizeof(u16));
+ }
+ 
+ static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
+ {
+-	return regmap_raw_read(tps->regmap, reg, val, sizeof(u32));
++	return tps6598x_block_read(tps, reg, val, sizeof(u32));
+ }
+ 
+ static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
+ {
+-	return regmap_raw_read(tps->regmap, reg, val, sizeof(u64));
++	return tps6598x_block_read(tps, reg, val, sizeof(u64));
+ }
+ 
+ static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
+@@ -121,8 +142,8 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps)
+ 	struct tps6598x_rx_identity_reg id;
+ 	int ret;
+ 
+-	ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP,
+-			      &id, sizeof(id));
++	ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP,
++				  &id, sizeof(id));
+ 	if (ret)
+ 		return ret;
+ 
+@@ -223,13 +244,13 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
+ 	} while (val);
+ 
+ 	if (out_len) {
+-		ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1,
+-				      out_data, out_len);
++		ret = tps6598x_block_read(tps, TPS_REG_DATA1,
++					  out_data, out_len);
+ 		if (ret)
+ 			return ret;
+ 		val = out_data[0];
+ 	} else {
+-		ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val);
++		ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8));
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -384,6 +405,16 @@ static int tps6598x_probe(struct i2c_client *client)
+ 	if (!vid)
+ 		return -ENODEV;
+ 
++	/*
++	 * Checking can the adapter handle SMBus protocol. If it can not, the
++	 * driver needs to take care of block reads separately.
++	 *
++	 * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol
++	 * unconditionally if the adapter has I2C_FUNC_I2C set.
++	 */
++	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
++		tps->i2c_protocol = true;
++
+ 	ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
+ 	if (ret < 0)
+ 		return ret;
+diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
+index b57891c1fd31..7afbea512207 100644
+--- a/drivers/usb/typec/ucsi/Makefile
++++ b/drivers/usb/typec/ucsi/Makefile
+@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI)	+= typec_ucsi.o
+ 
+ typec_ucsi-y			:= ucsi.o
+ 
+-typec_ucsi-$(CONFIG_FTRACE)	+= trace.o
++typec_ucsi-$(CONFIG_TRACING)	+= trace.o
+ 
+ obj-$(CONFIG_UCSI_ACPI)		+= ucsi_acpi.o
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 81a84b3c1c50..728870c9e6b4 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -403,7 +403,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
+ {
+ 	struct {
+ 		struct xsd_sockmsg hdr;
+-		const char body[16];
++		char body[16];
+ 	} msg;
+ 	int rc;
+ 
+@@ -412,6 +412,7 @@ static int xenbus_command_reply(struct xenbus_file_priv *u,
+ 	msg.hdr.len = strlen(reply) + 1;
+ 	if (msg.hdr.len > sizeof(msg.body))
+ 		return -E2BIG;
++	memcpy(&msg.body, reply, msg.hdr.len);
+ 
+ 	mutex_lock(&u->reply_mutex);
+ 	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
+diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
+index fd9f28b8a933..88391c7a8462 100644
+--- a/fs/afs/addr_list.c
++++ b/fs/afs/addr_list.c
+@@ -121,7 +121,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
+ 	p = text;
+ 	do {
+ 		struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs];
+-		char tdelim = delim;
++		const char *q, *stop;
+ 
+ 		if (*p == delim) {
+ 			p++;
+@@ -130,28 +130,33 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
+ 
+ 		if (*p == '[') {
+ 			p++;
+-			tdelim = ']';
++			q = memchr(p, ']', end - p);
++		} else {
++			for (q = p; q < end; q++)
++				if (*q == '+' || *q == delim)
++					break;
+ 		}
+ 
+-		if (in4_pton(p, end - p,
++		if (in4_pton(p, q - p,
+ 			     (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3],
+-			     tdelim, &p)) {
++			     -1, &stop)) {
+ 			srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
+ 			srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
+ 			srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
+-		} else if (in6_pton(p, end - p,
++		} else if (in6_pton(p, q - p,
+ 				    srx->transport.sin6.sin6_addr.s6_addr,
+-				    tdelim, &p)) {
++				    -1, &stop)) {
+ 			/* Nothing to do */
+ 		} else {
+ 			goto bad_address;
+ 		}
+ 
+-		if (tdelim == ']') {
+-			if (p == end || *p != ']')
+-				goto bad_address;
++		if (stop != q)
++			goto bad_address;
++
++		p = q;
++		if (q < end && *q == ']')
+ 			p++;
+-		}
+ 
+ 		if (p < end) {
+ 			if (*p == '+') {
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index f4291b576054..96125c9e3e17 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -23,36 +23,55 @@
+ /*
+  * Set up an interest-in-callbacks record for a volume on a server and
+  * register it with the server.
+- * - Called with volume->server_sem held.
++ * - Called with vnode->io_lock held.
+  */
+ int afs_register_server_cb_interest(struct afs_vnode *vnode,
+-				    struct afs_server_entry *entry)
++				    struct afs_server_list *slist,
++				    unsigned int index)
+ {
+-	struct afs_cb_interest *cbi = entry->cb_interest, *vcbi, *new, *x;
++	struct afs_server_entry *entry = &slist->servers[index];
++	struct afs_cb_interest *cbi, *vcbi, *new, *old;
+ 	struct afs_server *server = entry->server;
+ 
+ again:
++	if (vnode->cb_interest &&
++	    likely(vnode->cb_interest == entry->cb_interest))
++		return 0;
++
++	read_lock(&slist->lock);
++	cbi = afs_get_cb_interest(entry->cb_interest);
++	read_unlock(&slist->lock);
++
+ 	vcbi = vnode->cb_interest;
+ 	if (vcbi) {
+-		if (vcbi == cbi)
++		if (vcbi == cbi) {
++			afs_put_cb_interest(afs_v2net(vnode), cbi);
+ 			return 0;
++		}
+ 
++		/* Use a new interest in the server list for the same server
++		 * rather than an old one that's still attached to a vnode.
++		 */
+ 		if (cbi && vcbi->server == cbi->server) {
+ 			write_seqlock(&vnode->cb_lock);
+-			vnode->cb_interest = afs_get_cb_interest(cbi);
++			old = vnode->cb_interest;
++			vnode->cb_interest = cbi;
+ 			write_sequnlock(&vnode->cb_lock);
+-			afs_put_cb_interest(afs_v2net(vnode), cbi);
++			afs_put_cb_interest(afs_v2net(vnode), old);
+ 			return 0;
+ 		}
+ 
++		/* Re-use the one attached to the vnode. */
+ 		if (!cbi && vcbi->server == server) {
+-			afs_get_cb_interest(vcbi);
+-			x = cmpxchg(&entry->cb_interest, cbi, vcbi);
+-			if (x != cbi) {
+-				cbi = x;
+-				afs_put_cb_interest(afs_v2net(vnode), vcbi);
++			write_lock(&slist->lock);
++			if (entry->cb_interest) {
++				write_unlock(&slist->lock);
++				afs_put_cb_interest(afs_v2net(vnode), cbi);
+ 				goto again;
+ 			}
++
++			entry->cb_interest = cbi;
++			write_unlock(&slist->lock);
+ 			return 0;
+ 		}
+ 	}
+@@ -72,13 +91,16 @@ int afs_register_server_cb_interest(struct afs_vnode *vnode,
+ 		list_add_tail(&new->cb_link, &server->cb_interests);
+ 		write_unlock(&server->cb_break_lock);
+ 
+-		x = cmpxchg(&entry->cb_interest, cbi, new);
+-		if (x == cbi) {
++		write_lock(&slist->lock);
++		if (!entry->cb_interest) {
++			entry->cb_interest = afs_get_cb_interest(new);
+ 			cbi = new;
++			new = NULL;
+ 		} else {
+-			cbi = x;
+-			afs_put_cb_interest(afs_v2net(vnode), new);
++			cbi = afs_get_cb_interest(entry->cb_interest);
+ 		}
++		write_unlock(&slist->lock);
++		afs_put_cb_interest(afs_v2net(vnode), new);
+ 	}
+ 
+ 	ASSERT(cbi);
+@@ -88,11 +110,13 @@ int afs_register_server_cb_interest(struct afs_vnode *vnode,
+ 	 */
+ 	write_seqlock(&vnode->cb_lock);
+ 
+-	vnode->cb_interest = afs_get_cb_interest(cbi);
++	old = vnode->cb_interest;
++	vnode->cb_interest = cbi;
+ 	vnode->cb_s_break = cbi->server->cb_s_break;
+ 	clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ 
+ 	write_sequnlock(&vnode->cb_lock);
++	afs_put_cb_interest(afs_v2net(vnode), old);
+ 	return 0;
+ }
+ 
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 41e277f57b20..c0b53bfef490 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -341,7 +341,6 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
+  */
+ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
+ {
+-	struct sockaddr_rxrpc srx;
+ 	struct afs_server *server;
+ 	struct afs_uuid *r;
+ 	unsigned loop;
+@@ -398,8 +397,9 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
+ 
+ 	/* we'll need the file server record as that tells us which set of
+ 	 * vnodes to operate upon */
+-	rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
+-	server = afs_find_server(call->net, &srx);
++	rcu_read_lock();
++	server = afs_find_server_by_uuid(call->net, call->request);
++	rcu_read_unlock();
+ 	if (!server)
+ 		return -ENOTCONN;
+ 	call->cm_server = server;
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index f38d6a561a84..0aac3b5eb2ac 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -399,6 +399,7 @@ struct afs_server_list {
+ 	unsigned short		index;		/* Server currently in use */
+ 	unsigned short		vnovol_mask;	/* Servers to be skipped due to VNOVOL */
+ 	unsigned int		seq;		/* Set to ->servers_seq when installed */
++	rwlock_t		lock;
+ 	struct afs_server_entry	servers[];
+ };
+ 
+@@ -605,13 +606,15 @@ extern void afs_init_callback_state(struct afs_server *);
+ extern void afs_break_callback(struct afs_vnode *);
+ extern void afs_break_callbacks(struct afs_server *, size_t,struct afs_callback[]);
+ 
+-extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_entry *);
++extern int afs_register_server_cb_interest(struct afs_vnode *,
++					   struct afs_server_list *, unsigned int);
+ extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
+ extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *);
+ 
+ static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi)
+ {
+-	refcount_inc(&cbi->usage);
++	if (cbi)
++		refcount_inc(&cbi->usage);
+ 	return cbi;
+ }
+ 
+diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
+index ad1328d85526..9caf7410aff3 100644
+--- a/fs/afs/rotate.c
++++ b/fs/afs/rotate.c
+@@ -179,7 +179,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+ 			 */
+ 			if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
+ 				fc->ac.error = -EREMOTEIO;
+-				goto failed;
++				goto next_server;
+ 			}
+ 
+ 			write_lock(&vnode->volume->servers_lock);
+@@ -201,7 +201,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+ 			 */
+ 			if (vnode->volume->servers == fc->server_list) {
+ 				fc->ac.error = -EREMOTEIO;
+-				goto failed;
++				goto next_server;
+ 			}
+ 
+ 			/* Try again */
+@@ -350,8 +350,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+ 	 * break request before we've finished decoding the reply and
+ 	 * installing the vnode.
+ 	 */
+-	fc->ac.error = afs_register_server_cb_interest(
+-		vnode, &fc->server_list->servers[fc->index]);
++	fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list,
++						       fc->index);
+ 	if (fc->ac.error < 0)
+ 		goto failed;
+ 
+@@ -369,8 +369,16 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
+ 	if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
+ 		fc->ac.alist = afs_get_addrlist(alist);
+ 
+-		if (!afs_probe_fileserver(fc))
+-			goto failed;
++		if (!afs_probe_fileserver(fc)) {
++			switch (fc->ac.error) {
++			case -ENOMEM:
++			case -ERESTARTSYS:
++			case -EINTR:
++				goto failed;
++			default:
++				goto next_server;
++			}
++		}
+ 	}
+ 
+ 	if (!fc->ac.alist)
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index e1126659f043..e294a420d9db 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -41,6 +41,7 @@ int afs_open_socket(struct afs_net *net)
+ {
+ 	struct sockaddr_rxrpc srx;
+ 	struct socket *socket;
++	unsigned int min_level;
+ 	int ret;
+ 
+ 	_enter("");
+@@ -60,6 +61,12 @@ int afs_open_socket(struct afs_net *net)
+ 	srx.transport.sin6.sin6_family	= AF_INET6;
+ 	srx.transport.sin6.sin6_port	= htons(AFS_CM_PORT);
+ 
++	min_level = RXRPC_SECURITY_ENCRYPT;
++	ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
++				(void *)&min_level, sizeof(min_level));
++	if (ret < 0)
++		goto error_2;
++
+ 	ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+ 	if (ret == -EADDRINUSE) {
+ 		srx.transport.sin6.sin6_port = 0;
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index 1880f1b6a9f1..90f1ae7c3a1f 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -66,12 +66,6 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ 							      sizeof(struct in6_addr));
+ 					if (diff == 0)
+ 						goto found;
+-					if (diff < 0) {
+-						// TODO: Sort the list
+-						//if (i == alist->nr_ipv4)
+-						//	goto not_found;
+-						break;
+-					}
+ 				}
+ 			}
+ 		} else {
+@@ -85,17 +79,10 @@ struct afs_server *afs_find_server(struct afs_net *net,
+ 							(u32)b->sin6_addr.s6_addr32[3]);
+ 					if (diff == 0)
+ 						goto found;
+-					if (diff < 0) {
+-						// TODO: Sort the list
+-						//if (i == 0)
+-						//	goto not_found;
+-						break;
+-					}
+ 				}
+ 			}
+ 		}
+ 
+-	//not_found:
+ 		server = NULL;
+ 	found:
+ 		if (server && !atomic_inc_not_zero(&server->usage))
+@@ -426,8 +413,15 @@ static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
+ 		}
+ 		write_sequnlock(&net->fs_lock);
+ 
+-		if (deleted)
++		if (deleted) {
++			write_seqlock(&net->fs_addr_lock);
++			if (!hlist_unhashed(&server->addr4_link))
++				hlist_del_rcu(&server->addr4_link);
++			if (!hlist_unhashed(&server->addr6_link))
++				hlist_del_rcu(&server->addr6_link);
++			write_sequnlock(&net->fs_addr_lock);
+ 			afs_destroy_server(net, server);
++		}
+ 	}
+ }
+ 
+diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
+index 0f8dc4c8f07c..8a5760aa5832 100644
+--- a/fs/afs/server_list.c
++++ b/fs/afs/server_list.c
+@@ -49,6 +49,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
+ 		goto error;
+ 
+ 	refcount_set(&slist->usage, 1);
++	rwlock_init(&slist->lock);
+ 
+ 	/* Make sure a records exists for each server in the list. */
+ 	for (i = 0; i < vldb->nr_servers; i++) {
+@@ -64,9 +65,11 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
+ 			goto error_2;
+ 		}
+ 
+-		/* Insertion-sort by server pointer */
++		/* Insertion-sort by UUID */
+ 		for (j = 0; j < slist->nr_servers; j++)
+-			if (slist->servers[j].server >= server)
++			if (memcmp(&slist->servers[j].server->uuid,
++				   &server->uuid,
++				   sizeof(server->uuid)) >= 0)
+ 				break;
+ 		if (j < slist->nr_servers) {
+ 			if (slist->servers[j].server == server) {
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index e901ef6a4813..bda6175c2cbe 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -198,7 +198,7 @@ config CIFS_SMB311
+ 
+ config CIFS_SMB_DIRECT
+ 	bool "SMB Direct support (Experimental)"
+-	depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y
++	depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
+ 	help
+ 	  Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
+ 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 1c1940d90c96..097598543403 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -589,9 +589,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
+ 
+ 	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ 
++	/*
++	 * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's
++	 * not an error. Otherwise, the specified ea_name was not found.
++	 */
+ 	if (!rc)
+ 		rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data,
+ 					  SMB2_MAX_EA_BUF, ea_name);
++	else if (!ea_name && rc == -ENODATA)
++		rc = 0;
+ 
+ 	kfree(smb2_data);
+ 	return rc;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 8ae6a089489c..93d3f4a14b32 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -621,8 +621,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 
+ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ {
+-	int rc = 0;
+-	struct validate_negotiate_info_req vneg_inbuf;
++	int rc;
++	struct validate_negotiate_info_req *pneg_inbuf;
+ 	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
+ 	u32 rsplen;
+ 	u32 inbuflen; /* max of 4 dialects */
+@@ -656,63 +656,69 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
+ 		cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
+ 
+-	vneg_inbuf.Capabilities =
++	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
++	if (!pneg_inbuf)
++		return -ENOMEM;
++
++	pneg_inbuf->Capabilities =
+ 			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+-	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
++	memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid,
+ 					SMB2_CLIENT_GUID_SIZE);
+ 
+ 	if (tcon->ses->sign)
+-		vneg_inbuf.SecurityMode =
++		pneg_inbuf->SecurityMode =
+ 			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
+ 	else if (global_secflags & CIFSSEC_MAY_SIGN)
+-		vneg_inbuf.SecurityMode =
++		pneg_inbuf->SecurityMode =
+ 			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
+ 	else
+-		vneg_inbuf.SecurityMode = 0;
++		pneg_inbuf->SecurityMode = 0;
+ 
+ 
+ 	if (strcmp(tcon->ses->server->vals->version_string,
+ 		SMB3ANY_VERSION_STRING) == 0) {
+-		vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+-		vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+-		vneg_inbuf.DialectCount = cpu_to_le16(2);
++		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++		pneg_inbuf->DialectCount = cpu_to_le16(2);
+ 		/* structure is big enough for 3 dialects, sending only 2 */
+-		inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
++		inbuflen = sizeof(*pneg_inbuf) -
++				sizeof(pneg_inbuf->Dialects[0]);
+ 	} else if (strcmp(tcon->ses->server->vals->version_string,
+ 		SMBDEFAULT_VERSION_STRING) == 0) {
+-		vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+-		vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+-		vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+-		vneg_inbuf.DialectCount = cpu_to_le16(3);
++		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++		pneg_inbuf->DialectCount = cpu_to_le16(3);
+ 		/* structure is big enough for 3 dialects */
+-		inbuflen = sizeof(struct validate_negotiate_info_req);
++		inbuflen = sizeof(*pneg_inbuf);
+ 	} else {
+ 		/* otherwise specific dialect was requested */
+-		vneg_inbuf.Dialects[0] =
++		pneg_inbuf->Dialects[0] =
+ 			cpu_to_le16(tcon->ses->server->vals->protocol_id);
+-		vneg_inbuf.DialectCount = cpu_to_le16(1);
++		pneg_inbuf->DialectCount = cpu_to_le16(1);
+ 		/* structure is big enough for 3 dialects, sending only 1 */
+-		inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
++		inbuflen = sizeof(*pneg_inbuf) -
++				sizeof(pneg_inbuf->Dialects[0]) * 2;
+ 	}
+ 
+ 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ 		FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+-		(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
+-		(char **)&pneg_rsp, &rsplen);
++		(char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
+ 
+ 	if (rc != 0) {
+ 		cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
+-		return -EIO;
++		rc = -EIO;
++		goto out_free_inbuf;
+ 	}
+ 
+-	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
++	rc = -EIO;
++	if (rsplen != sizeof(*pneg_rsp)) {
+ 		cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+ 			 rsplen);
+ 
+ 		/* relax check since Mac returns max bufsize allowed on ioctl */
+-		if ((rsplen > CIFSMaxBufSize)
+-		     || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
+-			goto err_rsp_free;
++		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
++			goto out_free_rsp;
+ 	}
+ 
+ 	/* check validate negotiate info response matches what we got earlier */
+@@ -729,15 +735,17 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 		goto vneg_out;
+ 
+ 	/* validate negotiate successful */
++	rc = 0;
+ 	cifs_dbg(FYI, "validate negotiate info successful\n");
+-	kfree(pneg_rsp);
+-	return 0;
++	goto out_free_rsp;
+ 
+ vneg_out:
+ 	cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+-err_rsp_free:
++out_free_rsp:
+ 	kfree(pneg_rsp);
+-	return -EIO;
++out_free_inbuf:
++	kfree(pneg_inbuf);
++	return rc;
+ }
+ 
+ enum securityEnum
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 1b5cd3b8617c..a56abb46613e 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -833,8 +833,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
+ 	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ 		new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
+ 				  GFP_KERNEL);
+-		if (!new_iov)
++		if (!new_iov) {
++			/* otherwise cifs_send_recv below sets resp_buf_type */
++			*resp_buf_type = CIFS_NO_BUFFER;
+ 			return -ENOMEM;
++		}
+ 	} else
+ 		new_iov = s_iov;
+ 
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 846ca150d52e..4dd842f72846 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1997,6 +1997,16 @@ int ecryptfs_encrypt_and_encode_filename(
+ 	return rc;
+ }
+ 
++static bool is_dot_dotdot(const char *name, size_t name_size)
++{
++	if (name_size == 1 && name[0] == '.')
++		return true;
++	else if (name_size == 2 && name[0] == '.' && name[1] == '.')
++		return true;
++
++	return false;
++}
++
+ /**
+  * ecryptfs_decode_and_decrypt_filename - converts the encoded cipher text name to decoded plaintext
+  * @plaintext_name: The plaintext name
+@@ -2021,13 +2031,21 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
+ 	size_t packet_size;
+ 	int rc = 0;
+ 
+-	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)
+-	    && !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+-	    && (name_size > ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)
+-	    && (strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
+-			ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE) == 0)) {
+-		const char *orig_name = name;
+-		size_t orig_name_size = name_size;
++	if ((mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) &&
++	    !(mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)) {
++		if (is_dot_dotdot(name, name_size)) {
++			rc = ecryptfs_copy_filename(plaintext_name,
++						    plaintext_name_size,
++						    name, name_size);
++			goto out;
++		}
++
++		if (name_size <= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE ||
++		    strncmp(name, ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX,
++			    ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE)) {
++			rc = -EINVAL;
++			goto out;
++		}
+ 
+ 		name += ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
+ 		name_size -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
+@@ -2047,12 +2065,9 @@ int ecryptfs_decode_and_decrypt_filename(char **plaintext_name,
+ 						  decoded_name,
+ 						  decoded_name_size);
+ 		if (rc) {
+-			printk(KERN_INFO "%s: Could not parse tag 70 packet "
+-			       "from filename; copying through filename "
+-			       "as-is\n", __func__);
+-			rc = ecryptfs_copy_filename(plaintext_name,
+-						    plaintext_name_size,
+-						    orig_name, orig_name_size);
++			ecryptfs_printk(KERN_DEBUG,
++					"%s: Could not parse tag 70 packet from filename\n",
++					__func__);
+ 			goto out_free;
+ 		}
+ 	} else {
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index c74ed3ca3372..b76a9853325e 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -82,17 +82,28 @@ ecryptfs_filldir(struct dir_context *ctx, const char *lower_name,
+ 						  buf->sb, lower_name,
+ 						  lower_namelen);
+ 	if (rc) {
+-		printk(KERN_ERR "%s: Error attempting to decode and decrypt "
+-		       "filename [%s]; rc = [%d]\n", __func__, lower_name,
+-		       rc);
+-		goto out;
++		if (rc != -EINVAL) {
++			ecryptfs_printk(KERN_DEBUG,
++					"%s: Error attempting to decode and decrypt filename [%s]; rc = [%d]\n",
++					__func__, lower_name, rc);
++			return rc;
++		}
++
++		/* Mask -EINVAL errors as these are most likely due a plaintext
++		 * filename present in the lower filesystem despite filename
++		 * encryption being enabled. One unavoidable example would be
++		 * the "lost+found" dentry in the root directory of an Ext4
++		 * filesystem.
++		 */
++		return 0;
+ 	}
++
+ 	buf->caller->pos = buf->ctx.pos;
+ 	rc = !dir_emit(buf->caller, name, name_size, ino, d_type);
+ 	kfree(name);
+ 	if (!rc)
+ 		buf->entries_written++;
+-out:
++
+ 	return rc;
+ }
+ 
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index bc258a4402f6..ec3fba7d492f 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -394,7 +394,10 @@ static int parse_options(char *options, struct iso9660_options *popt)
+ 			break;
+ #ifdef CONFIG_JOLIET
+ 		case Opt_iocharset:
++			kfree(popt->iocharset);
+ 			popt->iocharset = match_strdup(&args[0]);
++			if (!popt->iocharset)
++				return 0;
+ 			break;
+ #endif
+ 		case Opt_map_a:
+diff --git a/fs/namespace.c b/fs/namespace.c
+index c3ed9dc78655..cb20c4ee97fc 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2810,7 +2810,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
+ 		mnt_flags |= MNT_NODIRATIME;
+ 	if (flags & MS_STRICTATIME)
+ 		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
+-	if (flags & SB_RDONLY)
++	if (flags & MS_RDONLY)
+ 		mnt_flags |= MNT_READONLY;
+ 
+ 	/* The default atime for remount is preservation */
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 219b269c737e..613ec7e5a465 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -192,8 +192,9 @@ static int send_to_group(struct inode *to_tell,
+ 			 struct fsnotify_iter_info *iter_info)
+ {
+ 	struct fsnotify_group *group = NULL;
+-	__u32 inode_test_mask = 0;
+-	__u32 vfsmount_test_mask = 0;
++	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
++	__u32 marks_mask = 0;
++	__u32 marks_ignored_mask = 0;
+ 
+ 	if (unlikely(!inode_mark && !vfsmount_mark)) {
+ 		BUG();
+@@ -213,29 +214,25 @@ static int send_to_group(struct inode *to_tell,
+ 	/* does the inode mark tell us to do something? */
+ 	if (inode_mark) {
+ 		group = inode_mark->group;
+-		inode_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+-		inode_test_mask &= inode_mark->mask;
+-		inode_test_mask &= ~inode_mark->ignored_mask;
++		marks_mask |= inode_mark->mask;
++		marks_ignored_mask |= inode_mark->ignored_mask;
+ 	}
+ 
+ 	/* does the vfsmount_mark tell us to do something? */
+ 	if (vfsmount_mark) {
+-		vfsmount_test_mask = (mask & ~FS_EVENT_ON_CHILD);
+ 		group = vfsmount_mark->group;
+-		vfsmount_test_mask &= vfsmount_mark->mask;
+-		vfsmount_test_mask &= ~vfsmount_mark->ignored_mask;
+-		if (inode_mark)
+-			vfsmount_test_mask &= ~inode_mark->ignored_mask;
++		marks_mask |= vfsmount_mark->mask;
++		marks_ignored_mask |= vfsmount_mark->ignored_mask;
+ 	}
+ 
+ 	pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
+-		 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
++		 " vfsmount_mark=%p marks_mask=%x marks_ignored_mask=%x"
+ 		 " data=%p data_is=%d cookie=%d\n",
+-		 __func__, group, to_tell, mask, inode_mark,
+-		 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
++		 __func__, group, to_tell, mask, inode_mark, vfsmount_mark,
++		 marks_mask, marks_ignored_mask, data,
+ 		 data_is, cookie);
+ 
+-	if (!inode_test_mask && !vfsmount_test_mask)
++	if (!(test_mask & marks_mask & ~marks_ignored_mask))
+ 		return 0;
+ 
+ 	return group->ops->handle_event(group, to_tell, inode_mark,
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index ab156e35ec00..1b1283f07941 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -4250,10 +4250,11 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
+ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 			 struct dentry *new_dentry, bool preserve)
+ {
+-	int error;
++	int error, had_lock;
+ 	struct inode *inode = d_inode(old_dentry);
+ 	struct buffer_head *old_bh = NULL;
+ 	struct inode *new_orphan_inode = NULL;
++	struct ocfs2_lock_holder oh;
+ 
+ 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ 		return -EOPNOTSUPP;
+@@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 		goto out;
+ 	}
+ 
++	had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
++					    &oh);
++	if (had_lock < 0) {
++		error = had_lock;
++		mlog_errno(error);
++		goto out;
++	}
++
+ 	/* If the security isn't preserved, we need to re-initialize them. */
+ 	if (!preserve) {
+ 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+@@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
+-out:
+ 	if (!error) {
+ 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ 						       new_dentry);
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
++	ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
+ 
++out:
+ 	if (new_orphan_inode) {
+ 		/*
+ 		 * We need to open_unlock the inode no matter whether we
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index f034eccd8616..d256b24f7d28 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode,
+ 	kuid_t uid;
+ 	kgid_t gid;
+ 
++	if (unlikely(task->flags & PF_KTHREAD)) {
++		*ruid = GLOBAL_ROOT_UID;
++		*rgid = GLOBAL_ROOT_GID;
++		return;
++	}
++
+ 	/* Default to the tasks effective ownership */
+ 	rcu_read_lock();
+ 	cred = __task_cred(task);
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index d1e82761de81..e64ecb9f2720 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
+ {
+ 	struct list_head *head = (struct list_head *)arg;
+ 	struct kcore_list *ent;
++	struct page *p;
++
++	if (!pfn_valid(pfn))
++		return 1;
++
++	p = pfn_to_page(pfn);
++	if (!memmap_valid_within(pfn, p, page_zone(p)))
++		return 1;
+ 
+ 	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ 	if (!ent)
+ 		return -ENOMEM;
+-	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
++	ent->addr = (unsigned long)page_to_virt(p);
+ 	ent->size = nr_pages << PAGE_SHIFT;
+ 
+-	/* Sanity check: Can happen in 32bit arch...maybe */
+-	if (ent->addr < (unsigned long) __va(0))
++	if (!virt_addr_valid(ent->addr))
+ 		goto free_out;
+ 
+ 	/* cut not-mapped area. ....from ppc-32 code. */
+ 	if (ULONG_MAX - ent->addr < ent->size)
+ 		ent->size = ULONG_MAX - ent->addr;
+ 
+-	/* cut when vmalloc() area is higher than direct-map area */
+-	if (VMALLOC_START > (unsigned long)__va(0)) {
+-		if (ent->addr > VMALLOC_START)
+-			goto free_out;
++	/*
++	 * We've already checked virt_addr_valid so we know this address
++	 * is a valid pointer, therefore we can check against it to determine
++	 * if we need to trim
++	 */
++	if (VMALLOC_START > ent->addr) {
+ 		if (VMALLOC_START - ent->addr < ent->size)
+ 			ent->size = VMALLOC_START - ent->addr;
+ 	}
+diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
+index a000d7547479..b572cc865b92 100644
+--- a/fs/proc/loadavg.c
++++ b/fs/proc/loadavg.c
+@@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v)
+ 		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+ 		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
+ 		nr_running(), nr_threads,
+-		idr_get_cursor(&task_active_pid_ns(current)->idr));
++		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
+ 	return 0;
+ }
+ 
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index ec6d2983a5cb..dd1b2aeb01e8 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1329,9 +1329,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ 		else if (is_swap_pmd(pmd)) {
+ 			swp_entry_t entry = pmd_to_swp_entry(pmd);
++			unsigned long offset = swp_offset(entry);
+ 
++			offset += (addr & ~PMD_MASK) >> PAGE_SHIFT;
+ 			frame = swp_type(entry) |
+-				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
++				(offset << MAX_SWAPFILES_SHIFT);
+ 			flags |= PM_SWAP;
+ 			if (pmd_swp_soft_dirty(pmd))
+ 				flags |= PM_SOFT_DIRTY;
+@@ -1351,6 +1353,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ 				break;
+ 			if (pm->show_pfn && (flags & PM_PRESENT))
+ 				frame++;
++			else if (flags & PM_SWAP)
++				frame += (1 << MAX_SWAPFILES_SHIFT);
+ 		}
+ 		spin_unlock(ptl);
+ 		return err;
+diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
+index d3339dd48b1a..b324e01ccf2d 100644
+--- a/include/linux/brcmphy.h
++++ b/include/linux/brcmphy.h
+@@ -25,6 +25,7 @@
+ #define PHY_ID_BCM54612E		0x03625e60
+ #define PHY_ID_BCM54616S		0x03625d10
+ #define PHY_ID_BCM57780			0x03625d90
++#define PHY_ID_BCM89610			0x03625cd0
+ 
+ #define PHY_ID_BCM7250			0xae025280
+ #define PHY_ID_BCM7260			0xae025190
+diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
+index f711be6e8c44..f3ae6ae7e786 100644
+--- a/include/linux/clk-provider.h
++++ b/include/linux/clk-provider.h
+@@ -755,6 +755,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
+ int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
+ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+ 				     struct clk_rate_request *req);
++int clk_mux_determine_rate_flags(struct clk_hw *hw,
++				 struct clk_rate_request *req,
++				 unsigned long flags);
+ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+ 			   unsigned long max_rate);
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 2ec41a7eb54f..35e5954a5a15 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
+  *	fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
+  *	instead of the latter), any change to them will be overwritten
+  *	by kernel. Returns a negative error code or zero.
++ * @get_fecparam: Get the network device Forward Error Correction parameters.
++ * @set_fecparam: Set the network device Forward Error Correction parameters.
+  *
+  * All operations are optional (i.e. the function pointer may be set
+  * to %NULL) and callers must take this into account.  Callers must
+diff --git a/include/linux/genhd.h b/include/linux/genhd.h
+index c826b0b5232a..6cb8a5789668 100644
+--- a/include/linux/genhd.h
++++ b/include/linux/genhd.h
+@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
+ 	part_stat_add(cpu, gendiskp, field, -subnd)
+ 
+ void part_in_flight(struct request_queue *q, struct hd_struct *part,
+-			unsigned int inflight[2]);
++		    unsigned int inflight[2]);
++void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
++		       unsigned int inflight[2]);
+ void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
+ 			int rw);
+ void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
+diff --git a/include/linux/kthread.h b/include/linux/kthread.h
+index c1961761311d..2803264c512f 100644
+--- a/include/linux/kthread.h
++++ b/include/linux/kthread.h
+@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k);
+ int kthread_park(struct task_struct *k);
+ void kthread_unpark(struct task_struct *k);
+ void kthread_parkme(void);
++void kthread_park_complete(struct task_struct *k);
+ 
+ int kthreadd(void *unused);
+ extern struct task_struct *kthreadd_task;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 6930c63126c7..6d6e79c59e68 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+ 
+ #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+ 
+-#ifdef CONFIG_S390
+-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
+-#elif defined(CONFIG_ARM64)
+-#define KVM_MAX_IRQ_ROUTES 4096
+-#else
+-#define KVM_MAX_IRQ_ROUTES 1024
+-#endif
++#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */
+ 
+ bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
+ int kvm_set_irq_routing(struct kvm *kvm,
+diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
+index eb492d47f717..8f9c90379732 100644
+--- a/include/linux/microchipphy.h
++++ b/include/linux/microchipphy.h
+@@ -70,4 +70,12 @@
+ #define	LAN88XX_MMD3_CHIP_ID			(32877)
+ #define	LAN88XX_MMD3_CHIP_REV			(32878)
+ 
++/* DSP registers */
++#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG		(0x806A)
++#define PHY_ARDENNES_MMD_DEV_3_PHY_CFG_ZD_DLY_EN_	(0x2000)
++#define LAN88XX_EXT_PAGE_ACCESS_TR		(0x52B5)
++#define LAN88XX_EXT_PAGE_TR_CR			16
++#define LAN88XX_EXT_PAGE_TR_LOW_DATA		17
++#define LAN88XX_EXT_PAGE_TR_HIGH_DATA		18
++
+ #endif /* _MICROCHIPPHY_H */
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index b5b43f94f311..01b990e4b228 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -312,7 +312,7 @@ void map_destroy(struct mtd_info *mtd);
+ ({									\
+ 	int i, ret = 1;							\
+ 	for (i = 0; i < map_words(map); i++) {				\
+-		if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) {	\
++		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
+ 			ret = 0;					\
+ 			break;						\
+ 		}							\
+diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
+index 56c5570aadbe..694f718d012f 100644
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -824,12 +824,18 @@ struct nand_op_instr {
+  * tBERS (during an erase) which all of them are u64 values that cannot be
+  * divided by usual kernel macros and must be handled with the special
+  * DIV_ROUND_UP_ULL() macro.
++ *
++ * Cast to type of dividend is needed here to guarantee that the result won't
++ * be an unsigned long long when the dividend is an unsigned long (or smaller),
++ * which is what the compiler does when it sees ternary operator with 2
++ * different return types (picks the largest type to make sure there's no
++ * loss).
+  */
+-#define __DIVIDE(dividend, divisor) ({					\
+-	sizeof(dividend) == sizeof(u32) ?				\
+-		DIV_ROUND_UP(dividend, divisor) :			\
+-		DIV_ROUND_UP_ULL(dividend, divisor);			\
+-		})
++#define __DIVIDE(dividend, divisor) ({						\
++	(__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ?	\
++			       DIV_ROUND_UP(dividend, divisor) :		\
++			       DIV_ROUND_UP_ULL(dividend, divisor)); 		\
++	})
+ #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+ #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+ 
+diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
+index b1f37a89e368..79b99d653e03 100644
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -133,7 +133,7 @@ static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
+ 	lock_release(&sem->rw_sem.dep_map, 1, ip);
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ 	if (!read)
+-		sem->rw_sem.owner = NULL;
++		sem->rw_sem.owner = RWSEM_OWNER_UNKNOWN;
+ #endif
+ }
+ 
+@@ -141,6 +141,10 @@ static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
+ 					bool read, unsigned long ip)
+ {
+ 	lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
++#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
++	if (!read)
++		sem->rw_sem.owner = current;
++#endif
+ }
+ 
+ #endif
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 56707d5ff6ad..ab93b6eae696 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -44,6 +44,12 @@ struct rw_semaphore {
+ #endif
+ };
+ 
++/*
++ * Setting bit 0 of the owner field with other non-zero bits will indicate
++ * that the rwsem is writer-owned with an unknown owner.
++ */
++#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-1L)
++
+ extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+ extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
+ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 710508af02c8..8145cb4ee838 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -113,17 +113,36 @@ struct task_group;
+ 
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ 
++/*
++ * Special states are those that do not use the normal wait-loop pattern. See
++ * the comment with set_special_state().
++ */
++#define is_special_task_state(state)				\
++	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
++
+ #define __set_current_state(state_value)			\
+ 	do {							\
++		WARN_ON_ONCE(is_special_task_state(state_value));\
+ 		current->task_state_change = _THIS_IP_;		\
+ 		current->state = (state_value);			\
+ 	} while (0)
++
+ #define set_current_state(state_value)				\
+ 	do {							\
++		WARN_ON_ONCE(is_special_task_state(state_value));\
+ 		current->task_state_change = _THIS_IP_;		\
+ 		smp_store_mb(current->state, (state_value));	\
+ 	} while (0)
+ 
++#define set_special_state(state_value)					\
++	do {								\
++		unsigned long flags; /* may shadow */			\
++		WARN_ON_ONCE(!is_special_task_state(state_value));	\
++		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
++		current->task_state_change = _THIS_IP_;			\
++		current->state = (state_value);				\
++		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
++	} while (0)
+ #else
+ /*
+  * set_current_state() includes a barrier so that the write of current->state
+@@ -145,8 +164,8 @@ struct task_group;
+  *
+  * The above is typically ordered against the wakeup, which does:
+  *
+- *	need_sleep = false;
+- *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
++ *   need_sleep = false;
++ *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
+  *
+  * Where wake_up_state() (and all other wakeup primitives) imply enough
+  * barriers to order the store of the variable against wakeup.
+@@ -155,12 +174,33 @@ struct task_group;
+  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
+  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
+  *
+- * This is obviously fine, since they both store the exact same value.
++ * However, with slightly different timing the wakeup TASK_RUNNING store can
++ * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
++ * a problem either because that will result in one extra go around the loop
++ * and our @cond test will save the day.
+  *
+  * Also see the comments of try_to_wake_up().
+  */
+-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
+-#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
++#define __set_current_state(state_value)				\
++	current->state = (state_value)
++
++#define set_current_state(state_value)					\
++	smp_store_mb(current->state, (state_value))
++
++/*
++ * set_special_state() should be used for those states when the blocking task
++ * can not use the regular condition based wait-loop. In that case we must
++ * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
++ * will not collide with our state change.
++ */
++#define set_special_state(state_value)					\
++	do {								\
++		unsigned long flags; /* may shadow */			\
++		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
++		current->state = (state_value);				\
++		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
++	} while (0)
++
+ #endif
+ 
+ /* Task command name length: */
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
+index 23b4f9cb82db..acf701e057af 100644
+--- a/include/linux/sched/signal.h
++++ b/include/linux/sched/signal.h
+@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
+ {
+ 	spin_lock_irq(&current->sighand->siglock);
+ 	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+-		__set_current_state(TASK_STOPPED);
++		set_special_state(TASK_STOPPED);
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 
+ 	schedule();
+diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
+index e8f0f852968f..c0c5c5b73dc0 100644
+--- a/include/linux/stringhash.h
++++ b/include/linux/stringhash.h
+@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
+  * losing bits).  This also has the property (wanted by the dcache)
+  * that the msbits make a good hash table index.
+  */
+-static inline unsigned long end_name_hash(unsigned long hash)
++static inline unsigned int end_name_hash(unsigned long hash)
+ {
+-	return __hash_32((unsigned int)hash);
++	return hash_long(hash, 32);
+ }
+ 
+ /*
+diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
+index cb979ad90401..b86c4c367004 100644
+--- a/include/soc/bcm2835/raspberrypi-firmware.h
++++ b/include/soc/bcm2835/raspberrypi-firmware.h
+@@ -125,13 +125,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
+ static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
+ 					void *data, size_t len)
+ {
+-	return 0;
++	return -ENOSYS;
+ }
+ 
+ static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
+ 					     void *data, size_t tag_size)
+ {
+-	return 0;
++	return -ENOSYS;
+ }
+ 
+ static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+diff --git a/init/main.c b/init/main.c
+index 21efbf6ace93..dacaf589226a 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -981,6 +981,13 @@ __setup("rodata=", set_debug_rodata);
+ static void mark_readonly(void)
+ {
+ 	if (rodata_enabled) {
++		/*
++		 * load_module() results in W+X mappings, which are cleaned up
++		 * with call_rcu_sched().  Let's make sure that queued work is
++		 * flushed so that we don't hit false positives looking for
++		 * insecure pages which are W+X.
++		 */
++		rcu_barrier_sched();
+ 		mark_rodata_ro();
+ 		rodata_test();
+ 	} else
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 43f95d190eea..d18c8bf4051b 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -26,6 +26,7 @@
+ #include <linux/cred.h>
+ #include <linux/timekeeping.h>
+ #include <linux/ctype.h>
++#include <linux/nospec.h>
+ 
+ #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
+ 			   (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
+@@ -102,12 +103,14 @@ const struct bpf_map_ops bpf_map_offload_ops = {
+ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
+ {
+ 	const struct bpf_map_ops *ops;
++	u32 type = attr->map_type;
+ 	struct bpf_map *map;
+ 	int err;
+ 
+-	if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
++	if (type >= ARRAY_SIZE(bpf_map_types))
+ 		return ERR_PTR(-EINVAL);
+-	ops = bpf_map_types[attr->map_type];
++	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
++	ops = bpf_map_types[type];
+ 	if (!ops)
+ 		return ERR_PTR(-EINVAL);
+ 
+@@ -122,7 +125,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
+ 	if (IS_ERR(map))
+ 		return map;
+ 	map->ops = ops;
+-	map->map_type = attr->map_type;
++	map->map_type = type;
+ 	return map;
+ }
+ 
+@@ -869,11 +872,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = {
+ 
+ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
+ {
+-	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
++	const struct bpf_prog_ops *ops;
++
++	if (type >= ARRAY_SIZE(bpf_prog_types))
++		return -EINVAL;
++	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
++	ops = bpf_prog_types[type];
++	if (!ops)
+ 		return -EINVAL;
+ 
+ 	if (!bpf_prog_is_dev_bound(prog->aux))
+-		prog->aux->ops = bpf_prog_types[type];
++		prog->aux->ops = ops;
+ 	else
+ 		prog->aux->ops = &bpf_offload_prog_ops;
+ 	prog->type = type;
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index cd50e99202b0..2017a39ab490 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -55,7 +55,6 @@ enum KTHREAD_BITS {
+ 	KTHREAD_IS_PER_CPU = 0,
+ 	KTHREAD_SHOULD_STOP,
+ 	KTHREAD_SHOULD_PARK,
+-	KTHREAD_IS_PARKED,
+ };
+ 
+ static inline void set_kthread_struct(void *kthread)
+@@ -177,14 +176,12 @@ void *kthread_probe_data(struct task_struct *task)
+ 
+ static void __kthread_parkme(struct kthread *self)
+ {
+-	__set_current_state(TASK_PARKED);
+-	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
+-		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
+-			complete(&self->parked);
++	for (;;) {
++		set_current_state(TASK_PARKED);
++		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
++			break;
+ 		schedule();
+-		__set_current_state(TASK_PARKED);
+ 	}
+-	clear_bit(KTHREAD_IS_PARKED, &self->flags);
+ 	__set_current_state(TASK_RUNNING);
+ }
+ 
+@@ -194,6 +191,11 @@ void kthread_parkme(void)
+ }
+ EXPORT_SYMBOL_GPL(kthread_parkme);
+ 
++void kthread_park_complete(struct task_struct *k)
++{
++	complete(&to_kthread(k)->parked);
++}
++
+ static int kthread(void *_create)
+ {
+ 	/* Copy data: it's on kthread's stack */
+@@ -450,22 +452,15 @@ void kthread_unpark(struct task_struct *k)
+ {
+ 	struct kthread *kthread = to_kthread(k);
+ 
+-	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ 	/*
+-	 * We clear the IS_PARKED bit here as we don't wait
+-	 * until the task has left the park code. So if we'd
+-	 * park before that happens we'd see the IS_PARKED bit
+-	 * which might be about to be cleared.
++	 * Newly created kthread was parked when the CPU was offline.
++	 * The binding was lost and we need to set it again.
+ 	 */
+-	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+-		/*
+-		 * Newly created kthread was parked when the CPU was offline.
+-		 * The binding was lost and we need to set it again.
+-		 */
+-		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+-			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+-		wake_up_state(k, TASK_PARKED);
+-	}
++	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
++		__kthread_bind(k, kthread->cpu, TASK_PARKED);
++
++	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
++	wake_up_state(k, TASK_PARKED);
+ }
+ EXPORT_SYMBOL_GPL(kthread_unpark);
+ 
+@@ -488,12 +483,13 @@ int kthread_park(struct task_struct *k)
+ 	if (WARN_ON(k->flags & PF_EXITING))
+ 		return -ENOSYS;
+ 
+-	if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+-		set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+-		if (k != current) {
+-			wake_up_process(k);
+-			wait_for_completion(&kthread->parked);
+-		}
++	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
++		return -EBUSY;
++
++	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
++	if (k != current) {
++		wake_up_process(k);
++		wait_for_completion(&kthread->parked);
+ 	}
+ 
+ 	return 0;
+diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
+index e795908f3607..a90336779375 100644
+--- a/kernel/locking/rwsem-xadd.c
++++ b/kernel/locking/rwsem-xadd.c
+@@ -352,16 +352,15 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
+ 	struct task_struct *owner;
+ 	bool ret = true;
+ 
++	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));
++
+ 	if (need_resched())
+ 		return false;
+ 
+ 	rcu_read_lock();
+ 	owner = READ_ONCE(sem->owner);
+-	if (!rwsem_owner_is_writer(owner)) {
+-		/*
+-		 * Don't spin if the rwsem is readers owned.
+-		 */
+-		ret = !rwsem_owner_is_reader(owner);
++	if (!owner || !is_rwsem_owner_spinnable(owner)) {
++		ret = !owner;	/* !owner is spinnable */
+ 		goto done;
+ 	}
+ 
+@@ -382,11 +381,11 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+ {
+ 	struct task_struct *owner = READ_ONCE(sem->owner);
+ 
+-	if (!rwsem_owner_is_writer(owner))
+-		goto out;
++	if (!is_rwsem_owner_spinnable(owner))
++		return false;
+ 
+ 	rcu_read_lock();
+-	while (sem->owner == owner) {
++	while (owner && (READ_ONCE(sem->owner) == owner)) {
+ 		/*
+ 		 * Ensure we emit the owner->on_cpu, dereference _after_
+ 		 * checking sem->owner still matches owner, if that fails,
+@@ -408,12 +407,12 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+ 		cpu_relax();
+ 	}
+ 	rcu_read_unlock();
+-out:
++
+ 	/*
+ 	 * If there is a new owner or the owner is not set, we continue
+ 	 * spinning.
+ 	 */
+-	return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
++	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+ }
+ 
+ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index f549c552dbf1..abbf506b1c72 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -217,5 +217,3 @@ void up_read_non_owner(struct rw_semaphore *sem)
+ EXPORT_SYMBOL(up_read_non_owner);
+ 
+ #endif
+-
+-
+diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
+index a883b8f1fdc6..410ee7b9ac2c 100644
+--- a/kernel/locking/rwsem.h
++++ b/kernel/locking/rwsem.h
+@@ -1,20 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+  * The owner field of the rw_semaphore structure will be set to
+- * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear
++ * RWSEM_READER_OWNED when a reader grabs the lock. A writer will clear
+  * the owner field when it unlocks. A reader, on the other hand, will
+  * not touch the owner field when it unlocks.
+  *
+- * In essence, the owner field now has the following 3 states:
++ * In essence, the owner field now has the following 4 states:
+  *  1) 0
+  *     - lock is free or the owner hasn't set the field yet
+  *  2) RWSEM_READER_OWNED
+  *     - lock is currently or previously owned by readers (lock is free
+  *       or not set by owner yet)
+- *  3) Other non-zero value
+- *     - a writer owns the lock
++ *  3) RWSEM_ANONYMOUSLY_OWNED bit set with some other bits set as well
++ *     - lock is owned by an anonymous writer, so spinning on the lock
++ *       owner should be disabled.
++ *  4) Other non-zero value
++ *     - a writer owns the lock and other writers can spin on the lock owner.
+  */
+-#define RWSEM_READER_OWNED	((struct task_struct *)1UL)
++#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 0)
++#define RWSEM_READER_OWNED	((struct task_struct *)RWSEM_ANONYMOUSLY_OWNED)
+ 
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ /*
+@@ -45,14 +49,22 @@ static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
+ 		WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
+ }
+ 
+-static inline bool rwsem_owner_is_writer(struct task_struct *owner)
++/*
++ * Return true if the a rwsem waiter can spin on the rwsem's owner
++ * and steal the lock, i.e. the lock is not anonymously owned.
++ * N.B. !owner is considered spinnable.
++ */
++static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
+ {
+-	return owner && owner != RWSEM_READER_OWNED;
++	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
+ }
+ 
+-static inline bool rwsem_owner_is_reader(struct task_struct *owner)
++/*
++ * Return true if rwsem is owned by an anonymous writer or readers.
++ */
++static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
+ {
+-	return owner == RWSEM_READER_OWNED;
++	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
+ }
+ #else
+ static inline void rwsem_set_owner(struct rw_semaphore *sem)
+diff --git a/kernel/module.c b/kernel/module.c
+index bbb45c038321..c3cc1f8615e1 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3521,6 +3521,11 @@ static noinline int do_init_module(struct module *mod)
+ 	 * walking this with preempt disabled.  In all the failure paths, we
+ 	 * call synchronize_sched(), but we don't want to slow down the success
+ 	 * path, so use actual RCU here.
++	 * Note that module_alloc() on most architectures creates W+X page
++	 * mappings which won't be cleaned up until do_free_init() runs.  Any
++	 * code such as mark_rodata_ro() which depends on those mappings to
++	 * be cleaned up needs to sync with the queued work - ie
++	 * rcu_barrier_sched()
+ 	 */
+ 	call_rcu_sched(&freeinit->rcu, do_free_init);
+ 	mutex_unlock(&module_mutex);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5f37ef9f6cd5..ce2716bccc8e 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -30,6 +30,8 @@
+ #include <linux/syscalls.h>
+ #include <linux/sched/isolation.h>
+ 
++#include <linux/kthread.h>
++
+ #include <asm/switch_to.h>
+ #include <asm/tlb.h>
+ #ifdef CONFIG_PARAVIRT
+@@ -2733,20 +2735,28 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ 		membarrier_mm_sync_core_before_usermode(mm);
+ 		mmdrop(mm);
+ 	}
+-	if (unlikely(prev_state == TASK_DEAD)) {
+-		if (prev->sched_class->task_dead)
+-			prev->sched_class->task_dead(prev);
++	if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
++		switch (prev_state) {
++		case TASK_DEAD:
++			if (prev->sched_class->task_dead)
++				prev->sched_class->task_dead(prev);
+ 
+-		/*
+-		 * Remove function-return probe instances associated with this
+-		 * task and put them back on the free list.
+-		 */
+-		kprobe_flush_task(prev);
++			/*
++			 * Remove function-return probe instances associated with this
++			 * task and put them back on the free list.
++			 */
++			kprobe_flush_task(prev);
++
++			/* Task is done with its stack. */
++			put_task_stack(prev);
+ 
+-		/* Task is done with its stack. */
+-		put_task_stack(prev);
++			put_task_struct(prev);
++			break;
+ 
+-		put_task_struct(prev);
++		case TASK_PARKED:
++			kthread_park_complete(prev);
++			break;
++		}
+ 	}
+ 
+ 	tick_nohz_task_switch();
+@@ -3449,23 +3459,8 @@ static void __sched notrace __schedule(bool preempt)
+ 
+ void __noreturn do_task_dead(void)
+ {
+-	/*
+-	 * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+-	 * when the following two conditions become true.
+-	 *   - There is race condition of mmap_sem (It is acquired by
+-	 *     exit_mm()), and
+-	 *   - SMI occurs before setting TASK_RUNINNG.
+-	 *     (or hypervisor of virtual machine switches to other guest)
+-	 *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
+-	 *
+-	 * To avoid it, we have to wait for releasing tsk->pi_lock which
+-	 * is held by try_to_wake_up()
+-	 */
+-	raw_spin_lock_irq(&current->pi_lock);
+-	raw_spin_unlock_irq(&current->pi_lock);
+-
+ 	/* Causes final put_task_struct in finish_task_switch(): */
+-	__set_current_state(TASK_DEAD);
++	set_special_state(TASK_DEAD);
+ 
+ 	/* Tell freezer to ignore us: */
+ 	current->flags |= PF_NOFREEZE;
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 9df09782025c..a6b6b45a0c68 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -1121,7 +1121,7 @@ extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
+  * So, overflow is not an issue here.
+  */
+-u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
++static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
+ {
+ 	u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
+ 	u64 u_act;
+@@ -2723,8 +2723,6 @@ bool dl_cpu_busy(unsigned int cpu)
+ #endif
+ 
+ #ifdef CONFIG_SCHED_DEBUG
+-extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
+-
+ void print_dl_stats(struct seq_file *m, int cpu)
+ {
+ 	print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 84bf1a24a55a..cf52bf16aa7e 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -2691,8 +2691,6 @@ int sched_rr_handler(struct ctl_table *table, int write,
+ }
+ 
+ #ifdef CONFIG_SCHED_DEBUG
+-extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
+-
+ void print_rt_stats(struct seq_file *m, int cpu)
+ {
+ 	rt_rq_iter_t iter;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index fb5fc458547f..b0c98ff56071 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1986,8 +1986,9 @@ extern bool sched_debug_enabled;
+ extern void print_cfs_stats(struct seq_file *m, int cpu);
+ extern void print_rt_stats(struct seq_file *m, int cpu);
+ extern void print_dl_stats(struct seq_file *m, int cpu);
+-extern void
+-print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
++extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
++extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
++extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
+ #ifdef CONFIG_NUMA_BALANCING
+ extern void
+ show_numa_stats(struct task_struct *p, struct seq_file *m);
+diff --git a/kernel/signal.c b/kernel/signal.c
+index c6e4c83dc090..365aacb46aa6 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1961,14 +1961,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
+ 			return;
+ 	}
+ 
++	set_special_state(TASK_TRACED);
++
+ 	/*
+ 	 * We're committing to trapping.  TRACED should be visible before
+ 	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+ 	 * Also, transition to TRACED and updates to ->jobctl should be
+ 	 * atomic with respect to siglock and should be done after the arch
+ 	 * hook as siglock is released and regrabbed across it.
++	 *
++	 *     TRACER				    TRACEE
++	 *
++	 *     ptrace_attach()
++	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
++	 *     do_wait()
++	 *       set_current_state()                smp_wmb();
++	 *       ptrace_do_wait()
++	 *         wait_task_stopped()
++	 *           task_stopped_code()
++	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
+ 	 */
+-	set_current_state(TASK_TRACED);
++	smp_wmb();
+ 
+ 	current->last_siginfo = info;
+ 	current->exit_code = exit_code;
+@@ -2176,7 +2189,7 @@ static bool do_signal_stop(int signr)
+ 		if (task_participate_group_stop(current))
+ 			notify = CLD_STOPPED;
+ 
+-		__set_current_state(TASK_STOPPED);
++		set_special_state(TASK_STOPPED);
+ 		spin_unlock_irq(&current->sighand->siglock);
+ 
+ 		/*
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index b7591261652d..64c0291b579c 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -21,6 +21,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/atomic.h>
+ #include <linux/nmi.h>
++#include <linux/sched/wake_q.h>
+ 
+ /*
+  * Structure to determine completion condition and record errors.  May
+@@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
+ }
+ 
+ static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
+-					struct cpu_stop_work *work)
++					struct cpu_stop_work *work,
++					struct wake_q_head *wakeq)
+ {
+ 	list_add_tail(&work->list, &stopper->works);
+-	wake_up_process(stopper->thread);
++	wake_q_add(wakeq, stopper->thread);
+ }
+ 
+ /* queue @work to @stopper.  if offline, @work is completed immediately */
+ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
+ {
+ 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
++	DEFINE_WAKE_Q(wakeq);
+ 	unsigned long flags;
+ 	bool enabled;
+ 
+ 	spin_lock_irqsave(&stopper->lock, flags);
+ 	enabled = stopper->enabled;
+ 	if (enabled)
+-		__cpu_stop_queue_work(stopper, work);
++		__cpu_stop_queue_work(stopper, work, &wakeq);
+ 	else if (work->done)
+ 		cpu_stop_signal_done(work->done);
+ 	spin_unlock_irqrestore(&stopper->lock, flags);
+ 
++	wake_up_q(&wakeq);
++
+ 	return enabled;
+ }
+ 
+@@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ {
+ 	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
+ 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
++	DEFINE_WAKE_Q(wakeq);
+ 	int err;
+ retry:
+ 	spin_lock_irq(&stopper1->lock);
+@@ -252,8 +258,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ 			goto unlock;
+ 
+ 	err = 0;
+-	__cpu_stop_queue_work(stopper1, work1);
+-	__cpu_stop_queue_work(stopper2, work2);
++	__cpu_stop_queue_work(stopper1, work1, &wakeq);
++	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+ unlock:
+ 	spin_unlock(&stopper2->lock);
+ 	spin_unlock_irq(&stopper1->lock);
+@@ -263,6 +269,9 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
+ 			cpu_relax();
+ 		goto retry;
+ 	}
++
++	wake_up_q(&wakeq);
++
+ 	return err;
+ }
+ /**
+diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
+index 5985a25e6cbc..5367ffa5c18f 100644
+--- a/lib/find_bit_benchmark.c
++++ b/lib/find_bit_benchmark.c
+@@ -132,7 +132,12 @@ static int __init find_bit_test(void)
+ 	test_find_next_bit(bitmap, BITMAP_LEN);
+ 	test_find_next_zero_bit(bitmap, BITMAP_LEN);
+ 	test_find_last_bit(bitmap, BITMAP_LEN);
+-	test_find_first_bit(bitmap, BITMAP_LEN);
++
++	/*
++	 * test_find_first_bit() may take some time, so
++	 * traverse only part of bitmap to avoid soft lockup.
++	 */
++	test_find_first_bit(bitmap, BITMAP_LEN / 10);
+ 	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
+ 
+ 	pr_err("\nStart testing find_bit() with sparse bitmap\n");
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 88719f53ae3b..b1b13c214e95 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+ {
+ 	struct memcg_kmem_cache_create_work *cw;
+ 
+-	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
++	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!cw)
+ 		return;
+ 
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 3726dc797847..a57788b0082e 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -669,7 +669,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
+ 	else
+ 		mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr);
+ 
+-	dev->mtu = max_t(int, mtu, IPV6_MIN_MTU);
++	dev->mtu = max_t(int, mtu, IPV4_MIN_MTU);
+ }
+ 
+ /**
+@@ -881,7 +881,7 @@ static void vti6_dev_setup(struct net_device *dev)
+ 	dev->priv_destructor = vti6_dev_free;
+ 
+ 	dev->type = ARPHRD_TUNNEL6;
+-	dev->min_mtu = IPV6_MIN_MTU;
++	dev->min_mtu = IPV4_MIN_MTU;
+ 	dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
+ 	dev->flags |= IFF_NOARP;
+ 	dev->addr_len = sizeof(struct in6_addr);
+diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
+index d395d1590699..94b2d286a1b2 100644
+--- a/net/ipv6/netfilter/Kconfig
++++ b/net/ipv6/netfilter/Kconfig
+@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
+ 	  fields such as the source, destination, flowlabel, hop-limit and
+ 	  the packet mark.
+ 
++if NF_NAT_IPV6
++
++config NFT_CHAIN_NAT_IPV6
++	tristate "IPv6 nf_tables nat chain support"
++	help
++	  This option enables the "nat" chain for IPv6 in nf_tables. This
++	  chain type is used to perform Network Address Translation (NAT)
++	  packet transformations such as the source, destination address and
++	  source and destination ports.
++
++config NFT_MASQ_IPV6
++	tristate "IPv6 masquerade support for nf_tables"
++	depends on NFT_MASQ
++	select NF_NAT_MASQUERADE_IPV6
++	help
++	  This is the expression that provides IPv4 masquerading support for
++	  nf_tables.
++
++config NFT_REDIR_IPV6
++	tristate "IPv6 redirect support for nf_tables"
++	depends on NFT_REDIR
++	select NF_NAT_REDIRECT
++	help
++	  This is the expression that provides IPv4 redirect support for
++	  nf_tables.
++
++endif # NF_NAT_IPV6
++
+ config NFT_REJECT_IPV6
+ 	select NF_REJECT_IPV6
+ 	default NFT_REJECT
+@@ -107,39 +135,12 @@ config NF_NAT_IPV6
+ 
+ if NF_NAT_IPV6
+ 
+-config NFT_CHAIN_NAT_IPV6
+-	depends on NF_TABLES_IPV6
+-	tristate "IPv6 nf_tables nat chain support"
+-	help
+-	  This option enables the "nat" chain for IPv6 in nf_tables. This
+-	  chain type is used to perform Network Address Translation (NAT)
+-	  packet transformations such as the source, destination address and
+-	  source and destination ports.
+-
+ config NF_NAT_MASQUERADE_IPV6
+ 	tristate "IPv6 masquerade support"
+ 	help
+ 	  This is the kernel functionality to provide NAT in the masquerade
+ 	  flavour (automatic source address selection) for IPv6.
+ 
+-config NFT_MASQ_IPV6
+-	tristate "IPv6 masquerade support for nf_tables"
+-	depends on NF_TABLES_IPV6
+-	depends on NFT_MASQ
+-	select NF_NAT_MASQUERADE_IPV6
+-	help
+-	  This is the expression that provides IPv4 masquerading support for
+-	  nf_tables.
+-
+-config NFT_REDIR_IPV6
+-	tristate "IPv6 redirect support for nf_tables"
+-	depends on NF_TABLES_IPV6
+-	depends on NFT_REDIR
+-	select NF_NAT_REDIRECT
+-	help
+-	  This is the expression that provides IPv4 redirect support for
+-	  nf_tables.
+-
+ endif # NF_NAT_IPV6
+ 
+ config IP6_NF_IPTABLES
+diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
+index 595c662a61e8..ac4295296514 100644
+--- a/net/mac80211/agg-tx.c
++++ b/net/mac80211/agg-tx.c
+@@ -8,6 +8,7 @@
+  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+  * Copyright 2007-2010, Intel Corporation
+  * Copyright(c) 2015-2017 Intel Deutschland GmbH
++ * Copyright (C) 2018 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -970,6 +971,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
+ 
+ 		sta->ampdu_mlme.addba_req_num[tid] = 0;
+ 
++		tid_tx->timeout =
++			le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
++
+ 		if (tid_tx->timeout) {
+ 			mod_timer(&tid_tx->session_timer,
+ 				  TU_TO_EXP_TIME(tid_tx->timeout));
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 5f303abac5ad..b2457d560e7a 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -35,6 +35,7 @@
+ #define IEEE80211_AUTH_TIMEOUT		(HZ / 5)
+ #define IEEE80211_AUTH_TIMEOUT_LONG	(HZ / 2)
+ #define IEEE80211_AUTH_TIMEOUT_SHORT	(HZ / 10)
++#define IEEE80211_AUTH_TIMEOUT_SAE	(HZ * 2)
+ #define IEEE80211_AUTH_MAX_TRIES	3
+ #define IEEE80211_AUTH_WAIT_ASSOC	(HZ * 5)
+ #define IEEE80211_ASSOC_TIMEOUT		(HZ / 5)
+@@ -3788,16 +3789,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
+ 			    tx_flags);
+ 
+ 	if (tx_flags == 0) {
+-		auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+-		auth_data->timeout_started = true;
+-		run_again(sdata, auth_data->timeout);
++		if (auth_data->algorithm == WLAN_AUTH_SAE)
++			auth_data->timeout = jiffies +
++				IEEE80211_AUTH_TIMEOUT_SAE;
++		else
++			auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ 	} else {
+ 		auth_data->timeout =
+ 			round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+-		auth_data->timeout_started = true;
+-		run_again(sdata, auth_data->timeout);
+ 	}
+ 
++	auth_data->timeout_started = true;
++	run_again(sdata, auth_data->timeout);
++
+ 	return 0;
+ }
+ 
+@@ -3868,8 +3872,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
+ 		ifmgd->status_received = false;
+ 		if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
+ 			if (status_acked) {
+-				ifmgd->auth_data->timeout =
+-					jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
++				if (ifmgd->auth_data->algorithm ==
++				    WLAN_AUTH_SAE)
++					ifmgd->auth_data->timeout =
++						jiffies +
++						IEEE80211_AUTH_TIMEOUT_SAE;
++				else
++					ifmgd->auth_data->timeout =
++						jiffies +
++						IEEE80211_AUTH_TIMEOUT_SHORT;
+ 				run_again(sdata, ifmgd->auth_data->timeout);
+ 			} else {
+ 				ifmgd->auth_data->timeout = jiffies - 1;
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 69722504e3e1..516b63db8d5d 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -4,6 +4,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
++ * Copyright (C) 2018 Intel Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -1138,7 +1139,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ 	}
+ 
+ 	/* reset session timer */
+-	if (reset_agg_timer && tid_tx->timeout)
++	if (reset_agg_timer)
+ 		tid_tx->last_tx = jiffies;
+ 
+ 	return queued;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index c853386b86ff..e6e6f4ce6322 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5741,7 +5741,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
+ 	struct nft_base_chain *basechain;
+ 
+ 	if (nft_trans_chain_name(trans))
+-		strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
++		swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
+ 
+ 	if (!nft_is_base_chain(trans->ctx.chain))
+ 		return;
+diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
+index eea1d8611b20..13b38ad0fa4a 100644
+--- a/net/rds/ib_cm.c
++++ b/net/rds/ib_cm.c
+@@ -547,7 +547,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
+ 		 ic->i_send_cq, ic->i_recv_cq);
+ 
+-	return ret;
++	goto out;
+ 
+ sends_out:
+ 	vfree(ic->i_sends);
+@@ -572,6 +572,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 		ic->i_send_cq = NULL;
+ rds_ibdev_out:
+ 	rds_ib_remove_conn(rds_ibdev, conn);
++out:
+ 	rds_ib_dev_put(rds_ibdev);
+ 
+ 	return ret;
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 0c9c18aa7c77..cfcedfcccf10 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
+ 	memset(&cp, 0, sizeof(cp));
+ 	cp.local		= rx->local;
+ 	cp.key			= key;
+-	cp.security_level	= 0;
++	cp.security_level	= rx->min_sec_level;
+ 	cp.exclusive		= false;
+ 	cp.upgrade		= upgrade;
+ 	cp.service_id		= srx->srx_service;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 416688381eb7..0aa8c7ff1143 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -464,6 +464,7 @@ enum rxrpc_call_flag {
+ 	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
+ 	RXRPC_CALL_PINGING,		/* Ping in process */
+ 	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
++	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
+ };
+ 
+ /*
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 34db634594c4..c01a7fb280cc 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -971,7 +971,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
+ 	if (timo) {
+ 		unsigned long now = jiffies, expect_rx_by;
+ 
+-		expect_rx_by = jiffies + timo;
++		expect_rx_by = now + timo;
+ 		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+ 		rxrpc_reduce_call_timer(call, expect_rx_by, now,
+ 					rxrpc_timer_set_for_normal);
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index 38b99db30e54..2af42c7d5b82 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -133,22 +133,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
+ 		}
+ 	}
+ 
+-	/* we want to receive ICMP errors */
+-	opt = 1;
+-	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+-				(char *) &opt, sizeof(opt));
+-	if (ret < 0) {
+-		_debug("setsockopt failed");
+-		goto error;
+-	}
++	switch (local->srx.transport.family) {
++	case AF_INET:
++		/* we want to receive ICMP errors */
++		opt = 1;
++		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
++					(char *) &opt, sizeof(opt));
++		if (ret < 0) {
++			_debug("setsockopt failed");
++			goto error;
++		}
+ 
+-	/* we want to set the don't fragment bit */
+-	opt = IP_PMTUDISC_DO;
+-	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+-				(char *) &opt, sizeof(opt));
+-	if (ret < 0) {
+-		_debug("setsockopt failed");
+-		goto error;
++		/* we want to set the don't fragment bit */
++		opt = IP_PMTUDISC_DO;
++		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
++					(char *) &opt, sizeof(opt));
++		if (ret < 0) {
++			_debug("setsockopt failed");
++			goto error;
++		}
++		break;
++
++	case AF_INET6:
++		/* we want to receive ICMP errors */
++		opt = 1;
++		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
++					(char *) &opt, sizeof(opt));
++		if (ret < 0) {
++			_debug("setsockopt failed");
++			goto error;
++		}
++
++		/* we want to set the don't fragment bit */
++		opt = IPV6_PMTUDISC_DO;
++		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
++					(char *) &opt, sizeof(opt));
++		if (ret < 0) {
++			_debug("setsockopt failed");
++			goto error;
++		}
++		break;
++
++	default:
++		BUG();
+ 	}
+ 
+ 	/* set the socket up */
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index cf73dc006c3b..8787ff39e4f8 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -407,6 +407,17 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ 							rxrpc_timer_set_for_lost_ack);
+ 			}
+ 		}
++
++		if (sp->hdr.seq == 1 &&
++		    !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
++				      &call->flags)) {
++			unsigned long nowj = jiffies, expect_rx_by;
++
++			expect_rx_by = nowj + call->next_rx_timo;
++			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
++			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
++						rxrpc_timer_set_for_normal);
++		}
+ 	}
+ 
+ 	rxrpc_set_keepalive(call);
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 7a94ce92ffdc..28f9e1584ff3 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -223,6 +223,15 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ 
+ 	ret = rxrpc_send_data_packet(call, skb, false);
+ 	if (ret < 0) {
++		switch (ret) {
++		case -ENETUNREACH:
++		case -EHOSTUNREACH:
++		case -ECONNREFUSED:
++			rxrpc_set_call_completion(call,
++						  RXRPC_CALL_LOCAL_ERROR,
++						  0, ret);
++			goto out;
++		}
+ 		_debug("need instant resend %d", ret);
+ 		rxrpc_instant_resend(call, ix);
+ 	} else {
+@@ -241,6 +250,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ 					rxrpc_timer_set_for_send);
+ 	}
+ 
++out:
+ 	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ 	_leave("");
+ }
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 5a3f691bb545..c8ba29535919 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ 		return 0;
+ 
+ 	if (!flags) {
+-		tcf_idr_release(*a, bind);
++		if (exists)
++			tcf_idr_release(*a, bind);
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 5a983c9bea53..0132c08b0680 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1313,8 +1313,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
+ 
+ 	smc = smc_sk(sk);
+ 	lock_sock(sk);
+-	if (sk->sk_state != SMC_ACTIVE)
++	if (sk->sk_state != SMC_ACTIVE) {
++		release_sock(sk);
+ 		goto out;
++	}
++	release_sock(sk);
+ 	if (smc->use_fallback)
+ 		rc = kernel_sendpage(smc->clcsock, page, offset,
+ 				     size, flags);
+@@ -1322,7 +1325,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
+ 		rc = sock_no_sendpage(sock, page, offset, size, flags);
+ 
+ out:
+-	release_sock(sk);
+ 	return rc;
+ }
+ 
+diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
+index d5f95bb39300..5679b5374dfb 100644
+--- a/net/sunrpc/xprtrdma/fmr_ops.c
++++ b/net/sunrpc/xprtrdma/fmr_ops.c
+@@ -72,6 +72,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+ 	if (IS_ERR(mr->fmr.fm_mr))
+ 		goto out_fmr_err;
+ 
++	INIT_LIST_HEAD(&mr->mr_list);
+ 	return 0;
+ 
+ out_fmr_err:
+@@ -102,10 +103,6 @@ fmr_op_release_mr(struct rpcrdma_mr *mr)
+ 	LIST_HEAD(unmap_list);
+ 	int rc;
+ 
+-	/* Ensure MW is not on any rl_registered list */
+-	if (!list_empty(&mr->mr_list))
+-		list_del(&mr->mr_list);
+-
+ 	kfree(mr->fmr.fm_physaddrs);
+ 	kfree(mr->mr_sg);
+ 
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 90f688f19783..4d11dc5190b8 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -110,6 +110,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+ 	if (!mr->mr_sg)
+ 		goto out_list_err;
+ 
++	INIT_LIST_HEAD(&mr->mr_list);
+ 	sg_init_table(mr->mr_sg, depth);
+ 	init_completion(&frwr->fr_linv_done);
+ 	return 0;
+@@ -133,10 +134,6 @@ frwr_op_release_mr(struct rpcrdma_mr *mr)
+ {
+ 	int rc;
+ 
+-	/* Ensure MR is not on any rl_registered list */
+-	if (!list_empty(&mr->mr_list))
+-		list_del(&mr->mr_list);
+-
+ 	rc = ib_dereg_mr(mr->frwr.fr_mr);
+ 	if (rc)
+ 		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
+@@ -195,7 +192,7 @@ frwr_op_recover_mr(struct rpcrdma_mr *mr)
+ 	return;
+ 
+ out_release:
+-	pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
++	pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
+ 	r_xprt->rx_stats.mrs_orphaned++;
+ 
+ 	spin_lock(&r_xprt->rx_buf.rb_mrlock);
+@@ -458,7 +455,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
+ 
+ 	list_for_each_entry(mr, mrs, mr_list)
+ 		if (mr->mr_handle == rep->rr_inv_rkey) {
+-			list_del(&mr->mr_list);
++			list_del_init(&mr->mr_list);
+ 			trace_xprtrdma_remoteinv(mr);
+ 			mr->frwr.fr_state = FRWR_IS_INVALID;
+ 			rpcrdma_mr_unmap_and_put(mr);
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 25b0ecbd37e2..20ad7bc1021c 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1244,6 +1244,11 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
+ 		list_del(&mr->mr_all);
+ 
+ 		spin_unlock(&buf->rb_mrlock);
++
++		/* Ensure MW is not on any rl_registered list */
++		if (!list_empty(&mr->mr_list))
++			list_del(&mr->mr_list);
++
+ 		ia->ri_ops->ro_release_mr(mr);
+ 		count++;
+ 		spin_lock(&buf->rb_mrlock);
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 430a6de8300e..99c96bf33fce 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -381,7 +381,7 @@ rpcrdma_mr_pop(struct list_head *list)
+ 	struct rpcrdma_mr *mr;
+ 
+ 	mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
+-	list_del(&mr->mr_list);
++	list_del_init(&mr->mr_list);
+ 	return mr;
+ }
+ 
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index 32dc33a94bc7..5453e564da82 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -777,7 +777,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
+ 
+ 	ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
+ 	if (ret || !mon)
+-		return -EINVAL;
++		return 0;
+ 
+ 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ 			  NLM_F_MULTI, TIPC_NL_MON_GET);
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 9036d8756e73..63f621e13d63 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -1863,6 +1863,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
+ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
+ {
+ 	struct net *net = genl_info_net(info);
++	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
+ 	struct tipc_nl_msg msg;
+ 	char *name;
+ 	int err;
+@@ -1870,9 +1871,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
+ 	msg.portid = info->snd_portid;
+ 	msg.seq = info->snd_seq;
+ 
+-	if (!info->attrs[TIPC_NLA_LINK_NAME])
++	if (!info->attrs[TIPC_NLA_LINK])
+ 		return -EINVAL;
+-	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
++
++	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
++			       info->attrs[TIPC_NLA_LINK],
++			       tipc_nl_link_policy, info->extack);
++	if (err)
++		return err;
++
++	if (!attrs[TIPC_NLA_LINK_NAME])
++		return -EINVAL;
++
++	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
+ 
+ 	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ 	if (!msg.skb)
+@@ -2145,8 +2156,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
+ 	struct net *net = sock_net(skb->sk);
+ 	u32 prev_bearer = cb->args[0];
+ 	struct tipc_nl_msg msg;
++	int bearer_id;
+ 	int err;
+-	int i;
+ 
+ 	if (prev_bearer == MAX_BEARERS)
+ 		return 0;
+@@ -2156,16 +2167,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
+ 	msg.seq = cb->nlh->nlmsg_seq;
+ 
+ 	rtnl_lock();
+-	for (i = prev_bearer; i < MAX_BEARERS; i++) {
+-		prev_bearer = i;
+-		err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
++	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
++		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
+ 		if (err)
+-			goto out;
++			break;
+ 	}
+-
+-out:
+ 	rtnl_unlock();
+-	cb->args[0] = prev_bearer;
++	cb->args[0] = bearer_id;
+ 
+ 	return skb->len;
+ }
+diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
+index b2a95af7df18..7f5c86246138 100644
+--- a/scripts/Makefile.gcc-plugins
++++ b/scripts/Makefile.gcc-plugins
+@@ -14,7 +14,7 @@ ifdef CONFIG_GCC_PLUGINS
+   endif
+ 
+   ifdef CONFIG_GCC_PLUGIN_SANCOV
+-    ifeq ($(CFLAGS_KCOV),)
++    ifeq ($(strip $(CFLAGS_KCOV)),)
+       # It is needed because of the gcc-plugin.sh and gcc version checks.
+       gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV)           += sancov_plugin.so
+ 
+diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
+index 44062bb7bf2f..f53922f4ee4e 100644
+--- a/sound/soc/codecs/msm8916-wcd-analog.c
++++ b/sound/soc/codecs/msm8916-wcd-analog.c
+@@ -1185,7 +1185,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
+ 		return irq;
+ 	}
+ 
+-	ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler,
++	ret = devm_request_threaded_irq(dev, irq, NULL,
++			       pm8916_mbhc_switch_irq_handler,
+ 			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ 			       IRQF_ONESHOT,
+ 			       "mbhc switch irq", priv);
+@@ -1199,7 +1200,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
+ 			return irq;
+ 		}
+ 
+-		ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler,
++		ret = devm_request_threaded_irq(dev, irq, NULL,
++				       mbhc_btn_press_irq_handler,
+ 				       IRQF_TRIGGER_RISING |
+ 				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 				       "mbhc btn press irq", priv);
+@@ -1212,7 +1214,8 @@ static int pm8916_wcd_analog_spmi_probe(struct platform_device *pdev)
+ 			return irq;
+ 		}
+ 
+-		ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler,
++		ret = devm_request_threaded_irq(dev, irq, NULL,
++				       mbhc_btn_release_irq_handler,
+ 				       IRQF_TRIGGER_RISING |
+ 				       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 				       "mbhc btn release irq", priv);
+diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
+index 198df016802f..74cb1d28e0f4 100644
+--- a/sound/soc/codecs/rt5514.c
++++ b/sound/soc/codecs/rt5514.c
+@@ -89,6 +89,7 @@ static const struct reg_default rt5514_reg[] = {
+ 	{RT5514_PLL3_CALIB_CTRL5,	0x40220012},
+ 	{RT5514_DELAY_BUF_CTRL1,	0x7fff006a},
+ 	{RT5514_DELAY_BUF_CTRL3,	0x00000000},
++	{RT5514_ASRC_IN_CTRL1,		0x00000003},
+ 	{RT5514_DOWNFILTER0_CTRL1,	0x00020c2f},
+ 	{RT5514_DOWNFILTER0_CTRL2,	0x00020c2f},
+ 	{RT5514_DOWNFILTER0_CTRL3,	0x10000362},
+@@ -181,6 +182,7 @@ static bool rt5514_readable_register(struct device *dev, unsigned int reg)
+ 	case RT5514_PLL3_CALIB_CTRL5:
+ 	case RT5514_DELAY_BUF_CTRL1:
+ 	case RT5514_DELAY_BUF_CTRL3:
++	case RT5514_ASRC_IN_CTRL1:
+ 	case RT5514_DOWNFILTER0_CTRL1:
+ 	case RT5514_DOWNFILTER0_CTRL2:
+ 	case RT5514_DOWNFILTER0_CTRL3:
+@@ -238,6 +240,7 @@ static bool rt5514_i2c_readable_register(struct device *dev,
+ 	case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
+ 	case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
+ 	case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
++	case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1:
+ 	case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
+ 	case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
+ 	case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
+diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
+index ceb105cbd461..addac2a8e52a 100644
+--- a/sound/soc/intel/Kconfig
++++ b/sound/soc/intel/Kconfig
+@@ -72,24 +72,28 @@ config SND_SOC_INTEL_BAYTRAIL
+ 	  for Baytrail Chromebooks but this option is now deprecated and is
+ 	  not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead.
+ 
++config SND_SST_ATOM_HIFI2_PLATFORM
++	tristate
++	select SND_SOC_COMPRESS
++
+ config SND_SST_ATOM_HIFI2_PLATFORM_PCI
+-	tristate "PCI HiFi2 (Medfield, Merrifield) Platforms"
++	tristate "PCI HiFi2 (Merrifield) Platforms"
+ 	depends on X86 && PCI
+ 	select SND_SST_IPC_PCI
+-	select SND_SOC_COMPRESS
++	select SND_SST_ATOM_HIFI2_PLATFORM
+ 	help
+-	  If you have a Intel Medfield or Merrifield/Edison platform, then
++	  If you have a Intel Merrifield/Edison platform, then
+ 	  enable this option by saying Y or m. Distros will typically not
+-	  enable this option: Medfield devices are not available to
+-	  developers and while Merrifield/Edison can run a mainline kernel with
+-	  limited functionality it will require a firmware file which
+-	  is not in the standard firmware tree
++	  enable this option: while Merrifield/Edison can run a mainline
++	  kernel with limited functionality it will require a firmware file
++	  which is not in the standard firmware tree
+ 
+-config SND_SST_ATOM_HIFI2_PLATFORM
++config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
+ 	tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
++	default ACPI
+ 	depends on X86 && ACPI
+ 	select SND_SST_IPC_ACPI
+-	select SND_SOC_COMPRESS
++	select SND_SST_ATOM_HIFI2_PLATFORM
+ 	select SND_SOC_ACPI_INTEL_MATCH
+ 	select IOSF_MBI
+ 	help
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index e5049fbfc4f1..30cdad2eab7f 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -510,7 +510,7 @@ static void remove_widget(struct snd_soc_component *comp,
+ 	 */
+ 	if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) {
+ 		/* enumerated widget mixer */
+-		for (i = 0; i < w->num_kcontrols; i++) {
++		for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
+ 			struct snd_kcontrol *kcontrol = w->kcontrols[i];
+ 			struct soc_enum *se =
+ 				(struct soc_enum *)kcontrol->private_value;
+@@ -528,7 +528,7 @@ static void remove_widget(struct snd_soc_component *comp,
+ 		kfree(w->kcontrol_news);
+ 	} else {
+ 		/* volume mixer or bytes controls */
+-		for (i = 0; i < w->num_kcontrols; i++) {
++		for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) {
+ 			struct snd_kcontrol *kcontrol = w->kcontrols[i];
+ 
+ 			if (dobj->widget.kcontrol_type
+@@ -2571,7 +2571,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
+ 
+ 			/* match index */
+ 			if (dobj->index != index &&
+-				dobj->index != SND_SOC_TPLG_INDEX_ALL)
++				index != SND_SOC_TPLG_INDEX_ALL)
+ 				continue;
+ 
+ 			switch (dobj->type) {
+diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
+index 4f254bcc4423..61b9aa5d6415 100644
+--- a/tools/bpf/bpf_dbg.c
++++ b/tools/bpf/bpf_dbg.c
+@@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
+ 
+ static int cmd_load(char *arg)
+ {
+-	char *subcmd, *cont, *tmp = strdup(arg);
++	char *subcmd, *cont = NULL, *tmp = strdup(arg);
+ 	int ret = CMD_OK;
+ 
+ 	subcmd = strtok_r(tmp, " ", &cont);
+@@ -1073,7 +1073,10 @@ static int cmd_load(char *arg)
+ 		bpf_reset();
+ 		bpf_reset_breakpoints();
+ 
+-		ret = cmd_load_bpf(cont);
++		if (!cont)
++			ret = CMD_ERR;
++		else
++			ret = cmd_load_bpf(cont);
+ 	} else if (matches(subcmd, "pcap") == 0) {
+ 		ret = cmd_load_pcap(cont);
+ 	} else {
+diff --git a/tools/objtool/arch/x86/include/asm/insn.h b/tools/objtool/arch/x86/include/asm/insn.h
+index b3e32b010ab1..c2c01f84df75 100644
+--- a/tools/objtool/arch/x86/include/asm/insn.h
++++ b/tools/objtool/arch/x86/include/asm/insn.h
+@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(struct insn *insn)
+ 	return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
+ 
++#define POP_SS_OPCODE 0x1f
++#define MOV_SREG_OPCODE 0x8e
++
++/*
++ * Intel SDM Vol.3A 6.8.3 states;
++ * "Any single-step trap that would be delivered following the MOV to SS
++ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
++ * suppressed."
++ * This function returns true if @insn is MOV SS or POP SS. On these
++ * instructions, single stepping is suppressed.
++ */
++static inline int insn_masking_exception(struct insn *insn)
++{
++	return insn->opcode.bytes[0] == POP_SS_OPCODE ||
++		(insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
++		 X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
++}
++
+ #endif /* _ASM_X86_INSN_H */
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index b9f0a53dfa65..409d9d524bf9 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -212,6 +212,7 @@ static void cs_etm__free(struct perf_session *session)
+ 	for (i = 0; i < aux->num_cpu; i++)
+ 		zfree(&aux->metadata[i]);
+ 
++	thread__zput(aux->unknown_thread);
+ 	zfree(&aux->metadata);
+ 	zfree(&aux);
+ }
+@@ -980,6 +981,23 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
+ 	etm->auxtrace.free = cs_etm__free;
+ 	session->auxtrace = &etm->auxtrace;
+ 
++	etm->unknown_thread = thread__new(999999999, 999999999);
++	if (!etm->unknown_thread)
++		goto err_free_queues;
++
++	/*
++	 * Initialize list node so that at thread__zput() we can avoid
++	 * segmentation fault at list_del_init().
++	 */
++	INIT_LIST_HEAD(&etm->unknown_thread->node);
++
++	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
++	if (err)
++		goto err_delete_thread;
++
++	if (thread__init_map_groups(etm->unknown_thread, etm->machine))
++		goto err_delete_thread;
++
+ 	if (dump_trace) {
+ 		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
+ 		return 0;
+@@ -994,16 +1012,18 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
+ 
+ 	err = cs_etm__synth_events(etm, session);
+ 	if (err)
+-		goto err_free_queues;
++		goto err_delete_thread;
+ 
+ 	err = auxtrace_queues__process_index(&etm->queues, session);
+ 	if (err)
+-		goto err_free_queues;
++		goto err_delete_thread;
+ 
+ 	etm->data_queued = etm->queues.populated;
+ 
+ 	return 0;
+ 
++err_delete_thread:
++	thread__zput(etm->unknown_thread);
+ err_free_queues:
+ 	auxtrace_queues__free(&etm->queues);
+ 	session->auxtrace = NULL;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 57e38fdf0b34..60d0419bd41e 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -539,9 +539,10 @@ static bool pmu_is_uncore(const char *name)
+ 
+ /*
+  *  PMU CORE devices have different name other than cpu in sysfs on some
+- *  platforms. looking for possible sysfs files to identify as core device.
++ *  platforms.
++ *  Looking for possible sysfs files to identify the arm core device.
+  */
+-static int is_pmu_core(const char *name)
++static int is_arm_pmu_core(const char *name)
+ {
+ 	struct stat st;
+ 	char path[PATH_MAX];
+@@ -550,12 +551,6 @@ static int is_pmu_core(const char *name)
+ 	if (!sysfs)
+ 		return 0;
+ 
+-	/* Look for cpu sysfs (x86 and others) */
+-	scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
+-	if ((stat(path, &st) == 0) &&
+-			(strncmp(name, "cpu", strlen("cpu")) == 0))
+-		return 1;
+-
+ 	/* Look for cpu sysfs (specific to arm) */
+ 	scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
+ 				sysfs, name);
+@@ -651,6 +646,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+ 	struct pmu_events_map *map;
+ 	struct pmu_event *pe;
+ 	const char *name = pmu->name;
++	const char *pname;
+ 
+ 	map = perf_pmu__find_map(pmu);
+ 	if (!map)
+@@ -669,11 +665,9 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+ 			break;
+ 		}
+ 
+-		if (!is_pmu_core(name)) {
+-			/* check for uncore devices */
+-			if (pe->pmu == NULL)
+-				continue;
+-			if (strncmp(pe->pmu, name, strlen(pe->pmu)))
++		if (!is_arm_pmu_core(name)) {
++			pname = pe->pmu ? pe->pmu : "cpu";
++			if (strncmp(pname, name, strlen(pname)))
+ 				continue;
+ 		}
+ 
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index cc065d4bfafc..902597b0e492 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -2094,16 +2094,14 @@ static bool symbol__read_kptr_restrict(void)
+ 
+ int symbol__annotation_init(void)
+ {
++	if (symbol_conf.init_annotation)
++		return 0;
++
+ 	if (symbol_conf.initialized) {
+ 		pr_err("Annotation needs to be init before symbol__init()\n");
+ 		return -1;
+ 	}
+ 
+-	if (symbol_conf.init_annotation) {
+-		pr_warning("Annotation being initialized multiple times\n");
+-		return 0;
+-	}
+-
+ 	symbol_conf.priv_size += sizeof(struct annotation);
+ 	symbol_conf.init_annotation = true;
+ 	return 0;
+diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
+new file mode 100644
+index 000000000000..c193dce611a2
+--- /dev/null
++++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
+@@ -0,0 +1,44 @@
++#!/bin/sh
++# description: event trigger - test multiple actions on hist trigger
++
++
++do_reset() {
++    reset_trigger
++    echo > set_event
++    clear_trace
++}
++
++fail() { #msg
++    do_reset
++    echo $1
++    exit_fail
++}
++
++if [ ! -f set_event ]; then
++    echo "event tracing is not supported"
++    exit_unsupported
++fi
++
++if [ ! -f synthetic_events ]; then
++    echo "synthetic event is not supported"
++    exit_unsupported
++fi
++
++clear_synthetic_events
++reset_tracer
++do_reset
++
++echo "Test multiple actions on hist trigger"
++echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
++TRIGGER1=events/sched/sched_wakeup/trigger
++TRIGGER2=events/sched/sched_switch/trigger
++
++echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
++echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
++echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
++echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
++echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
++
++do_reset
++
++exit 0
+diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
+index d744991c0f4f..39f66bc29b82 100644
+--- a/tools/testing/selftests/x86/Makefile
++++ b/tools/testing/selftests/x86/Makefile
+@@ -11,7 +11,7 @@ CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
+ 
+ TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt test_mremap_vdso \
+ 			check_initial_reg_state sigreturn iopl mpx-mini-test ioperm \
+-			protection_keys test_vdso test_vsyscall
++			protection_keys test_vdso test_vsyscall mov_ss_trap
+ TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
+ 			test_FCMOV test_FCOMI test_FISTTP \
+ 			vdso_restorer
+diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
+new file mode 100644
+index 000000000000..3c3a022654f3
+--- /dev/null
++++ b/tools/testing/selftests/x86/mov_ss_trap.c
+@@ -0,0 +1,285 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * mov_ss_trap.c: Exercise the bizarre side effects of a watchpoint on MOV SS
++ *
++ * This does MOV SS from a watchpointed address followed by various
++ * types of kernel entries.  A MOV SS that hits a watchpoint will queue
++ * up a #DB trap but will not actually deliver that trap.  The trap
++ * will be delivered after the next instruction instead.  The CPU's logic
++ * seems to be:
++ *
++ *  - Any fault: drop the pending #DB trap.
++ *  - INT $N, INT3, INTO, SYSCALL, SYSENTER: enter the kernel and then
++ *    deliver #DB.
++ *  - ICEBP: enter the kernel but do not deliver the watchpoint trap
++ *  - breakpoint: only one #DB is delivered (phew!)
++ *
++ * There are plenty of ways for a kernel to handle this incorrectly.  This
++ * test tries to exercise all the cases.
++ *
++ * This should mostly cover CVE-2018-1087 and CVE-2018-8897.
++ */
++#define _GNU_SOURCE
++
++#include <stdlib.h>
++#include <sys/ptrace.h>
++#include <sys/types.h>
++#include <sys/wait.h>
++#include <sys/user.h>
++#include <sys/syscall.h>
++#include <unistd.h>
++#include <errno.h>
++#include <stddef.h>
++#include <stdio.h>
++#include <err.h>
++#include <string.h>
++#include <setjmp.h>
++#include <sys/prctl.h>
++
++#define X86_EFLAGS_RF (1UL << 16)
++
++#if __x86_64__
++# define REG_IP REG_RIP
++#else
++# define REG_IP REG_EIP
++#endif
++
++unsigned short ss;
++extern unsigned char breakpoint_insn[];
++sigjmp_buf jmpbuf;
++static unsigned char altstack_data[SIGSTKSZ];
++
++static void enable_watchpoint(void)
++{
++	pid_t parent = getpid();
++	int status;
++
++	pid_t child = fork();
++	if (child < 0)
++		err(1, "fork");
++
++	if (child) {
++		if (waitpid(child, &status, 0) != child)
++			err(1, "waitpid for child");
++	} else {
++		unsigned long dr0, dr1, dr7;
++
++		dr0 = (unsigned long)&ss;
++		dr1 = (unsigned long)breakpoint_insn;
++		dr7 = ((1UL << 1) |	/* G0 */
++		       (3UL << 16) |	/* RW0 = read or write */
++		       (1UL << 18) |	/* LEN0 = 2 bytes */
++		       (1UL << 3));	/* G1, RW1 = insn */
++
++		if (ptrace(PTRACE_ATTACH, parent, NULL, NULL) != 0)
++			err(1, "PTRACE_ATTACH");
++
++		if (waitpid(parent, &status, 0) != parent)
++			err(1, "waitpid for child");
++
++		if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[0]), dr0) != 0)
++			err(1, "PTRACE_POKEUSER DR0");
++
++		if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[1]), dr1) != 0)
++			err(1, "PTRACE_POKEUSER DR1");
++
++		if (ptrace(PTRACE_POKEUSER, parent, (void *)offsetof(struct user, u_debugreg[7]), dr7) != 0)
++			err(1, "PTRACE_POKEUSER DR7");
++
++		printf("\tDR0 = %lx, DR1 = %lx, DR7 = %lx\n", dr0, dr1, dr7);
++
++		if (ptrace(PTRACE_DETACH, parent, NULL, NULL) != 0)
++			err(1, "PTRACE_DETACH");
++
++		exit(0);
++	}
++}
++
++static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
++		       int flags)
++{
++	struct sigaction sa;
++	memset(&sa, 0, sizeof(sa));
++	sa.sa_sigaction = handler;
++	sa.sa_flags = SA_SIGINFO | flags;
++	sigemptyset(&sa.sa_mask);
++	if (sigaction(sig, &sa, 0))
++		err(1, "sigaction");
++}
++
++static char const * const signames[] = {
++	[SIGSEGV] = "SIGSEGV",
++	[SIGBUS] = "SIBGUS",
++	[SIGTRAP] = "SIGTRAP",
++	[SIGILL] = "SIGILL",
++};
++
++static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
++{
++	ucontext_t *ctx = ctx_void;
++
++	printf("\tGot SIGTRAP with RIP=%lx, EFLAGS.RF=%d\n",
++	       (unsigned long)ctx->uc_mcontext.gregs[REG_IP],
++	       !!(ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_RF));
++}
++
++static void handle_and_return(int sig, siginfo_t *si, void *ctx_void)
++{
++	ucontext_t *ctx = ctx_void;
++
++	printf("\tGot %s with RIP=%lx\n", signames[sig],
++	       (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
++}
++
++static void handle_and_longjmp(int sig, siginfo_t *si, void *ctx_void)
++{
++	ucontext_t *ctx = ctx_void;
++
++	printf("\tGot %s with RIP=%lx\n", signames[sig],
++	       (unsigned long)ctx->uc_mcontext.gregs[REG_IP]);
++
++	siglongjmp(jmpbuf, 1);
++}
++
++int main()
++{
++	unsigned long nr;
++
++	asm volatile ("mov %%ss, %[ss]" : [ss] "=m" (ss));
++	printf("\tSS = 0x%hx, &SS = 0x%p\n", ss, &ss);
++
++	if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0) == 0)
++		printf("\tPR_SET_PTRACER_ANY succeeded\n");
++
++	printf("\tSet up a watchpoint\n");
++	sethandler(SIGTRAP, sigtrap, 0);
++	enable_watchpoint();
++
++	printf("[RUN]\tRead from watched memory (should get SIGTRAP)\n");
++	asm volatile ("mov %[ss], %[tmp]" : [tmp] "=r" (nr) : [ss] "m" (ss));
++
++	printf("[RUN]\tMOV SS; INT3\n");
++	asm volatile ("mov %[ss], %%ss; int3" :: [ss] "m" (ss));
++
++	printf("[RUN]\tMOV SS; INT 3\n");
++	asm volatile ("mov %[ss], %%ss; .byte 0xcd, 0x3" :: [ss] "m" (ss));
++
++	printf("[RUN]\tMOV SS; CS CS INT3\n");
++	asm volatile ("mov %[ss], %%ss; .byte 0x2e, 0x2e; int3" :: [ss] "m" (ss));
++
++	printf("[RUN]\tMOV SS; CSx14 INT3\n");
++	asm volatile ("mov %[ss], %%ss; .fill 14,1,0x2e; int3" :: [ss] "m" (ss));
++
++	printf("[RUN]\tMOV SS; INT 4\n");
++	sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
++	asm volatile ("mov %[ss], %%ss; int $4" :: [ss] "m" (ss));
++
++#ifdef __i386__
++	printf("[RUN]\tMOV SS; INTO\n");
++	sethandler(SIGSEGV, handle_and_return, SA_RESETHAND);
++	nr = -1;
++	asm volatile ("add $1, %[tmp]; mov %[ss], %%ss; into"
++		      : [tmp] "+r" (nr) : [ss] "m" (ss));
++#endif
++
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; ICEBP\n");
++
++		/* Some emulators (e.g. QEMU TCG) don't emulate ICEBP. */
++		sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
++
++		asm volatile ("mov %[ss], %%ss; .byte 0xf1" :: [ss] "m" (ss));
++	}
++
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; CLI\n");
++		sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
++		asm volatile ("mov %[ss], %%ss; cli" :: [ss] "m" (ss));
++	}
++
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; #PF\n");
++		sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
++		asm volatile ("mov %[ss], %%ss; mov (-1), %[tmp]"
++			      : [tmp] "=r" (nr) : [ss] "m" (ss));
++	}
++
++	/*
++	 * INT $1: if #DB has DPL=3 and there isn't special handling,
++	 * then the kernel will die.
++	 */
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; INT 1\n");
++		sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
++		asm volatile ("mov %[ss], %%ss; int $1" :: [ss] "m" (ss));
++	}
++
++#ifdef __x86_64__
++	/*
++	 * In principle, we should test 32-bit SYSCALL as well, but
++	 * the calling convention is so unpredictable that it's
++	 * not obviously worth the effort.
++	 */
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; SYSCALL\n");
++		sethandler(SIGILL, handle_and_longjmp, SA_RESETHAND);
++		nr = SYS_getpid;
++		/*
++		 * Toggle the high bit of RSP to make it noncanonical to
++		 * strengthen this test on non-SMAP systems.
++		 */
++		asm volatile ("btc $63, %%rsp\n\t"
++			      "mov %[ss], %%ss; syscall\n\t"
++			      "btc $63, %%rsp"
++			      : "+a" (nr) : [ss] "m" (ss)
++			      : "rcx"
++#ifdef __x86_64__
++				, "r11"
++#endif
++			);
++	}
++#endif
++
++	printf("[RUN]\tMOV SS; breakpointed NOP\n");
++	asm volatile ("mov %[ss], %%ss; breakpoint_insn: nop" :: [ss] "m" (ss));
++
++	/*
++	 * Invoking SYSENTER directly breaks all the rules.  Just handle
++	 * the SIGSEGV.
++	 */
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; SYSENTER\n");
++		stack_t stack = {
++			.ss_sp = altstack_data,
++			.ss_size = SIGSTKSZ,
++		};
++		if (sigaltstack(&stack, NULL) != 0)
++			err(1, "sigaltstack");
++		sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND | SA_ONSTACK);
++		nr = SYS_getpid;
++		asm volatile ("mov %[ss], %%ss; SYSENTER" : "+a" (nr)
++			      : [ss] "m" (ss) : "flags", "rcx"
++#ifdef __x86_64__
++				, "r11"
++#endif
++			);
++
++		/* We're unreachable here.  SYSENTER forgets RIP. */
++	}
++
++	if (sigsetjmp(jmpbuf, 1) == 0) {
++		printf("[RUN]\tMOV SS; INT $0x80\n");
++		sethandler(SIGSEGV, handle_and_longjmp, SA_RESETHAND);
++		nr = 20;	/* compat getpid */
++		asm volatile ("mov %[ss], %%ss; int $0x80"
++			      : "+a" (nr) : [ss] "m" (ss)
++			      : "flags"
++#ifdef __x86_64__
++				, "r8", "r9", "r10", "r11"
++#endif
++			);
++	}
++
++	printf("[OK]\tI aten't dead\n");
++	return 0;
++}
+diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
+index 9c0325e1ea68..50f7e9272481 100644
+--- a/tools/testing/selftests/x86/mpx-mini-test.c
++++ b/tools/testing/selftests/x86/mpx-mini-test.c
+@@ -368,6 +368,11 @@ static int expected_bnd_index = -1;
+ uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */
+ unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS];
+ 
++/* Failed address bound checks: */
++#ifndef SEGV_BNDERR
++# define SEGV_BNDERR	3
++#endif
++
+ /*
+  * The kernel is supposed to provide some information about the bounds
+  * exception in the siginfo.  It should match what we have in the bounds
+@@ -419,8 +424,6 @@ void handler(int signum, siginfo_t *si, void *vucontext)
+ 		br_count++;
+ 		dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);
+ 
+-#define SEGV_BNDERR     3  /* failed address bound checks */
+-
+ 		dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
+ 				status, ip, br_reason);
+ 		dprintf2("si_signo: %d\n", si->si_signo);
+diff --git a/tools/testing/selftests/x86/pkey-helpers.h b/tools/testing/selftests/x86/pkey-helpers.h
+index b3cb7670e026..254e5436bdd9 100644
+--- a/tools/testing/selftests/x86/pkey-helpers.h
++++ b/tools/testing/selftests/x86/pkey-helpers.h
+@@ -26,30 +26,26 @@ static inline void sigsafe_printf(const char *format, ...)
+ {
+ 	va_list ap;
+ 
+-	va_start(ap, format);
+ 	if (!dprint_in_signal) {
++		va_start(ap, format);
+ 		vprintf(format, ap);
++		va_end(ap);
+ 	} else {
+ 		int ret;
+-		int len = vsnprintf(dprint_in_signal_buffer,
+-				    DPRINT_IN_SIGNAL_BUF_SIZE,
+-				    format, ap);
+ 		/*
+-		 * len is amount that would have been printed,
+-		 * but actual write is truncated at BUF_SIZE.
++		 * No printf() functions are signal-safe.
++		 * They deadlock easily. Write the format
++		 * string to get some output, even if
++		 * incomplete.
+ 		 */
+-		if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
+-			len = DPRINT_IN_SIGNAL_BUF_SIZE;
+-		ret = write(1, dprint_in_signal_buffer, len);
++		ret = write(1, format, strlen(format));
+ 		if (ret < 0)
+-			abort();
++			exit(1);
+ 	}
+-	va_end(ap);
+ }
+ #define dprintf_level(level, args...) do {	\
+ 	if (level <= DEBUG_LEVEL)		\
+ 		sigsafe_printf(args);		\
+-	fflush(NULL);				\
+ } while (0)
+ #define dprintf0(args...) dprintf_level(0, args)
+ #define dprintf1(args...) dprintf_level(1, args)
+diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
+index f15aa5a76fe3..460b4bdf4c1e 100644
+--- a/tools/testing/selftests/x86/protection_keys.c
++++ b/tools/testing/selftests/x86/protection_keys.c
+@@ -72,10 +72,9 @@ extern void abort_hooks(void);
+ 				test_nr, iteration_nr);	\
+ 		dprintf0("errno at assert: %d", errno);	\
+ 		abort_hooks();			\
+-		assert(condition);		\
++		exit(__LINE__);			\
+ 	}					\
+ } while (0)
+-#define raw_assert(cond) assert(cond)
+ 
+ void cat_into_file(char *str, char *file)
+ {
+@@ -87,12 +86,17 @@ void cat_into_file(char *str, char *file)
+ 	 * these need to be raw because they are called under
+ 	 * pkey_assert()
+ 	 */
+-	raw_assert(fd >= 0);
++	if (fd < 0) {
++		fprintf(stderr, "error opening '%s'\n", str);
++		perror("error: ");
++		exit(__LINE__);
++	}
++
+ 	ret = write(fd, str, strlen(str));
+ 	if (ret != strlen(str)) {
+ 		perror("write to file failed");
+ 		fprintf(stderr, "filename: '%s' str: '%s'\n", file, str);
+-		raw_assert(0);
++		exit(__LINE__);
+ 	}
+ 	close(fd);
+ }
+@@ -191,26 +195,30 @@ void lots_o_noops_around_write(int *write_to_me)
+ #ifdef __i386__
+ 
+ #ifndef SYS_mprotect_key
+-# define SYS_mprotect_key 380
++# define SYS_mprotect_key	380
+ #endif
++
+ #ifndef SYS_pkey_alloc
+-# define SYS_pkey_alloc	 381
+-# define SYS_pkey_free	 382
++# define SYS_pkey_alloc		381
++# define SYS_pkey_free		382
+ #endif
+-#define REG_IP_IDX REG_EIP
+-#define si_pkey_offset 0x14
++
++#define REG_IP_IDX		REG_EIP
++#define si_pkey_offset		0x14
+ 
+ #else
+ 
+ #ifndef SYS_mprotect_key
+-# define SYS_mprotect_key 329
++# define SYS_mprotect_key	329
+ #endif
++
+ #ifndef SYS_pkey_alloc
+-# define SYS_pkey_alloc	 330
+-# define SYS_pkey_free	 331
++# define SYS_pkey_alloc		330
++# define SYS_pkey_free		331
+ #endif
+-#define REG_IP_IDX REG_RIP
+-#define si_pkey_offset 0x20
++
++#define REG_IP_IDX		REG_RIP
++#define si_pkey_offset		0x20
+ 
+ #endif
+ 
+@@ -225,8 +233,14 @@ void dump_mem(void *dumpme, int len_bytes)
+ 	}
+ }
+ 
+-#define SEGV_BNDERR     3  /* failed address bound checks */
+-#define SEGV_PKUERR     4
++/* Failed address bound checks: */
++#ifndef SEGV_BNDERR
++# define SEGV_BNDERR		3
++#endif
++
++#ifndef SEGV_PKUERR
++# define SEGV_PKUERR		4
++#endif
+ 
+ static char *si_code_str(int si_code)
+ {
+@@ -289,13 +303,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
+ 		dump_mem(pkru_ptr - 128, 256);
+ 	pkey_assert(*pkru_ptr);
+ 
+-	si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
+-	dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
+-	dump_mem(si_pkey_ptr - 8, 24);
+-	siginfo_pkey = *si_pkey_ptr;
+-	pkey_assert(siginfo_pkey < NR_PKEYS);
+-	last_si_pkey = siginfo_pkey;
+-
+ 	if ((si->si_code == SEGV_MAPERR) ||
+ 	    (si->si_code == SEGV_ACCERR) ||
+ 	    (si->si_code == SEGV_BNDERR)) {
+@@ -303,6 +310,13 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
+ 		exit(4);
+ 	}
+ 
++	si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
++	dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
++	dump_mem((u8 *)si_pkey_ptr - 8, 24);
++	siginfo_pkey = *si_pkey_ptr;
++	pkey_assert(siginfo_pkey < NR_PKEYS);
++	last_si_pkey = siginfo_pkey;
++
+ 	dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
+ 	/* need __rdpkru() version so we do not do shadow_pkru checking */
+ 	dprintf1("signal pkru from  pkru: %08x\n", __rdpkru());
+@@ -311,22 +325,6 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext)
+ 	dprintf1("WARNING: set PRKU=0 to allow faulting instruction to continue\n");
+ 	pkru_faults++;
+ 	dprintf1("<<<<==================================================\n");
+-	return;
+-	if (trapno == 14) {
+-		fprintf(stderr,
+-			"ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
+-			trapno, ip);
+-		fprintf(stderr, "si_addr %p\n", si->si_addr);
+-		fprintf(stderr, "REG_ERR: %lx\n",
+-				(unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
+-		exit(1);
+-	} else {
+-		fprintf(stderr, "unexpected trap %d! at 0x%lx\n", trapno, ip);
+-		fprintf(stderr, "si_addr %p\n", si->si_addr);
+-		fprintf(stderr, "REG_ERR: %lx\n",
+-				(unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
+-		exit(2);
+-	}
+ 	dprint_in_signal = 0;
+ }
+ 
+@@ -393,10 +391,15 @@ pid_t fork_lazy_child(void)
+ 	return forkret;
+ }
+ 
+-#define PKEY_DISABLE_ACCESS    0x1
+-#define PKEY_DISABLE_WRITE     0x2
++#ifndef PKEY_DISABLE_ACCESS
++# define PKEY_DISABLE_ACCESS	0x1
++#endif
++
++#ifndef PKEY_DISABLE_WRITE
++# define PKEY_DISABLE_WRITE	0x2
++#endif
+ 
+-u32 pkey_get(int pkey, unsigned long flags)
++static u32 hw_pkey_get(int pkey, unsigned long flags)
+ {
+ 	u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
+ 	u32 pkru = __rdpkru();
+@@ -418,7 +421,7 @@ u32 pkey_get(int pkey, unsigned long flags)
+ 	return masked_pkru;
+ }
+ 
+-int pkey_set(int pkey, unsigned long rights, unsigned long flags)
++static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags)
+ {
+ 	u32 mask = (PKEY_DISABLE_ACCESS|PKEY_DISABLE_WRITE);
+ 	u32 old_pkru = __rdpkru();
+@@ -452,15 +455,15 @@ void pkey_disable_set(int pkey, int flags)
+ 		pkey, flags);
+ 	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+ 
+-	pkey_rights = pkey_get(pkey, syscall_flags);
++	pkey_rights = hw_pkey_get(pkey, syscall_flags);
+ 
+-	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
++	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ 			pkey, pkey, pkey_rights);
+ 	pkey_assert(pkey_rights >= 0);
+ 
+ 	pkey_rights |= flags;
+ 
+-	ret = pkey_set(pkey, pkey_rights, syscall_flags);
++	ret = hw_pkey_set(pkey, pkey_rights, syscall_flags);
+ 	assert(!ret);
+ 	/*pkru and flags have the same format */
+ 	shadow_pkru |= flags << (pkey * 2);
+@@ -468,8 +471,8 @@ void pkey_disable_set(int pkey, int flags)
+ 
+ 	pkey_assert(ret >= 0);
+ 
+-	pkey_rights = pkey_get(pkey, syscall_flags);
+-	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
++	pkey_rights = hw_pkey_get(pkey, syscall_flags);
++	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ 			pkey, pkey, pkey_rights);
+ 
+ 	dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
+@@ -483,24 +486,24 @@ void pkey_disable_clear(int pkey, int flags)
+ {
+ 	unsigned long syscall_flags = 0;
+ 	int ret;
+-	int pkey_rights = pkey_get(pkey, syscall_flags);
++	int pkey_rights = hw_pkey_get(pkey, syscall_flags);
+ 	u32 orig_pkru = rdpkru();
+ 
+ 	pkey_assert(flags & (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE));
+ 
+-	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
++	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ 			pkey, pkey, pkey_rights);
+ 	pkey_assert(pkey_rights >= 0);
+ 
+ 	pkey_rights |= flags;
+ 
+-	ret = pkey_set(pkey, pkey_rights, 0);
++	ret = hw_pkey_set(pkey, pkey_rights, 0);
+ 	/* pkru and flags have the same format */
+ 	shadow_pkru &= ~(flags << (pkey * 2));
+ 	pkey_assert(ret >= 0);
+ 
+-	pkey_rights = pkey_get(pkey, syscall_flags);
+-	dprintf1("%s(%d) pkey_get(%d): %x\n", __func__,
++	pkey_rights = hw_pkey_get(pkey, syscall_flags);
++	dprintf1("%s(%d) hw_pkey_get(%d): %x\n", __func__,
+ 			pkey, pkey, pkey_rights);
+ 
+ 	dprintf1("%s(%d) pkru: 0x%x\n", __func__, pkey, rdpkru());
+@@ -674,10 +677,12 @@ int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot,
+ struct pkey_malloc_record {
+ 	void *ptr;
+ 	long size;
++	int prot;
+ };
+ struct pkey_malloc_record *pkey_malloc_records;
++struct pkey_malloc_record *pkey_last_malloc_record;
+ long nr_pkey_malloc_records;
+-void record_pkey_malloc(void *ptr, long size)
++void record_pkey_malloc(void *ptr, long size, int prot)
+ {
+ 	long i;
+ 	struct pkey_malloc_record *rec = NULL;
+@@ -709,6 +714,8 @@ void record_pkey_malloc(void *ptr, long size)
+ 		(int)(rec - pkey_malloc_records), rec, ptr, size);
+ 	rec->ptr = ptr;
+ 	rec->size = size;
++	rec->prot = prot;
++	pkey_last_malloc_record = rec;
+ 	nr_pkey_malloc_records++;
+ }
+ 
+@@ -753,7 +760,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey)
+ 	pkey_assert(ptr != (void *)-1);
+ 	ret = mprotect_pkey((void *)ptr, PAGE_SIZE, prot, pkey);
+ 	pkey_assert(!ret);
+-	record_pkey_malloc(ptr, size);
++	record_pkey_malloc(ptr, size, prot);
+ 	rdpkru();
+ 
+ 	dprintf1("%s() for pkey %d @ %p\n", __func__, pkey, ptr);
+@@ -774,7 +781,7 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey)
+ 	size = ALIGN_UP(size, HPAGE_SIZE * 2);
+ 	ptr = mmap(NULL, size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ 	pkey_assert(ptr != (void *)-1);
+-	record_pkey_malloc(ptr, size);
++	record_pkey_malloc(ptr, size, prot);
+ 	mprotect_pkey(ptr, size, prot, pkey);
+ 
+ 	dprintf1("unaligned ptr: %p\n", ptr);
+@@ -847,7 +854,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey)
+ 	pkey_assert(ptr != (void *)-1);
+ 	mprotect_pkey(ptr, size, prot, pkey);
+ 
+-	record_pkey_malloc(ptr, size);
++	record_pkey_malloc(ptr, size, prot);
+ 
+ 	dprintf1("mmap()'d hugetlbfs for pkey %d @ %p\n", pkey, ptr);
+ 	return ptr;
+@@ -869,7 +876,7 @@ void *malloc_pkey_mmap_dax(long size, int prot, u16 pkey)
+ 
+ 	mprotect_pkey(ptr, size, prot, pkey);
+ 
+-	record_pkey_malloc(ptr, size);
++	record_pkey_malloc(ptr, size, prot);
+ 
+ 	dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr);
+ 	close(fd);
+@@ -918,13 +925,21 @@ void *malloc_pkey(long size, int prot, u16 pkey)
+ }
+ 
+ int last_pkru_faults;
++#define UNKNOWN_PKEY -2
+ void expected_pk_fault(int pkey)
+ {
+ 	dprintf2("%s(): last_pkru_faults: %d pkru_faults: %d\n",
+ 			__func__, last_pkru_faults, pkru_faults);
+ 	dprintf2("%s(%d): last_si_pkey: %d\n", __func__, pkey, last_si_pkey);
+ 	pkey_assert(last_pkru_faults + 1 == pkru_faults);
+-	pkey_assert(last_si_pkey == pkey);
++
++       /*
++	* For exec-only memory, we do not know the pkey in
++	* advance, so skip this check.
++	*/
++	if (pkey != UNKNOWN_PKEY)
++		pkey_assert(last_si_pkey == pkey);
++
+ 	/*
+ 	 * The signal handler shold have cleared out PKRU to let the
+ 	 * test program continue.  We now have to restore it.
+@@ -939,10 +954,11 @@ void expected_pk_fault(int pkey)
+ 	last_si_pkey = -1;
+ }
+ 
+-void do_not_expect_pk_fault(void)
+-{
+-	pkey_assert(last_pkru_faults == pkru_faults);
+-}
++#define do_not_expect_pk_fault(msg)	do {			\
++	if (last_pkru_faults != pkru_faults)			\
++		dprintf0("unexpected PK fault: %s\n", msg);	\
++	pkey_assert(last_pkru_faults == pkru_faults);		\
++} while (0)
+ 
+ int test_fds[10] = { -1 };
+ int nr_test_fds;
+@@ -1151,12 +1167,15 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+ 	pkey_assert(i < NR_PKEYS*2);
+ 
+ 	/*
+-	 * There are 16 pkeys supported in hardware.  One is taken
+-	 * up for the default (0) and another can be taken up by
+-	 * an execute-only mapping.  Ensure that we can allocate
+-	 * at least 14 (16-2).
++	 * There are 16 pkeys supported in hardware.  Three are
++	 * allocated by the time we get here:
++	 *   1. The default key (0)
++	 *   2. One possibly consumed by an execute-only mapping.
++	 *   3. One allocated by the test code and passed in via
++	 *      'pkey' to this function.
++	 * Ensure that we can allocate at least another 13 (16-3).
+ 	 */
+-	pkey_assert(i >= NR_PKEYS-2);
++	pkey_assert(i >= NR_PKEYS-3);
+ 
+ 	for (i = 0; i < nr_allocated_pkeys; i++) {
+ 		err = sys_pkey_free(allocated_pkeys[i]);
+@@ -1165,6 +1184,35 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
+ 	}
+ }
+ 
++/*
++ * pkey 0 is special.  It is allocated by default, so you do not
++ * have to call pkey_alloc() to use it first.  Make sure that it
++ * is usable.
++ */
++void test_mprotect_with_pkey_0(int *ptr, u16 pkey)
++{
++	long size;
++	int prot;
++
++	assert(pkey_last_malloc_record);
++	size = pkey_last_malloc_record->size;
++	/*
++	 * This is a bit of a hack.  But mprotect() requires
++	 * huge-page-aligned sizes when operating on hugetlbfs.
++	 * So, make sure that we use something that's a multiple
++	 * of a huge page when we can.
++	 */
++	if (size >= HPAGE_SIZE)
++		size = HPAGE_SIZE;
++	prot = pkey_last_malloc_record->prot;
++
++	/* Use pkey 0 */
++	mprotect_pkey(ptr, size, prot, 0);
++
++	/* Make sure that we can set it back to the original pkey. */
++	mprotect_pkey(ptr, size, prot, pkey);
++}
++
+ void test_ptrace_of_child(int *ptr, u16 pkey)
+ {
+ 	__attribute__((__unused__)) int peek_result;
+@@ -1228,7 +1276,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
+ 	pkey_assert(ret != -1);
+ 	/* Now access from the current task, and expect NO exception: */
+ 	peek_result = read_ptr(plain_ptr);
+-	do_not_expect_pk_fault();
++	do_not_expect_pk_fault("read plain pointer after ptrace");
+ 
+ 	ret = ptrace(PTRACE_DETACH, child_pid, ignored, 0);
+ 	pkey_assert(ret != -1);
+@@ -1241,12 +1289,9 @@ void test_ptrace_of_child(int *ptr, u16 pkey)
+ 	free(plain_ptr_unaligned);
+ }
+ 
+-void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
++void *get_pointer_to_instructions(void)
+ {
+ 	void *p1;
+-	int scratch;
+-	int ptr_contents;
+-	int ret;
+ 
+ 	p1 = ALIGN_PTR_UP(&lots_o_noops_around_write, PAGE_SIZE);
+ 	dprintf3("&lots_o_noops: %p\n", &lots_o_noops_around_write);
+@@ -1256,7 +1301,23 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
+ 	/* Point 'p1' at the *second* page of the function: */
+ 	p1 += PAGE_SIZE;
+ 
++	/*
++	 * Try to ensure we fault this in on next touch to ensure
++	 * we get an instruction fault as opposed to a data one
++	 */
+ 	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
++
++	return p1;
++}
++
++void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
++{
++	void *p1;
++	int scratch;
++	int ptr_contents;
++	int ret;
++
++	p1 = get_pointer_to_instructions();
+ 	lots_o_noops_around_write(&scratch);
+ 	ptr_contents = read_ptr(p1);
+ 	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+@@ -1272,12 +1333,55 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey)
+ 	 */
+ 	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
+ 	lots_o_noops_around_write(&scratch);
+-	do_not_expect_pk_fault();
++	do_not_expect_pk_fault("executing on PROT_EXEC memory");
+ 	ptr_contents = read_ptr(p1);
+ 	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
+ 	expected_pk_fault(pkey);
+ }
+ 
++void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey)
++{
++	void *p1;
++	int scratch;
++	int ptr_contents;
++	int ret;
++
++	dprintf1("%s() start\n", __func__);
++
++	p1 = get_pointer_to_instructions();
++	lots_o_noops_around_write(&scratch);
++	ptr_contents = read_ptr(p1);
++	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
++
++	/* Use a *normal* mprotect(), not mprotect_pkey(): */
++	ret = mprotect(p1, PAGE_SIZE, PROT_EXEC);
++	pkey_assert(!ret);
++
++	dprintf2("pkru: %x\n", rdpkru());
++
++	/* Make sure this is an *instruction* fault */
++	madvise(p1, PAGE_SIZE, MADV_DONTNEED);
++	lots_o_noops_around_write(&scratch);
++	do_not_expect_pk_fault("executing on PROT_EXEC memory");
++	ptr_contents = read_ptr(p1);
++	dprintf2("ptr (%p) contents@%d: %x\n", p1, __LINE__, ptr_contents);
++	expected_pk_fault(UNKNOWN_PKEY);
++
++	/*
++	 * Put the memory back to non-PROT_EXEC.  Should clear the
++	 * exec-only pkey off the VMA and allow it to be readable
++	 * again.  Go to PROT_NONE first to check for a kernel bug
++	 * that did not clear the pkey when doing PROT_NONE.
++	 */
++	ret = mprotect(p1, PAGE_SIZE, PROT_NONE);
++	pkey_assert(!ret);
++
++	ret = mprotect(p1, PAGE_SIZE, PROT_READ|PROT_EXEC);
++	pkey_assert(!ret);
++	ptr_contents = read_ptr(p1);
++	do_not_expect_pk_fault("plain read on recently PROT_EXEC area");
++}
++
+ void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey)
+ {
+ 	int size = PAGE_SIZE;
+@@ -1302,6 +1406,8 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = {
+ 	test_kernel_gup_of_access_disabled_region,
+ 	test_kernel_gup_write_to_write_disabled_region,
+ 	test_executing_on_unreadable_memory,
++	test_implicit_mprotect_exec_only_memory,
++	test_mprotect_with_pkey_0,
+ 	test_ptrace_of_child,
+ 	test_pkey_syscalls_on_non_allocated_pkey,
+ 	test_pkey_syscalls_bad_args,
+diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+index e21e2f49b005..ffc587bf4742 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
++++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
+@@ -14,6 +14,8 @@
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
++#include <linux/nospec.h>
++
+ #include <kvm/iodev.h>
+ #include <kvm/arm_vgic.h>
+ 
+@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
+ 
+ 		if (n > vgic_v3_max_apr_idx(vcpu))
+ 			return 0;
++
++		n = array_index_nospec(n, 4);
++
+ 		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+ 		return vgicv3->vgic_ap1r[n];
+ 	}
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 4232c40b34f8..b38360c6c7d2 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -599,6 +599,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
+ 
+ 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+ 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
++		bool target_vcpu_needs_kick = false;
+ 
+ 		spin_lock(&irq->irq_lock);
+ 
+@@ -669,11 +670,18 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
+ 			list_del(&irq->ap_list);
+ 			irq->vcpu = target_vcpu;
+ 			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
++			target_vcpu_needs_kick = true;
+ 		}
+ 
+ 		spin_unlock(&irq->irq_lock);
+ 		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+ 		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
++
++		if (target_vcpu_needs_kick) {
++			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
++			kvm_vcpu_kick(target_vcpu);
++		}
++
+ 		goto retry;
+ 	}
+ 


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-06-16 15:45 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-06-16 15:45 UTC (permalink / raw
  To: gentoo-commits

commit:     cc18b98313f05cf141a5a076b3ea74613eb80b75
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 16 15:44:53 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 16 15:44:53 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cc18b983

Linux patch 4.16.16

 0000_README              |    4 +
 1015_linux-4.16.16.patch | 2637 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2641 insertions(+)

diff --git a/0000_README b/0000_README
index d817caf..83e0c3b 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-4.16.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.15
 
+Patch:  1015_linux-4.16.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1015_linux-4.16.16.patch b/1015_linux-4.16.16.patch
new file mode 100644
index 0000000..b47c64a
--- /dev/null
+++ b/1015_linux-4.16.16.patch
@@ -0,0 +1,2637 @@
+diff --git a/Makefile b/Makefile
+index e45c66b27241..55554f392115 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index b24b1c8b3979..0f82cd91cd3c 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -107,11 +107,12 @@ struct x86_emulate_ops {
+ 	 *  @addr:  [IN ] Linear address from which to read.
+ 	 *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+ 	 *  @bytes: [IN ] Number of bytes to read from memory.
++	 *  @system:[IN ] Whether the access is forced to be at CPL0.
+ 	 */
+ 	int (*read_std)(struct x86_emulate_ctxt *ctxt,
+ 			unsigned long addr, void *val,
+ 			unsigned int bytes,
+-			struct x86_exception *fault);
++			struct x86_exception *fault, bool system);
+ 
+ 	/*
+ 	 * read_phys: Read bytes of standard (non-emulated/special) memory.
+@@ -129,10 +130,11 @@ struct x86_emulate_ops {
+ 	 *  @addr:  [IN ] Linear address to which to write.
+ 	 *  @val:   [OUT] Value write to memory, zero-extended to 'u_long'.
+ 	 *  @bytes: [IN ] Number of bytes to write to memory.
++	 *  @system:[IN ] Whether the access is forced to be at CPL0.
+ 	 */
+ 	int (*write_std)(struct x86_emulate_ctxt *ctxt,
+ 			 unsigned long addr, void *val, unsigned int bytes,
+-			 struct x86_exception *fault);
++			 struct x86_exception *fault, bool system);
+ 	/*
+ 	 * fetch: Read bytes of standard (non-emulated/special) memory.
+ 	 *        Used for instruction fetch.
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index d91eaeb01034..defbce750e7c 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -811,6 +811,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+ 	return assign_eip_near(ctxt, ctxt->_eip + rel);
+ }
+ 
++static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
++			      void *data, unsigned size)
++{
++	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
++static int linear_write_system(struct x86_emulate_ctxt *ctxt,
++			       ulong linear, void *data,
++			       unsigned int size)
++{
++	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
++}
++
+ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ 			      struct segmented_address addr,
+ 			      void *data,
+@@ -822,7 +835,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+ 	rc = linearize(ctxt, addr, size, false, &linear);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+-	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
++	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+@@ -836,7 +849,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ 	rc = linearize(ctxt, addr, size, true, &linear);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+-	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
++	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
+ }
+ 
+ /*
+@@ -1495,8 +1508,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
+ 		return emulate_gp(ctxt, index << 3 | 0x2);
+ 
+ 	addr = dt.address + index * 8;
+-	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+-				   &ctxt->exception);
++	return linear_read_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+@@ -1559,8 +1571,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
+-				   &ctxt->exception);
++	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
+ }
+ 
+ /* allowed just for 8 bytes segments */
+@@ -1574,8 +1585,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+-				    &ctxt->exception);
++	return linear_write_system(ctxt, addr, desc, sizeof *desc);
+ }
+ 
+ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+@@ -1736,8 +1746,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ 				return ret;
+ 		}
+ 	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+-		ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+-				sizeof(base3), &ctxt->exception);
++		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
+ 		if (ret != X86EMUL_CONTINUE)
+ 			return ret;
+ 		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
+@@ -2050,11 +2059,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+ 	eip_addr = dt.address + (irq << 2);
+ 	cs_addr = dt.address + (irq << 2) + 2;
+ 
+-	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
++	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+-	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
++	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
+ 	if (rc != X86EMUL_CONTINUE)
+ 		return rc;
+ 
+@@ -2907,12 +2916,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
+ #ifdef CONFIG_X86_64
+ 	base |= ((u64)base3) << 32;
+ #endif
+-	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
++	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
+ 	if (r != X86EMUL_CONTINUE)
+ 		return false;
+ 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
+ 		return false;
+-	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
++	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
+ 	if (r != X86EMUL_CONTINUE)
+ 		return false;
+ 	if ((perm >> bit_idx) & mask)
+@@ -3041,35 +3050,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+ 			  u16 tss_selector, u16 old_tss_sel,
+ 			  ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-	const struct x86_emulate_ops *ops = ctxt->ops;
+ 	struct tss_segment_16 tss_seg;
+ 	int ret;
+ 	u32 new_tss_base = get_desc_base(new_desc);
+ 
+-	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-			    &ctxt->exception);
++	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+ 	save_state_to_tss16(ctxt, &tss_seg);
+ 
+-	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-			     &ctxt->exception);
++	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+-	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-			    &ctxt->exception);
++	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+ 	if (old_tss_sel != 0xffff) {
+ 		tss_seg.prev_task_link = old_tss_sel;
+ 
+-		ret = ops->write_std(ctxt, new_tss_base,
+-				     &tss_seg.prev_task_link,
+-				     sizeof tss_seg.prev_task_link,
+-				     &ctxt->exception);
++		ret = linear_write_system(ctxt, new_tss_base,
++					  &tss_seg.prev_task_link,
++					  sizeof tss_seg.prev_task_link);
+ 		if (ret != X86EMUL_CONTINUE)
+ 			return ret;
+ 	}
+@@ -3185,38 +3189,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+ 			  u16 tss_selector, u16 old_tss_sel,
+ 			  ulong old_tss_base, struct desc_struct *new_desc)
+ {
+-	const struct x86_emulate_ops *ops = ctxt->ops;
+ 	struct tss_segment_32 tss_seg;
+ 	int ret;
+ 	u32 new_tss_base = get_desc_base(new_desc);
+ 	u32 eip_offset = offsetof(struct tss_segment_32, eip);
+ 	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
+ 
+-	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
+-			    &ctxt->exception);
++	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+ 	save_state_to_tss32(ctxt, &tss_seg);
+ 
+ 	/* Only GP registers and segment selectors are saved */
+-	ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+-			     ldt_sel_offset - eip_offset, &ctxt->exception);
++	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
++				  ldt_sel_offset - eip_offset);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+-	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
+-			    &ctxt->exception);
++	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+ 	if (ret != X86EMUL_CONTINUE)
+ 		return ret;
+ 
+ 	if (old_tss_sel != 0xffff) {
+ 		tss_seg.prev_task_link = old_tss_sel;
+ 
+-		ret = ops->write_std(ctxt, new_tss_base,
+-				     &tss_seg.prev_task_link,
+-				     sizeof tss_seg.prev_task_link,
+-				     &ctxt->exception);
++		ret = linear_write_system(ctxt, new_tss_base,
++					  &tss_seg.prev_task_link,
++					  sizeof tss_seg.prev_task_link);
+ 		if (ret != X86EMUL_CONTINUE)
+ 			return ret;
+ 	}
+@@ -4177,7 +4177,9 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+ 				maxphyaddr = eax & 0xff;
+ 			else
+ 				maxphyaddr = 36;
+-			rsvd = rsvd_bits(maxphyaddr, 62);
++			rsvd = rsvd_bits(maxphyaddr, 63);
++			if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
++				rsvd &= ~CR3_PCID_INVD;
+ 		}
+ 
+ 		if (new_val & rsvd)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 11e2147c3824..e3b589e28264 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7344,8 +7344,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+ 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ 		return 1;
+ 
+-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+-				sizeof(*vmpointer), &e)) {
++	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
+ 		kvm_inject_page_fault(vcpu, &e);
+ 		return 1;
+ 	}
+@@ -7426,6 +7425,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ 		return 1;
+ 	}
+ 
++	/* CPL=0 must be checked manually. */
++	if (vmx_get_cpl(vcpu)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 1;
++	}
++
+ 	if (vmx->nested.vmxon) {
+ 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+ 		return kvm_skip_emulated_instruction(vcpu);
+@@ -7485,6 +7490,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+  */
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
++	if (vmx_get_cpl(vcpu)) {
++		kvm_queue_exception(vcpu, UD_VECTOR);
++		return 0;
++	}
++
+ 	if (!to_vmx(vcpu)->nested.vmxon) {
+ 		kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return 0;
+@@ -7785,9 +7795,9 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ 		if (get_vmx_mem_address(vcpu, exit_qualification,
+ 				vmx_instruction_info, true, &gva))
+ 			return 1;
+-		/* _system ok, as hardware has verified cpl=0 */
+-		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
+-			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
++		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
++		kvm_write_guest_virt_system(vcpu, gva, &field_value,
++					    (is_long_mode(vcpu) ? 8 : 4), NULL);
+ 	}
+ 
+ 	nested_vmx_succeed(vcpu);
+@@ -7825,8 +7835,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ 		if (get_vmx_mem_address(vcpu, exit_qualification,
+ 				vmx_instruction_info, false, &gva))
+ 			return 1;
+-		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
+-			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++		if (kvm_read_guest_virt(vcpu, gva, &field_value,
++					(is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ 			kvm_inject_page_fault(vcpu, &e);
+ 			return 1;
+ 		}
+@@ -7945,10 +7955,10 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ 	if (get_vmx_mem_address(vcpu, exit_qualification,
+ 			vmx_instruction_info, true, &vmcs_gva))
+ 		return 1;
+-	/* ok to use *_system, as hardware has verified cpl=0 */
+-	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
+-				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
+-				 sizeof(u64), &e)) {
++	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
++	if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
++					(void *)&to_vmx(vcpu)->nested.current_vmptr,
++					sizeof(u64), &e)) {
+ 		kvm_inject_page_fault(vcpu, &e);
+ 		return 1;
+ 	}
+@@ -7995,8 +8005,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ 			vmx_instruction_info, false, &gva))
+ 		return 1;
+-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+-				sizeof(operand), &e)) {
++	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ 		kvm_inject_page_fault(vcpu, &e);
+ 		return 1;
+ 	}
+@@ -8060,8 +8069,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ 			vmx_instruction_info, false, &gva))
+ 		return 1;
+-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+-				sizeof(operand), &e)) {
++	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ 		kvm_inject_page_fault(vcpu, &e);
+ 		return 1;
+ 	}
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 999560ff12b5..cf08ac8a910c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -844,7 +844,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ 	}
+ 
+ 	if (is_long_mode(vcpu) &&
+-	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 62)))
++	    (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63)))
+ 		return 1;
+ 	else if (is_pae(vcpu) && is_paging(vcpu) &&
+ 		   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+@@ -4703,11 +4703,10 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
+ 	return X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ 			       gva_t addr, void *val, unsigned int bytes,
+ 			       struct x86_exception *exception)
+ {
+-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ 
+ 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+@@ -4715,12 +4714,17 @@ int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+ }
+ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+ 
+-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-				      gva_t addr, void *val, unsigned int bytes,
+-				      struct x86_exception *exception)
++static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
++			     gva_t addr, void *val, unsigned int bytes,
++			     struct x86_exception *exception, bool system)
+ {
+ 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
++	u32 access = 0;
++
++	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++		access |= PFERR_USER_MASK;
++
++	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
+ }
+ 
+ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+@@ -4732,18 +4736,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
+ 	return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
+ }
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+-				       gva_t addr, void *val,
+-				       unsigned int bytes,
+-				       struct x86_exception *exception)
++static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
++				      struct kvm_vcpu *vcpu, u32 access,
++				      struct x86_exception *exception)
+ {
+-	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ 	void *data = val;
+ 	int r = X86EMUL_CONTINUE;
+ 
+ 	while (bytes) {
+ 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+-							     PFERR_WRITE_MASK,
++							     access,
+ 							     exception);
+ 		unsigned offset = addr & (PAGE_SIZE-1);
+ 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
+@@ -4764,6 +4766,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+ out:
+ 	return r;
+ }
++
++static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
++			      unsigned int bytes, struct x86_exception *exception,
++			      bool system)
++{
++	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++	u32 access = PFERR_WRITE_MASK;
++
++	if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
++		access |= PFERR_USER_MASK;
++
++	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++					   access, exception);
++}
++
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
++				unsigned int bytes, struct x86_exception *exception)
++{
++	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
++					   PFERR_WRITE_MASK, exception);
++}
+ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+ 
+ static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
+@@ -5492,8 +5515,8 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt, u64 smbase)
+ static const struct x86_emulate_ops emulate_ops = {
+ 	.read_gpr            = emulator_read_gpr,
+ 	.write_gpr           = emulator_write_gpr,
+-	.read_std            = kvm_read_guest_virt_system,
+-	.write_std           = kvm_write_guest_virt_system,
++	.read_std            = emulator_read_std,
++	.write_std           = emulator_write_std,
+ 	.read_phys           = kvm_read_guest_phys_system,
+ 	.fetch               = kvm_fetch_guest_virt,
+ 	.read_emulated       = emulator_read_emulated,
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index b91215d1fd80..949bd317ad5c 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -213,11 +213,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
+ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ u64 get_kvmclock_ns(struct kvm *kvm);
+ 
+-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
++int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
+ 	gva_t addr, void *val, unsigned int bytes,
+ 	struct x86_exception *exception);
+ 
+-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
++int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
+ 	gva_t addr, void *val, unsigned int bytes,
+ 	struct x86_exception *exception);
+ 
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index acb7252c7e81..416079fdc8a6 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -328,7 +328,11 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ 	if (!rep.nr_zones)
+ 		return -EINVAL;
+ 
+-	zones = kcalloc(rep.nr_zones, sizeof(struct blk_zone), GFP_KERNEL);
++	if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
++		return -ERANGE;
++
++	zones = kvmalloc(rep.nr_zones * sizeof(struct blk_zone),
++			GFP_KERNEL | __GFP_ZERO);
+ 	if (!zones)
+ 		return -ENOMEM;
+ 
+@@ -350,7 +354,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
+ 	}
+ 
+  out:
+-	kfree(zones);
++	kvfree(zones);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 2188235be02d..526c1b0e7dcb 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -760,15 +760,18 @@ struct aead_edesc {
+  * @src_nents: number of segments in input s/w scatterlist
+  * @dst_nents: number of segments in output s/w scatterlist
+  * @iv_dma: dma address of iv for checking continuity and link table
++ * @iv_dir: DMA mapping direction for IV
+  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+  * @sec4_sg_dma: bus physical mapped address of h/w link table
+  * @sec4_sg: pointer to h/w link table
+  * @hw_desc: the h/w job descriptor followed by any referenced link tables
++ *	     and IV
+  */
+ struct ablkcipher_edesc {
+ 	int src_nents;
+ 	int dst_nents;
+ 	dma_addr_t iv_dma;
++	enum dma_data_direction iv_dir;
+ 	int sec4_sg_bytes;
+ 	dma_addr_t sec4_sg_dma;
+ 	struct sec4_sg_entry *sec4_sg;
+@@ -778,7 +781,8 @@ struct ablkcipher_edesc {
+ static void caam_unmap(struct device *dev, struct scatterlist *src,
+ 		       struct scatterlist *dst, int src_nents,
+ 		       int dst_nents,
+-		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
++		       dma_addr_t iv_dma, int ivsize,
++		       enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ 		       int sec4_sg_bytes)
+ {
+ 	if (dst != src) {
+@@ -790,7 +794,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
+ 	}
+ 
+ 	if (iv_dma)
+-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
++		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ 	if (sec4_sg_bytes)
+ 		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
+ 				 DMA_TO_DEVICE);
+@@ -801,7 +805,7 @@ static void aead_unmap(struct device *dev,
+ 		       struct aead_request *req)
+ {
+ 	caam_unmap(dev, req->src, req->dst,
+-		   edesc->src_nents, edesc->dst_nents, 0, 0,
++		   edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+ 
+@@ -814,7 +818,7 @@ static void ablkcipher_unmap(struct device *dev,
+ 
+ 	caam_unmap(dev, req->src, req->dst,
+ 		   edesc->src_nents, edesc->dst_nents,
+-		   edesc->iv_dma, ivsize,
++		   edesc->iv_dma, ivsize, edesc->iv_dir,
+ 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+ }
+ 
+@@ -903,6 +907,18 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ 	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ 				 ivsize, 0);
+ 
++	/* In case initial IV was generated, copy it in GIVCIPHER request */
++	if (edesc->iv_dir == DMA_FROM_DEVICE) {
++		u8 *iv;
++		struct skcipher_givcrypt_request *greq;
++
++		greq = container_of(req, struct skcipher_givcrypt_request,
++				    creq);
++		iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
++		     edesc->sec4_sg_bytes;
++		memcpy(greq->giv, iv, ivsize);
++	}
++
+ 	kfree(edesc);
+ 
+ 	ablkcipher_request_complete(req, err);
+@@ -913,10 +929,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ {
+ 	struct ablkcipher_request *req = context;
+ 	struct ablkcipher_edesc *edesc;
++#ifdef DEBUG
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 
+-#ifdef DEBUG
+ 	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+ 
+@@ -934,14 +950,6 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ 		     edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ 
+ 	ablkcipher_unmap(jrdev, edesc, req);
+-
+-	/*
+-	 * The crypto API expects us to set the IV (req->info) to the last
+-	 * ciphertext block.
+-	 */
+-	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+-				 ivsize, 0);
+-
+ 	kfree(edesc);
+ 
+ 	ablkcipher_request_complete(req, err);
+@@ -1090,15 +1098,14 @@ static void init_authenc_job(struct aead_request *req,
+  */
+ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ 				struct ablkcipher_edesc *edesc,
+-				struct ablkcipher_request *req,
+-				bool iv_contig)
++				struct ablkcipher_request *req)
+ {
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	u32 *desc = edesc->hw_desc;
+-	u32 out_options = 0, in_options;
+-	dma_addr_t dst_dma, src_dma;
+-	int len, sec4_sg_index = 0;
++	u32 out_options = 0;
++	dma_addr_t dst_dma;
++	int len;
+ 
+ #ifdef DEBUG
+ 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
+@@ -1114,30 +1121,18 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ 	len = desc_len(sh_desc);
+ 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ 
+-	if (iv_contig) {
+-		src_dma = edesc->iv_dma;
+-		in_options = 0;
+-	} else {
+-		src_dma = edesc->sec4_sg_dma;
+-		sec4_sg_index += edesc->src_nents + 1;
+-		in_options = LDST_SGF;
+-	}
+-	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
++	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
++			  LDST_SGF);
+ 
+ 	if (likely(req->src == req->dst)) {
+-		if (edesc->src_nents == 1 && iv_contig) {
+-			dst_dma = sg_dma_address(req->src);
+-		} else {
+-			dst_dma = edesc->sec4_sg_dma +
+-				sizeof(struct sec4_sg_entry);
+-			out_options = LDST_SGF;
+-		}
++		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
++		out_options = LDST_SGF;
+ 	} else {
+ 		if (edesc->dst_nents == 1) {
+ 			dst_dma = sg_dma_address(req->dst);
+ 		} else {
+-			dst_dma = edesc->sec4_sg_dma +
+-				sec4_sg_index * sizeof(struct sec4_sg_entry);
++			dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) *
++				  sizeof(struct sec4_sg_entry);
+ 			out_options = LDST_SGF;
+ 		}
+ 	}
+@@ -1149,13 +1144,12 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+  */
+ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ 				    struct ablkcipher_edesc *edesc,
+-				    struct ablkcipher_request *req,
+-				    bool iv_contig)
++				    struct ablkcipher_request *req)
+ {
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	u32 *desc = edesc->hw_desc;
+-	u32 out_options, in_options;
++	u32 in_options;
+ 	dma_addr_t dst_dma, src_dma;
+ 	int len, sec4_sg_index = 0;
+ 
+@@ -1181,15 +1175,9 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ 	}
+ 	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+ 
+-	if (iv_contig) {
+-		dst_dma = edesc->iv_dma;
+-		out_options = 0;
+-	} else {
+-		dst_dma = edesc->sec4_sg_dma +
+-			  sec4_sg_index * sizeof(struct sec4_sg_entry);
+-		out_options = LDST_SGF;
+-	}
+-	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
++	dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
++		  sizeof(struct sec4_sg_entry);
++	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ }
+ 
+ /*
+@@ -1278,7 +1266,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ 			GFP_DMA | flags);
+ 	if (!edesc) {
+ 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-			   0, 0, 0);
++			   0, DMA_NONE, 0, 0);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -1482,8 +1470,7 @@ static int aead_decrypt(struct aead_request *req)
+  * allocate and map the ablkcipher extended descriptor for ablkcipher
+  */
+ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+-						       *req, int desc_bytes,
+-						       bool *iv_contig_out)
++						       *req, int desc_bytes)
+ {
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+@@ -1492,8 +1479,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 		       GFP_KERNEL : GFP_ATOMIC;
+ 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ 	struct ablkcipher_edesc *edesc;
+-	dma_addr_t iv_dma = 0;
+-	bool in_contig;
++	dma_addr_t iv_dma;
++	u8 *iv;
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ 
+@@ -1537,33 +1524,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 		}
+ 	}
+ 
+-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+-	if (dma_mapping_error(jrdev, iv_dma)) {
+-		dev_err(jrdev, "unable to map IV\n");
+-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-			   0, 0, 0);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	if (mapped_src_nents == 1 &&
+-	    iv_dma + ivsize == sg_dma_address(req->src)) {
+-		in_contig = true;
+-		sec4_sg_ents = 0;
+-	} else {
+-		in_contig = false;
+-		sec4_sg_ents = 1 + mapped_src_nents;
+-	}
++	sec4_sg_ents = 1 + mapped_src_nents;
+ 	dst_sg_idx = sec4_sg_ents;
+ 	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+ 
+-	/* allocate space for base edesc and hw desc commands, link tables */
+-	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++	/*
++	 * allocate space for base edesc and hw desc commands, link tables, IV
++	 */
++	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+ 			GFP_DMA | flags);
+ 	if (!edesc) {
+ 		dev_err(jrdev, "could not allocate extended descriptor\n");
+-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, 0, 0);
++		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, DMA_NONE, 0, 0);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -1572,13 +1546,24 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 	edesc->sec4_sg_bytes = sec4_sg_bytes;
+ 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ 			 desc_bytes;
++	edesc->iv_dir = DMA_TO_DEVICE;
+ 
+-	if (!in_contig) {
+-		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+-		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+-				   edesc->sec4_sg + 1, 0);
++	/* Make sure IV is located in a DMAable area */
++	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++	memcpy(iv, req->info, ivsize);
++
++	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
++	if (dma_mapping_error(jrdev, iv_dma)) {
++		dev_err(jrdev, "unable to map IV\n");
++		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, DMA_NONE, 0, 0);
++		kfree(edesc);
++		return ERR_PTR(-ENOMEM);
+ 	}
+ 
++	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
++	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
++
+ 	if (mapped_dst_nents > 1) {
+ 		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+ 				   edesc->sec4_sg + dst_sg_idx, 0);
+@@ -1589,7 +1574,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ 		dev_err(jrdev, "unable to map S/G table\n");
+ 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, 0, 0);
++			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ 		kfree(edesc);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+@@ -1602,7 +1587,6 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 		       sec4_sg_bytes, 1);
+ #endif
+ 
+-	*iv_contig_out = in_contig;
+ 	return edesc;
+ }
+ 
+@@ -1612,19 +1596,16 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ 	struct device *jrdev = ctx->jrdev;
+-	bool iv_contig;
+ 	u32 *desc;
+ 	int ret = 0;
+ 
+ 	/* allocate extended descriptor */
+-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-				       CAAM_CMD_SZ, &iv_contig);
++	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ 	if (IS_ERR(edesc))
+ 		return PTR_ERR(edesc);
+ 
+ 	/* Create and submit job descriptor*/
+-	init_ablkcipher_job(ctx->sh_desc_enc,
+-		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
++	init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ #ifdef DEBUG
+ 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+@@ -1648,20 +1629,25 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
+ 	struct ablkcipher_edesc *edesc;
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	struct device *jrdev = ctx->jrdev;
+-	bool iv_contig;
+ 	u32 *desc;
+ 	int ret = 0;
+ 
+ 	/* allocate extended descriptor */
+-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+-				       CAAM_CMD_SZ, &iv_contig);
++	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ 	if (IS_ERR(edesc))
+ 		return PTR_ERR(edesc);
+ 
++	/*
++	 * The crypto API expects us to set the IV (req->info) to the last
++	 * ciphertext block.
++	 */
++	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
++				 ivsize, 0);
++
+ 	/* Create and submit job descriptor*/
+-	init_ablkcipher_job(ctx->sh_desc_dec,
+-		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
++	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ 	desc = edesc->hw_desc;
+ #ifdef DEBUG
+ 	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+@@ -1686,8 +1672,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
+  */
+ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 				struct skcipher_givcrypt_request *greq,
+-				int desc_bytes,
+-				bool *iv_contig_out)
++				int desc_bytes)
+ {
+ 	struct ablkcipher_request *req = &greq->creq;
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+@@ -1697,8 +1682,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 		       GFP_KERNEL : GFP_ATOMIC;
+ 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ 	struct ablkcipher_edesc *edesc;
+-	dma_addr_t iv_dma = 0;
+-	bool out_contig;
++	dma_addr_t iv_dma;
++	u8 *iv;
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ 
+@@ -1743,36 +1728,20 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 		}
+ 	}
+ 
+-	/*
+-	 * Check if iv can be contiguous with source and destination.
+-	 * If so, include it. If not, create scatterlist.
+-	 */
+-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+-	if (dma_mapping_error(jrdev, iv_dma)) {
+-		dev_err(jrdev, "unable to map IV\n");
+-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
+-			   0, 0, 0);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+ 	sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ 	dst_sg_idx = sec4_sg_ents;
+-	if (mapped_dst_nents == 1 &&
+-	    iv_dma + ivsize == sg_dma_address(req->dst)) {
+-		out_contig = true;
+-	} else {
+-		out_contig = false;
+-		sec4_sg_ents += 1 + mapped_dst_nents;
+-	}
++	sec4_sg_ents += 1 + mapped_dst_nents;
+ 
+-	/* allocate space for base edesc and hw desc commands, link tables */
++	/*
++	 * allocate space for base edesc and hw desc commands, link tables, IV
++	 */
+ 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+-	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
++	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
+ 			GFP_DMA | flags);
+ 	if (!edesc) {
+ 		dev_err(jrdev, "could not allocate extended descriptor\n");
+-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, 0, 0);
++		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, DMA_NONE, 0, 0);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -1781,24 +1750,33 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 	edesc->sec4_sg_bytes = sec4_sg_bytes;
+ 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ 			 desc_bytes;
++	edesc->iv_dir = DMA_FROM_DEVICE;
++
++	/* Make sure IV is located in a DMAable area */
++	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
++	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
++	if (dma_mapping_error(jrdev, iv_dma)) {
++		dev_err(jrdev, "unable to map IV\n");
++		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, DMA_NONE, 0, 0);
++		kfree(edesc);
++		return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	if (mapped_src_nents > 1)
+ 		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
+ 				   0);
+ 
+-	if (!out_contig) {
+-		dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
+-				   iv_dma, ivsize, 0);
+-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+-				   edesc->sec4_sg + dst_sg_idx + 1, 0);
+-	}
++	dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
++	sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
++			   dst_sg_idx + 1, 0);
+ 
+ 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ 					    sec4_sg_bytes, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ 		dev_err(jrdev, "unable to map S/G table\n");
+ 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, 0, 0);
++			   iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
+ 		kfree(edesc);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+@@ -1811,7 +1789,6 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 		       sec4_sg_bytes, 1);
+ #endif
+ 
+-	*iv_contig_out = out_contig;
+ 	return edesc;
+ }
+ 
+@@ -1822,19 +1799,17 @@ static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ 	struct device *jrdev = ctx->jrdev;
+-	bool iv_contig = false;
+ 	u32 *desc;
+ 	int ret = 0;
+ 
+ 	/* allocate extended descriptor */
+-	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+-				       CAAM_CMD_SZ, &iv_contig);
++	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ 	if (IS_ERR(edesc))
+ 		return PTR_ERR(edesc);
+ 
+ 	/* Create and submit job descriptor*/
+ 	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+-				edesc, req, iv_contig);
++				edesc, req);
+ #ifdef DEBUG
+ 	print_hex_dump(KERN_ERR,
+ 		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index 4aecc9435f69..03e017233051 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -423,7 +423,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+  * @assoclen: associated data length, in CAAM endianness
+  * @assoclen_dma: bus physical mapped address of req->assoclen
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct aead_edesc {
+ 	int src_nents;
+@@ -434,9 +434,6 @@ struct aead_edesc {
+ 	unsigned int assoclen;
+ 	dma_addr_t assoclen_dma;
+ 	struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_AEAD_SG						\
+-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
+-	 sizeof(struct qm_sg_entry))
+ 	struct qm_sg_entry sgt[0];
+ };
+ 
+@@ -448,7 +445,7 @@ struct aead_edesc {
+  * @qm_sg_bytes: length of dma mapped h/w link table
+  * @qm_sg_dma: bus physical mapped address of h/w link table
+  * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table
++ * @sgt: the h/w link table, followed by IV
+  */
+ struct ablkcipher_edesc {
+ 	int src_nents;
+@@ -457,9 +454,6 @@ struct ablkcipher_edesc {
+ 	int qm_sg_bytes;
+ 	dma_addr_t qm_sg_dma;
+ 	struct caam_drv_req drv_req;
+-#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
+-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
+-	 sizeof(struct qm_sg_entry))
+ 	struct qm_sg_entry sgt[0];
+ };
+ 
+@@ -671,17 +665,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ 		}
+ 	}
+ 
+-	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ 		ivsize = crypto_aead_ivsize(aead);
+-		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+-		if (dma_mapping_error(qidev, iv_dma)) {
+-			dev_err(qidev, "unable to map IV\n");
+-			caam_unmap(qidev, req->src, req->dst, src_nents,
+-				   dst_nents, 0, 0, op_type, 0, 0);
+-			qi_cache_free(edesc);
+-			return ERR_PTR(-ENOMEM);
+-		}
+-	}
+ 
+ 	/*
+ 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+@@ -689,16 +674,33 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ 	 */
+ 	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+ 		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
+-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, op_type, 0, 0);
++	sg_table = &edesc->sgt[0];
++	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
++		     CAAM_QI_MEMCACHE_SIZE)) {
++		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++			qm_sg_ents, ivsize);
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
+ 		qi_cache_free(edesc);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+-	sg_table = &edesc->sgt[0];
+-	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++
++	if (ivsize) {
++		u8 *iv = (u8 *)(sg_table + qm_sg_ents);
++
++		/* Make sure IV is located in a DMAable area */
++		memcpy(iv, req->iv, ivsize);
++
++		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++		if (dma_mapping_error(qidev, iv_dma)) {
++			dev_err(qidev, "unable to map IV\n");
++			caam_unmap(qidev, req->src, req->dst, src_nents,
++				   dst_nents, 0, 0, 0, 0, 0);
++			qi_cache_free(edesc);
++			return ERR_PTR(-ENOMEM);
++		}
++	}
+ 
+ 	edesc->src_nents = src_nents;
+ 	edesc->dst_nents = dst_nents;
+@@ -835,15 +837,27 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ #endif
+ 
+ 	ablkcipher_unmap(qidev, edesc, req);
+-	qi_cache_free(edesc);
++
++	/* In case initial IV was generated, copy it in GIVCIPHER request */
++	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
++		u8 *iv;
++		struct skcipher_givcrypt_request *greq;
++
++		greq = container_of(req, struct skcipher_givcrypt_request,
++				    creq);
++		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
++		memcpy(greq->giv, iv, ivsize);
++	}
+ 
+ 	/*
+ 	 * The crypto API expects us to set the IV (req->info) to the last
+ 	 * ciphertext block. This is used e.g. by the CTS mode.
+ 	 */
+-	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+-				 ivsize, 0);
++	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
++		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
++					 ivsize, ivsize, 0);
+ 
++	qi_cache_free(edesc);
+ 	ablkcipher_request_complete(req, status);
+ }
+ 
+@@ -858,9 +872,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ 	struct ablkcipher_edesc *edesc;
+ 	dma_addr_t iv_dma;
+-	bool in_contig;
++	u8 *iv;
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+-	int dst_sg_idx, qm_sg_ents;
++	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ 	struct qm_sg_entry *sg_table, *fd_sgt;
+ 	struct caam_drv_ctx *drv_ctx;
+ 	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
+@@ -907,55 +921,53 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 		}
+ 	}
+ 
+-	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
+-	if (dma_mapping_error(qidev, iv_dma)) {
+-		dev_err(qidev, "unable to map IV\n");
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-			   0, 0, 0, 0);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+-	if (mapped_src_nents == 1 &&
+-	    iv_dma + ivsize == sg_dma_address(req->src)) {
+-		in_contig = true;
+-		qm_sg_ents = 0;
+-	} else {
+-		in_contig = false;
+-		qm_sg_ents = 1 + mapped_src_nents;
+-	}
++	qm_sg_ents = 1 + mapped_src_nents;
+ 	dst_sg_idx = qm_sg_ents;
+ 
+ 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, op_type, 0, 0);
++	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++			qm_sg_ents, ivsize);
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	/* allocate space for base edesc and link tables */
++	/* allocate space for base edesc, link tables and IV */
+ 	edesc = qi_cache_alloc(GFP_DMA | flags);
+ 	if (unlikely(!edesc)) {
+ 		dev_err(qidev, "could not allocate extended descriptor\n");
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, op_type, 0, 0);
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	/* Make sure IV is located in a DMAable area */
++	sg_table = &edesc->sgt[0];
++	iv = (u8 *)(sg_table + qm_sg_ents);
++	memcpy(iv, req->info, ivsize);
++
++	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++	if (dma_mapping_error(qidev, iv_dma)) {
++		dev_err(qidev, "unable to map IV\n");
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
++		qi_cache_free(edesc);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ 	edesc->src_nents = src_nents;
+ 	edesc->dst_nents = dst_nents;
+ 	edesc->iv_dma = iv_dma;
+-	sg_table = &edesc->sgt[0];
+-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++	edesc->qm_sg_bytes = qm_sg_bytes;
+ 	edesc->drv_req.app_ctx = req;
+ 	edesc->drv_req.cbk = ablkcipher_done;
+ 	edesc->drv_req.drv_ctx = drv_ctx;
+ 
+-	if (!in_contig) {
+-		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+-		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+-	}
++	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+ 
+ 	if (mapped_dst_nents > 1)
+ 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+@@ -973,20 +985,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ 
+ 	fd_sgt = &edesc->drv_req.fd_sgt[0];
+ 
+-	if (!in_contig)
+-		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+-					  ivsize + req->nbytes, 0);
+-	else
+-		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
+-				      0);
++	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++				  ivsize + req->nbytes, 0);
+ 
+ 	if (req->src == req->dst) {
+-		if (!in_contig)
+-			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+-					     sizeof(*sg_table), req->nbytes, 0);
+-		else
+-			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+-					 req->nbytes, 0);
++		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++				     sizeof(*sg_table), req->nbytes, 0);
+ 	} else if (mapped_dst_nents > 1) {
+ 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+ 				     sizeof(*sg_table), req->nbytes, 0);
+@@ -1010,10 +1014,10 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
+ 	struct ablkcipher_edesc *edesc;
+ 	dma_addr_t iv_dma;
+-	bool out_contig;
++	u8 *iv;
+ 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	struct qm_sg_entry *sg_table, *fd_sgt;
+-	int dst_sg_idx, qm_sg_ents;
++	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ 	struct caam_drv_ctx *drv_ctx;
+ 
+ 	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
+@@ -1061,46 +1065,45 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 		mapped_dst_nents = src_nents;
+ 	}
+ 
+-	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
+-	if (dma_mapping_error(qidev, iv_dma)) {
+-		dev_err(qidev, "unable to map IV\n");
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+-			   0, 0, 0, 0);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+ 	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
+ 	dst_sg_idx = qm_sg_ents;
+-	if (mapped_dst_nents == 1 &&
+-	    iv_dma + ivsize == sg_dma_address(req->dst)) {
+-		out_contig = true;
+-	} else {
+-		out_contig = false;
+-		qm_sg_ents += 1 + mapped_dst_nents;
+-	}
+ 
+-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
+-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
+-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
++	qm_sg_ents += 1 + mapped_dst_nents;
++	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
++	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
++		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
++		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++			qm_sg_ents, ivsize);
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	/* allocate space for base edesc and link tables */
++	/* allocate space for base edesc, link tables and IV */
+ 	edesc = qi_cache_alloc(GFP_DMA | flags);
+ 	if (!edesc) {
+ 		dev_err(qidev, "could not allocate extended descriptor\n");
+-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
++		return ERR_PTR(-ENOMEM);
++	}
++
++	/* Make sure IV is located in a DMAable area */
++	sg_table = &edesc->sgt[0];
++	iv = (u8 *)(sg_table + qm_sg_ents);
++	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
++	if (dma_mapping_error(qidev, iv_dma)) {
++		dev_err(qidev, "unable to map IV\n");
++		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++			   0, 0, 0, 0);
++		qi_cache_free(edesc);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+ 	edesc->src_nents = src_nents;
+ 	edesc->dst_nents = dst_nents;
+ 	edesc->iv_dma = iv_dma;
+-	sg_table = &edesc->sgt[0];
+-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++	edesc->qm_sg_bytes = qm_sg_bytes;
+ 	edesc->drv_req.app_ctx = req;
+ 	edesc->drv_req.cbk = ablkcipher_done;
+ 	edesc->drv_req.drv_ctx = drv_ctx;
+@@ -1108,11 +1111,9 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 	if (mapped_src_nents > 1)
+ 		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
+ 
+-	if (!out_contig) {
+-		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+-				 dst_sg_idx + 1, 0);
+-	}
++	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
++	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
++			 0);
+ 
+ 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
+ 					  DMA_TO_DEVICE);
+@@ -1133,13 +1134,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+ 		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
+ 				 req->nbytes, 0);
+ 
+-	if (!out_contig)
+-		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+-				     sizeof(*sg_table), ivsize + req->nbytes,
+-				     0);
+-	else
+-		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
+-				 ivsize + req->nbytes, 0);
++	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++			     sizeof(*sg_table), ivsize + req->nbytes, 0);
+ 
+ 	return edesc;
+ }
+@@ -1149,6 +1145,7 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+ 	struct ablkcipher_edesc *edesc;
+ 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ 	int ret;
+ 
+ 	if (unlikely(caam_congested))
+@@ -1159,6 +1156,14 @@ static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+ 	if (IS_ERR(edesc))
+ 		return PTR_ERR(edesc);
+ 
++	/*
++	 * The crypto API expects us to set the IV (req->info) to the last
++	 * ciphertext block.
++	 */
++	if (!encrypt)
++		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
++					 ivsize, ivsize, 0);
++
+ 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ 	if (!ret) {
+ 		ret = -EINPROGRESS;
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 7a897209f181..7ff4a25440ac 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -66,7 +66,7 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+ 	struct caam_rsa_key *key = &ctx->key;
+ 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+ 	size_t p_sz = key->p_sz;
+-	size_t q_sz = key->p_sz;
++	size_t q_sz = key->q_sz;
+ 
+ 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+@@ -83,7 +83,7 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+ 	struct caam_rsa_key *key = &ctx->key;
+ 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+ 	size_t p_sz = key->p_sz;
+-	size_t q_sz = key->p_sz;
++	size_t q_sz = key->q_sz;
+ 
+ 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+@@ -166,18 +166,71 @@ static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
+ 	akcipher_request_complete(req, err);
+ }
+ 
++static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
++					unsigned int nbytes,
++					unsigned int flags)
++{
++	struct sg_mapping_iter miter;
++	int lzeros, ents;
++	unsigned int len;
++	unsigned int tbytes = nbytes;
++	const u8 *buff;
++
++	ents = sg_nents_for_len(sgl, nbytes);
++	if (ents < 0)
++		return ents;
++
++	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);
++
++	lzeros = 0;
++	len = 0;
++	while (nbytes > 0) {
++		while (len && !*buff) {
++			lzeros++;
++			len--;
++			buff++;
++		}
++
++		if (len && *buff)
++			break;
++
++		sg_miter_next(&miter);
++		buff = miter.addr;
++		len = miter.length;
++
++		nbytes -= lzeros;
++		lzeros = 0;
++	}
++
++	miter.consumed = lzeros;
++	sg_miter_stop(&miter);
++	nbytes -= lzeros;
++
++	return tbytes - nbytes;
++}
++
+ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
+ 					 size_t desclen)
+ {
+ 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ 	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ 	struct device *dev = ctx->dev;
++	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
+ 	struct rsa_edesc *edesc;
+ 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ 		       GFP_KERNEL : GFP_ATOMIC;
++	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
+ 	int sgc;
+ 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ 	int src_nents, dst_nents;
++	int lzeros;
++
++	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
++	if (lzeros < 0)
++		return ERR_PTR(lzeros);
++
++	req->src_len -= lzeros;
++	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);
+ 
+ 	src_nents = sg_nents_for_len(req->src, req->src_len);
+ 	dst_nents = sg_nents_for_len(req->dst, req->dst_len);
+@@ -344,7 +397,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ 	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
+ 	int sec4_sg_index = 0;
+ 	size_t p_sz = key->p_sz;
+-	size_t q_sz = key->p_sz;
++	size_t q_sz = key->q_sz;
+ 
+ 	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(dev, pdb->d_dma)) {
+@@ -419,7 +472,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ 	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
+ 	int sec4_sg_index = 0;
+ 	size_t p_sz = key->p_sz;
+-	size_t q_sz = key->p_sz;
++	size_t q_sz = key->q_sz;
+ 
+ 	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(dev, pdb->p_dma)) {
+@@ -953,6 +1006,7 @@ static struct akcipher_alg caam_rsa = {
+ 	.max_size = caam_rsa_max_size,
+ 	.init = caam_rsa_init_tfm,
+ 	.exit = caam_rsa_exit_tfm,
++	.reqsize = sizeof(struct caam_rsa_req_ctx),
+ 	.base = {
+ 		.cra_name = "rsa",
+ 		.cra_driver_name = "rsa-caam",
+diff --git a/drivers/crypto/caam/caampkc.h b/drivers/crypto/caam/caampkc.h
+index fd145c46eae1..82645bcf8b27 100644
+--- a/drivers/crypto/caam/caampkc.h
++++ b/drivers/crypto/caam/caampkc.h
+@@ -95,6 +95,14 @@ struct caam_rsa_ctx {
+ 	struct device *dev;
+ };
+ 
++/**
++ * caam_rsa_req_ctx - per request context.
++ * @src: input scatterlist (stripped of leading zeros)
++ */
++struct caam_rsa_req_ctx {
++	struct scatterlist src[2];
++};
++
+ /**
+  * rsa_edesc - s/w-extended rsa descriptor
+  * @src_nents     : number of segments in input scatterlist
+diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h
+index dc451e0a43c5..58fb3ed6e644 100644
+--- a/drivers/crypto/cavium/zip/common.h
++++ b/drivers/crypto/cavium/zip/common.h
+@@ -46,8 +46,10 @@
+ #ifndef __COMMON_H__
+ #define __COMMON_H__
+ 
++#include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
+@@ -149,6 +151,25 @@ struct zip_operation {
+ 	u32   sizeofzops;
+ };
+ 
++static inline int zip_poll_result(union zip_zres_s *result)
++{
++	int retries = 1000;
++
++	while (!result->s.compcode) {
++		if (!--retries) {
++			pr_err("ZIP ERR: request timed out");
++			return -ETIMEDOUT;
++		}
++		udelay(10);
++		/*
++		 * Force re-reading of compcode which is updated
++		 * by the ZIP coprocessor.
++		 */
++		rmb();
++	}
++	return 0;
++}
++
+ /* error messages */
+ #define zip_err(fmt, args...) pr_err("ZIP ERR:%s():%d: " \
+ 			      fmt "\n", __func__, __LINE__, ## args)
+diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
+index 8df4d26cf9d4..b92b6e7e100f 100644
+--- a/drivers/crypto/cavium/zip/zip_crypto.c
++++ b/drivers/crypto/cavium/zip/zip_crypto.c
+@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
+ 		 struct zip_kernel_ctx *zip_ctx)
+ {
+ 	struct zip_operation  *zip_ops   = NULL;
+-	struct zip_state      zip_state;
++	struct zip_state      *zip_state;
+ 	struct zip_device     *zip = NULL;
+ 	int ret;
+ 
+@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
+ 	if (!zip)
+ 		return -ENODEV;
+ 
+-	memset(&zip_state, 0, sizeof(struct zip_state));
++	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++	if (!zip_state)
++		return -ENOMEM;
++
+ 	zip_ops = &zip_ctx->zip_comp;
+ 
+ 	zip_ops->input_len  = slen;
+ 	zip_ops->output_len = *dlen;
+ 	memcpy(zip_ops->input, src, slen);
+ 
+-	ret = zip_deflate(zip_ops, &zip_state, zip);
++	ret = zip_deflate(zip_ops, zip_state, zip);
+ 
+ 	if (!ret) {
+ 		*dlen = zip_ops->output_len;
+ 		memcpy(dst, zip_ops->output, *dlen);
+ 	}
+-
++	kfree(zip_state);
+ 	return ret;
+ }
+ 
+@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
+ 		   struct zip_kernel_ctx *zip_ctx)
+ {
+ 	struct zip_operation  *zip_ops   = NULL;
+-	struct zip_state      zip_state;
++	struct zip_state      *zip_state;
+ 	struct zip_device     *zip = NULL;
+ 	int ret;
+ 
+@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
+ 	if (!zip)
+ 		return -ENODEV;
+ 
+-	memset(&zip_state, 0, sizeof(struct zip_state));
++	zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
++	if (!zip_state)
++		return -ENOMEM;
++
+ 	zip_ops = &zip_ctx->zip_decomp;
+ 	memcpy(zip_ops->input, src, slen);
+ 
+@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
+ 	zip_ops->input_len  = slen;
+ 	zip_ops->output_len = *dlen;
+ 
+-	ret = zip_inflate(zip_ops, &zip_state, zip);
++	ret = zip_inflate(zip_ops, zip_state, zip);
+ 
+ 	if (!ret) {
+ 		*dlen = zip_ops->output_len;
+ 		memcpy(dst, zip_ops->output, *dlen);
+ 	}
+-
++	kfree(zip_state);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/crypto/cavium/zip/zip_deflate.c b/drivers/crypto/cavium/zip/zip_deflate.c
+index 9a944b8c1e29..d7133f857d67 100644
+--- a/drivers/crypto/cavium/zip/zip_deflate.c
++++ b/drivers/crypto/cavium/zip/zip_deflate.c
+@@ -129,8 +129,8 @@ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s,
+ 	/* Stats update for compression requests submitted */
+ 	atomic64_inc(&zip_dev->stats.comp_req_submit);
+ 
+-	while (!result_ptr->s.compcode)
+-		continue;
++	/* Wait for completion or error */
++	zip_poll_result(result_ptr);
+ 
+ 	/* Stats update for compression requests completed */
+ 	atomic64_inc(&zip_dev->stats.comp_req_complete);
+diff --git a/drivers/crypto/cavium/zip/zip_inflate.c b/drivers/crypto/cavium/zip/zip_inflate.c
+index 50cbdd83dbf2..7e0d73e2f89e 100644
+--- a/drivers/crypto/cavium/zip/zip_inflate.c
++++ b/drivers/crypto/cavium/zip/zip_inflate.c
+@@ -143,8 +143,8 @@ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s,
+ 	/* Decompression requests submitted stats update */
+ 	atomic64_inc(&zip_dev->stats.decomp_req_submit);
+ 
+-	while (!result_ptr->s.compcode)
+-		continue;
++	/* Wait for completion or error */
++	zip_poll_result(result_ptr);
+ 
+ 	/* Decompression requests completed stats update */
+ 	atomic64_inc(&zip_dev->stats.decomp_req_complete);
+diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
+index db1e241104ed..1a2a51f2262a 100644
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ 				struct net_device *dev,
+ 				void *pos)
+ {
++	struct cpl_tx_pkt_core *cpl;
++	struct sge_eth_txq *q;
+ 	struct adapter *adap;
+ 	struct port_info *pi;
+-	struct sge_eth_txq *q;
+-	struct cpl_tx_pkt_core *cpl;
+-	u64 cntrl = 0;
+ 	u32 ctrl0, qidx;
++	u64 cntrl = 0;
++	int left;
+ 
+ 	pi = netdev_priv(dev);
+ 	adap = pi->adapter;
+ 	qidx = skb->queue_mapping;
+ 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ 
++	left = (void *)q->q.stat - pos;
++	if (!left)
++		pos = q->q.desc;
++
+ 	cpl = (struct cpl_tx_pkt_core *)pos;
+ 
+ 	if (skb->ip_summed == CHECKSUM_PARTIAL)
+@@ -383,18 +388,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ 				void *pos,
+ 				struct ipsec_sa_entry *sa_entry)
+ {
+-	struct adapter *adap;
+-	struct port_info *pi;
+-	struct sge_eth_txq *q;
+-	unsigned int len, qidx;
+ 	struct _key_ctx *key_ctx;
+ 	int left, eoq, key_len;
++	struct sge_eth_txq *q;
++	struct adapter *adap;
++	struct port_info *pi;
++	unsigned int qidx;
+ 
+ 	pi = netdev_priv(dev);
+ 	adap = pi->adapter;
+ 	qidx = skb->queue_mapping;
+ 	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+-	len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ 	key_len = sa_entry->kctx_len;
+ 
+ 	/* end of queue, reset pos to start of queue */
+@@ -412,19 +416,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ 	pos += sizeof(struct _key_ctx);
+ 	left -= sizeof(struct _key_ctx);
+ 
+-	if (likely(len <= left)) {
++	if (likely(key_len <= left)) {
+ 		memcpy(key_ctx->key, sa_entry->key, key_len);
+ 		pos += key_len;
+ 	} else {
+-		if (key_len <= left) {
+-			memcpy(pos, sa_entry->key, key_len);
+-			pos += key_len;
+-		} else {
+-			memcpy(pos, sa_entry->key, left);
+-			memcpy(q->q.desc, sa_entry->key + left,
+-			       key_len - left);
+-			pos = (u8 *)q->q.desc + (key_len - left);
+-		}
++		memcpy(pos, sa_entry->key, left);
++		memcpy(q->q.desc, sa_entry->key + left,
++		       key_len - left);
++		pos = (u8 *)q->q.desc + (key_len - left);
+ 	}
+ 	/* Copy CPL TX PKT XT */
+ 	pos = copy_cpltx_pktxt(skb, dev, pos);
+diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
+index 86b89ace836f..07235d0be29c 100644
+--- a/drivers/crypto/omap-sham.c
++++ b/drivers/crypto/omap-sham.c
+@@ -1082,7 +1082,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
+ 
+ 	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
+ 		free_pages((unsigned long)sg_virt(ctx->sg),
+-			   get_order(ctx->sg->length));
++			   get_order(ctx->sg->length + ctx->bufcnt));
+ 
+ 	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
+ 		kfree(ctx->sg);
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index 96072b9b55c4..d7316f7a3a69 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -48,8 +48,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
+ 		       alg, PTR_ERR(fallback));
+ 		return PTR_ERR(fallback);
+ 	}
+-	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-	       crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+ 
+ 	crypto_cipher_set_flags(fallback,
+ 				crypto_cipher_get_flags((struct
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 7394d35d5936..5285ece4f33a 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -52,9 +52,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
+ 		       alg, PTR_ERR(fallback));
+ 		return PTR_ERR(fallback);
+ 	}
+-	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-		crypto_skcipher_driver_name(fallback));
+-
+ 
+ 	crypto_skcipher_set_flags(
+ 		fallback,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index fc60d00a2e84..cd777c75291d 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -50,8 +50,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
+ 		       alg, PTR_ERR(fallback));
+ 		return PTR_ERR(fallback);
+ 	}
+-	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-		crypto_skcipher_driver_name(fallback));
+ 
+ 	crypto_skcipher_set_flags(
+ 		fallback,
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 8cd6e62e4c90..8bd9aff0f55f 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -53,8 +53,6 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
+ 			alg, PTR_ERR(fallback));
+ 		return PTR_ERR(fallback);
+ 	}
+-	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-		crypto_skcipher_driver_name(fallback));
+ 
+ 	crypto_skcipher_set_flags(
+ 		fallback,
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index 27a94a119009..1c4b5b889fba 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+ 		       alg, PTR_ERR(fallback));
+ 		return PTR_ERR(fallback);
+ 	}
+-	printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+-	       crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+ 
+ 	crypto_shash_set_flags(fallback,
+ 			       crypto_shash_get_flags((struct crypto_shash
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 9d2688f3f961..cba80b0df188 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -123,6 +123,7 @@ static const struct xpad_device {
+ 	u8 mapping;
+ 	u8 xtype;
+ } xpad_device[] = {
++	{ 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+ 	{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ 	{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+@@ -408,6 +409,7 @@ static const signed short xpad_abs_triggers[] = {
+ 
+ static const struct usb_device_id xpad_table[] = {
+ 	{ USB_INTERFACE_INFO('X', 'B', 0) },	/* X-Box USB-IF not approved class */
++	XPAD_XBOX360_VENDOR(0x0079),		/* GPD Win 2 Controller */
+ 	XPAD_XBOX360_VENDOR(0x044f),		/* Thrustmaster X-Box 360 controllers */
+ 	XPAD_XBOX360_VENDOR(0x045e),		/* Microsoft X-Box 360 controllers */
+ 	XPAD_XBOXONE_VENDOR(0x045e),		/* Microsoft X-Box One controllers */
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 75e757520ef0..93967c8139e7 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1262,6 +1262,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ 	{ "ELAN060B", 0 },
+ 	{ "ELAN060C", 0 },
+ 	{ "ELAN0611", 0 },
++	{ "ELAN0612", 0 },
+ 	{ "ELAN1000", 0 },
+ 	{ }
+ };
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index 9736c83dd418..f2d9c2c41885 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -933,6 +933,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id goodix_acpi_match[] = {
+ 	{ "GDIX1001", 0 },
++	{ "GDIX1002", 0 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 9047c0a529b2..efd733472a35 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -576,15 +576,9 @@ static void vmballoon_pop(struct vmballoon *b)
+ 		}
+ 	}
+ 
+-	if (b->batch_page) {
+-		vunmap(b->batch_page);
+-		b->batch_page = NULL;
+-	}
+-
+-	if (b->page) {
+-		__free_page(b->page);
+-		b->page = NULL;
+-	}
++	/* Clearing the batch_page unconditionally has no adverse effect */
++	free_page((unsigned long)b->batch_page);
++	b->batch_page = NULL;
+ }
+ 
+ /*
+@@ -991,16 +985,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
+ 
+ static bool vmballoon_init_batching(struct vmballoon *b)
+ {
+-	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
+-	if (!b->page)
+-		return false;
++	struct page *page;
+ 
+-	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
+-	if (!b->batch_page) {
+-		__free_page(b->page);
++	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++	if (!page)
+ 		return false;
+-	}
+ 
++	b->batch_page = page_address(page);
+ 	return true;
+ }
+ 
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index e153e8b64bb8..d5553c47014f 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -62,6 +62,9 @@ struct pn533_usb_phy {
+ 	struct urb *out_urb;
+ 	struct urb *in_urb;
+ 
++	struct urb *ack_urb;
++	u8 *ack_buffer;
++
+ 	struct pn533 *priv;
+ };
+ 
+@@ -150,13 +153,16 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+ 	struct pn533_usb_phy *phy = dev->phy;
+ 	static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ 	/* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
+-	int rc;
+ 
+-	phy->out_urb->transfer_buffer = (u8 *)ack;
+-	phy->out_urb->transfer_buffer_length = sizeof(ack);
+-	rc = usb_submit_urb(phy->out_urb, flags);
++	if (!phy->ack_buffer) {
++		phy->ack_buffer = kmemdup(ack, sizeof(ack), flags);
++		if (!phy->ack_buffer)
++			return -ENOMEM;
++	}
+ 
+-	return rc;
++	phy->ack_urb->transfer_buffer = phy->ack_buffer;
++	phy->ack_urb->transfer_buffer_length = sizeof(ack);
++	return usb_submit_urb(phy->ack_urb, flags);
+ }
+ 
+ static int pn533_usb_send_frame(struct pn533 *dev,
+@@ -375,26 +381,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+ 	/* Power on th reader (CCID cmd) */
+ 	u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
+ 		      0, 0, 0, 0, 0, 0, 3, 0, 0};
++	char *buffer;
++	int transferred;
+ 	int rc;
+ 	void *cntx;
+ 	struct pn533_acr122_poweron_rdr_arg arg;
+ 
+ 	dev_dbg(&phy->udev->dev, "%s\n", __func__);
+ 
++	buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
++	if (!buffer)
++		return -ENOMEM;
++
+ 	init_completion(&arg.done);
+ 	cntx = phy->in_urb->context;  /* backup context */
+ 
+ 	phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
+ 	phy->in_urb->context = &arg;
+ 
+-	phy->out_urb->transfer_buffer = cmd;
+-	phy->out_urb->transfer_buffer_length = sizeof(cmd);
+-
+ 	print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ 		       cmd, sizeof(cmd), false);
+ 
+-	rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+-	if (rc) {
++	rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd),
++			  &transferred, 0);
++	kfree(buffer);
++	if (rc || (transferred != sizeof(cmd))) {
+ 		nfc_err(&phy->udev->dev,
+ 			"Reader power on cmd error %d\n", rc);
+ 		return rc;
+@@ -490,8 +501,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ 
+ 	phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 	phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
++	phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 
+-	if (!phy->in_urb || !phy->out_urb)
++	if (!phy->in_urb || !phy->out_urb || !phy->ack_urb)
+ 		goto error;
+ 
+ 	usb_fill_bulk_urb(phy->in_urb, phy->udev,
+@@ -501,7 +513,9 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ 	usb_fill_bulk_urb(phy->out_urb, phy->udev,
+ 			  usb_sndbulkpipe(phy->udev, out_endpoint),
+ 			  NULL, 0, pn533_send_complete, phy);
+-
++	usb_fill_bulk_urb(phy->ack_urb, phy->udev,
++			  usb_sndbulkpipe(phy->udev, out_endpoint),
++			  NULL, 0, pn533_send_complete, phy);
+ 
+ 	switch (id->driver_info) {
+ 	case PN533_DEVICE_STD:
+@@ -554,6 +568,7 @@ static int pn533_usb_probe(struct usb_interface *interface,
+ error:
+ 	usb_free_urb(phy->in_urb);
+ 	usb_free_urb(phy->out_urb);
++	usb_free_urb(phy->ack_urb);
+ 	usb_put_dev(phy->udev);
+ 	kfree(in_buf);
+ 
+@@ -573,10 +588,13 @@ static void pn533_usb_disconnect(struct usb_interface *interface)
+ 
+ 	usb_kill_urb(phy->in_urb);
+ 	usb_kill_urb(phy->out_urb);
++	usb_kill_urb(phy->ack_urb);
+ 
+ 	kfree(phy->in_urb->transfer_buffer);
+ 	usb_free_urb(phy->in_urb);
+ 	usb_free_urb(phy->out_urb);
++	usb_free_urb(phy->ack_urb);
++	kfree(phy->ack_buffer);
+ 
+ 	nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
+ }
+diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+index 6c575244c0fb..af9b7005a2ba 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
++++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+@@ -178,6 +178,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
+ 	struct device *dev = &qphy->phy->dev;
+ 	u8 *val;
+ 
++	/* efuse register is optional */
++	if (!qphy->cell)
++		return;
++
+ 	/*
+ 	 * Read efuse register having TUNE2 parameter's high nibble.
+ 	 * If efuse register shows value as 0x0, or if we fail to find
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index 57e0d8035b2e..b68700e58432 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -119,8 +119,11 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+ 
+ void ion_buffer_destroy(struct ion_buffer *buffer)
+ {
+-	if (WARN_ON(buffer->kmap_cnt > 0))
++	if (buffer->kmap_cnt > 0) {
++		pr_warn_once("%s: buffer still mapped in the kernel\n",
++			     __func__);
+ 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
++	}
+ 	buffer->heap->ops->free(buffer);
+ 	kfree(buffer);
+ }
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 57f6eba47f44..425d393658ed 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1101,13 +1101,14 @@ static int omap8250_no_handle_irq(struct uart_port *port)
+ 	return 0;
+ }
+ 
++static const u8 omap4_habit = UART_ERRATA_CLOCK_DISABLE;
+ static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+ static const u8 dra742_habit = UART_ERRATA_CLOCK_DISABLE;
+ 
+ static const struct of_device_id omap8250_dt_ids[] = {
+ 	{ .compatible = "ti,omap2-uart" },
+ 	{ .compatible = "ti,omap3-uart" },
+-	{ .compatible = "ti,omap4-uart" },
++	{ .compatible = "ti,omap4-uart", .data = &omap4_habit, },
+ 	{ .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+ 	{ .compatible = "ti,am4372-uart", .data = &am3352_habit, },
+ 	{ .compatible = "ti,dra742-uart", .data = &dra742_habit, },
+@@ -1344,6 +1345,19 @@ static int omap8250_soft_reset(struct device *dev)
+ 	int sysc;
+ 	int syss;
+ 
++	/*
++	 * At least on omap4, unused uarts may not idle after reset without
++	 * a basic scr dma configuration even with no dma in use. The
++	 * module clkctrl status bits will be 1 instead of 3 blocking idle
++	 * for the whole clockdomain. The softreset below will clear scr,
++	 * and we restore it on resume so this is safe to do on all SoCs
++	 * needing omap8250_soft_reset() quirk. Do it in two writes as
++	 * recommended in the comment for omap8250_update_scr().
++	 */
++	serial_out(up, UART_OMAP_SCR, OMAP_UART_SCR_DMAMODE_1);
++	serial_out(up, UART_OMAP_SCR,
++		   OMAP_UART_SCR_DMAMODE_1 | OMAP_UART_SCR_DMAMODE_CTL);
++
+ 	sysc = serial_in(up, UART_OMAP_SYSC);
+ 
+ 	/* softreset the UART */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 4b40a5b449ee..ebd33c0232e6 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1727,10 +1727,26 @@ static int pl011_allocate_irq(struct uart_amba_port *uap)
+  */
+ static void pl011_enable_interrupts(struct uart_amba_port *uap)
+ {
++	unsigned int i;
++
+ 	spin_lock_irq(&uap->port.lock);
+ 
+ 	/* Clear out any spuriously appearing RX interrupts */
+ 	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
++
++	/*
++	 * RXIS is asserted only when the RX FIFO transitions from below
++	 * to above the trigger threshold.  If the RX FIFO is already
++	 * full to the threshold this can't happen and RXIS will now be
++	 * stuck off.  Drain the RX FIFO explicitly to fix this:
++	 */
++	for (i = 0; i < uap->fifosize * 2; ++i) {
++		if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE)
++			break;
++
++		pl011_read(uap, REG_DR);
++	}
++
+ 	uap->im = UART011_RTIM;
+ 	if (!pl011_dma_rx_running(uap))
+ 		uap->im |= UART011_RXIM;
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index e287fe8f10fc..55b3eff148b1 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1757,7 +1757,6 @@ static int atmel_startup(struct uart_port *port)
+ {
+ 	struct platform_device *pdev = to_platform_device(port->dev);
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+-	struct tty_struct *tty = port->state->port.tty;
+ 	int retval;
+ 
+ 	/*
+@@ -1772,8 +1771,8 @@ static int atmel_startup(struct uart_port *port)
+ 	 * Allocate the IRQ
+ 	 */
+ 	retval = request_irq(port->irq, atmel_interrupt,
+-			IRQF_SHARED | IRQF_COND_SUSPEND,
+-			tty ? tty->name : "atmel_serial", port);
++			     IRQF_SHARED | IRQF_COND_SUSPEND,
++			     dev_name(&pdev->dev), port);
+ 	if (retval) {
+ 		dev_err(port->dev, "atmel_startup - Can't get irq\n");
+ 		return retval;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 3f2f8c118ce0..64e96926f1ad 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -862,15 +862,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
+ 	dma->rx_conf.direction		= DMA_DEV_TO_MEM;
+ 	dma->rx_conf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
+ 	dma->rx_conf.src_addr		= p->port.mapbase + S3C2410_URXH;
+-	dma->rx_conf.src_maxburst	= 16;
++	dma->rx_conf.src_maxburst	= 1;
+ 
+ 	dma->tx_conf.direction		= DMA_MEM_TO_DEV;
+ 	dma->tx_conf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
+ 	dma->tx_conf.dst_addr		= p->port.mapbase + S3C2410_UTXH;
+-	if (dma_get_cache_alignment() >= 16)
+-		dma->tx_conf.dst_maxburst = 16;
+-	else
+-		dma->tx_conf.dst_maxburst = 1;
++	dma->tx_conf.dst_maxburst	= 1;
+ 
+ 	dma->rx_chan = dma_request_chan(p->port.dev, "rx");
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index ab757546c6db..b8382135a78e 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -2691,8 +2691,8 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
+ 			dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
+ 				PTR_ERR(clk));
+ 		else
+-			dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
+-				clk, clk);
++			dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
++				clk, clk_get_rate(clk));
+ 		sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
+ 	}
+ 	return 0;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index 0c11d40a12bc..7b137003c2be 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -940,7 +940,7 @@ int usb_set_isoch_delay(struct usb_device *dev)
+ 	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 			USB_REQ_SET_ISOCH_DELAY,
+ 			USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+-			cpu_to_le16(dev->hub_delay), 0, NULL, 0,
++			dev->hub_delay, 0, NULL, 0,
+ 			USB_CTRL_SET_TIMEOUT);
+ }
+ 
+diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
+index d359efe06c76..9c7ed2539ff7 100644
+--- a/drivers/usb/gadget/function/f_printer.c
++++ b/drivers/usb/gadget/function/f_printer.c
+@@ -631,19 +631,19 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			return -EAGAIN;
+ 		}
+ 
++		list_add(&req->list, &dev->tx_reqs_active);
++
+ 		/* here, we unlock, and only unlock, to avoid deadlock. */
+ 		spin_unlock(&dev->lock);
+ 		value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
+ 		spin_lock(&dev->lock);
+ 		if (value) {
++			list_del(&req->list);
+ 			list_add(&req->list, &dev->tx_reqs);
+ 			spin_unlock_irqrestore(&dev->lock, flags);
+ 			mutex_unlock(&dev->lock_printer_io);
+ 			return -EAGAIN;
+ 		}
+-
+-		list_add(&req->list, &dev->tx_reqs_active);
+-
+ 	}
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 409cde4e6a51..5caf78bbbf7c 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -333,6 +333,7 @@ struct renesas_usb3 {
+ 	struct extcon_dev *extcon;
+ 	struct work_struct extcon_work;
+ 	struct phy *phy;
++	struct dentry *dentry;
+ 
+ 	struct renesas_usb3_ep *usb3_ep;
+ 	int num_usb3_eps;
+@@ -622,6 +623,13 @@ static void usb3_disconnect(struct renesas_usb3 *usb3)
+ 	usb3_usb2_pullup(usb3, 0);
+ 	usb3_clear_bit(usb3, USB30_CON_B3_CONNECT, USB3_USB30_CON);
+ 	usb3_reset_epc(usb3);
++	usb3_disable_irq_1(usb3, USB_INT_1_B2_RSUM | USB_INT_1_B3_PLLWKUP |
++			   USB_INT_1_B3_LUPSUCS | USB_INT_1_B3_DISABLE |
++			   USB_INT_1_SPEED | USB_INT_1_B3_WRMRST |
++			   USB_INT_1_B3_HOTRST | USB_INT_1_B2_SPND |
++			   USB_INT_1_B2_L1SPND | USB_INT_1_B2_USBRST);
++	usb3_clear_bit(usb3, USB_COM_CON_SPD_MODE, USB3_USB_COM_CON);
++	usb3_init_epc_registers(usb3);
+ 
+ 	if (usb3->driver)
+ 		usb3->driver->disconnect(&usb3->gadget);
+@@ -2393,8 +2401,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3,
+ 
+ 	file = debugfs_create_file("b_device", 0644, root, usb3,
+ 				   &renesas_usb3_b_device_fops);
+-	if (!file)
++	if (!file) {
+ 		dev_info(dev, "%s: Can't create debugfs mode\n", __func__);
++		debugfs_remove_recursive(root);
++	} else {
++		usb3->dentry = root;
++	}
+ }
+ 
+ /*------- platform_driver ------------------------------------------------*/
+@@ -2402,14 +2414,13 @@ static int renesas_usb3_remove(struct platform_device *pdev)
+ {
+ 	struct renesas_usb3 *usb3 = platform_get_drvdata(pdev);
+ 
++	debugfs_remove_recursive(usb3->dentry);
+ 	device_remove_file(&pdev->dev, &dev_attr_role);
+ 
+ 	usb_del_gadget_udc(&usb3->gadget);
+ 	renesas_usb3_dma_free_prd(usb3, &pdev->dev);
+ 
+ 	__renesas_usb3_ep_free_request(usb3->ep0_req);
+-	if (usb3->phy)
+-		phy_put(usb3->phy);
+ 	pm_runtime_disable(&pdev->dev);
+ 
+ 	return 0;
+@@ -2628,6 +2639,17 @@ static int renesas_usb3_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_alloc_prd;
+ 
++	/*
++	 * This is optional. So, if this driver cannot get a phy,
++	 * this driver will not handle a phy anymore.
++	 */
++	usb3->phy = devm_phy_optional_get(&pdev->dev, "usb");
++	if (IS_ERR(usb3->phy)) {
++		ret = PTR_ERR(usb3->phy);
++		goto err_add_udc;
++	}
++
++	pm_runtime_enable(&pdev->dev);
+ 	ret = usb_add_gadget_udc(&pdev->dev, &usb3->gadget);
+ 	if (ret < 0)
+ 		goto err_add_udc;
+@@ -2636,20 +2658,11 @@ static int renesas_usb3_probe(struct platform_device *pdev)
+ 	if (ret < 0)
+ 		goto err_dev_create;
+ 
+-	/*
+-	 * This is an optional. So, if this driver cannot get a phy,
+-	 * this driver will not handle a phy anymore.
+-	 */
+-	usb3->phy = devm_phy_get(&pdev->dev, "usb");
+-	if (IS_ERR(usb3->phy))
+-		usb3->phy = NULL;
+-
+ 	usb3->workaround_for_vbus = priv->workaround_for_vbus;
+ 
+ 	renesas_usb3_debugfs_init(usb3, &pdev->dev);
+ 
+ 	dev_info(&pdev->dev, "probed%s\n", usb3->phy ? " with phy" : "");
+-	pm_runtime_enable(usb3_to_dev(usb3));
+ 
+ 	return 0;
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 6034c39b67d1..9e9de5452860 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -836,6 +836,12 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_BROKEN_FUA)
+ 		sdev->broken_fua = 1;
+ 
++	/* UAS also needs to support FL_ALWAYS_SYNC */
++	if (devinfo->flags & US_FL_ALWAYS_SYNC) {
++		sdev->skip_ms_page_3f = 1;
++		sdev->skip_ms_page_8 = 1;
++		sdev->wce_default_on = 1;
++	}
+ 	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+ 	return 0;
+ }
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 747d3a9596d9..22fcfccf453a 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2321,6 +2321,15 @@ UNUSUAL_DEV(  0x4146, 0xba01, 0x0100, 0x0100,
+ 		"Micro Mini 1GB",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+ 
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <agk@godking.net>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++		"SimpleTech",
++		"External HDD",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_ALWAYS_SYNC),
++
+ /*
+  * Nick Bowler <nbowler@elliptictech.com>
+  * SCSI stack spams (otherwise harmless) error messages.
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 38434d88954a..d0bdebd87ce3 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -107,3 +107,12 @@ UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ 		"External HDD",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_OPCODES),
++
++/* "G-DRIVE" external HDD hangs on write without these.
++ * Patch submitted by Alexander Kappner <agk@godking.net>
++ */
++UNUSUAL_DEV(0x4971, 0x8024, 0x0000, 0x9999,
++		"SimpleTech",
++		"External HDD",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_ALWAYS_SYNC),
+diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
+index 2e990e0d917d..1e13d4e15831 100644
+--- a/drivers/usb/typec/typec_wcove.c
++++ b/drivers/usb/typec/typec_wcove.c
+@@ -202,6 +202,10 @@ static int wcove_init(struct tcpc_dev *tcpc)
+ 	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ 	int ret;
+ 
++	ret = regmap_write(wcove->regmap, USBC_CONTROL1, 0);
++	if (ret)
++		return ret;
++
+ 	/* Unmask everything */
+ 	ret = regmap_write(wcove->regmap, USBC_IRQMASK1, 0);
+ 	if (ret)
+@@ -285,8 +289,30 @@ static int wcove_get_cc(struct tcpc_dev *tcpc, enum typec_cc_status *cc1,
+ 
+ static int wcove_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+ {
+-	/* XXX: Relying on the HW FSM to configure things correctly for now */
+-	return 0;
++	struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
++	unsigned int ctrl;
++
++	switch (cc) {
++	case TYPEC_CC_RD:
++		ctrl = USBC_CONTROL1_MODE_SNK;
++		break;
++	case TYPEC_CC_RP_DEF:
++		ctrl = USBC_CONTROL1_CURSRC_UA_80 | USBC_CONTROL1_MODE_SRC;
++		break;
++	case TYPEC_CC_RP_1_5:
++		ctrl = USBC_CONTROL1_CURSRC_UA_180 | USBC_CONTROL1_MODE_SRC;
++		break;
++	case TYPEC_CC_RP_3_0:
++		ctrl = USBC_CONTROL1_CURSRC_UA_330 | USBC_CONTROL1_MODE_SRC;
++		break;
++	case TYPEC_CC_OPEN:
++		ctrl = 0;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	return regmap_write(wcove->regmap, USBC_CONTROL1, ctrl);
+ }
+ 
+ static int wcove_set_polarity(struct tcpc_dev *tcpc, enum typec_cc_polarity pol)
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 48808388ec33..be37aec250c2 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -10,6 +10,9 @@
+ #include <linux/platform_device.h>
+ #include <linux/slab.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "usbip_common.h"
+ #include "vhci.h"
+ 
+@@ -205,16 +208,20 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
+ 	return 0;
+ }
+ 
+-static int valid_port(__u32 pdev_nr, __u32 rhport)
++static int valid_port(__u32 *pdev_nr, __u32 *rhport)
+ {
+-	if (pdev_nr >= vhci_num_controllers) {
+-		pr_err("pdev %u\n", pdev_nr);
++	if (*pdev_nr >= vhci_num_controllers) {
++		pr_err("pdev %u\n", *pdev_nr);
+ 		return 0;
+ 	}
+-	if (rhport >= VHCI_HC_PORTS) {
+-		pr_err("rhport %u\n", rhport);
++	*pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers);
++
++	if (*rhport >= VHCI_HC_PORTS) {
++		pr_err("rhport %u\n", *rhport);
+ 		return 0;
+ 	}
++	*rhport = array_index_nospec(*rhport, VHCI_HC_PORTS);
++
+ 	return 1;
+ }
+ 
+@@ -232,7 +239,7 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
+ 	pdev_nr = port_to_pdev_nr(port);
+ 	rhport = port_to_rhport(port);
+ 
+-	if (!valid_port(pdev_nr, rhport))
++	if (!valid_port(&pdev_nr, &rhport))
+ 		return -EINVAL;
+ 
+ 	hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
+@@ -258,7 +265,8 @@ static ssize_t detach_store(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_WO(detach);
+ 
+-static int valid_args(__u32 pdev_nr, __u32 rhport, enum usb_device_speed speed)
++static int valid_args(__u32 *pdev_nr, __u32 *rhport,
++		      enum usb_device_speed speed)
+ {
+ 	if (!valid_port(pdev_nr, rhport)) {
+ 		return 0;
+@@ -322,7 +330,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
+ 			     sockfd, devid, speed);
+ 
+ 	/* check received parameters */
+-	if (!valid_args(pdev_nr, rhport, speed))
++	if (!valid_args(&pdev_nr, &rhport, speed))
+ 		return -EINVAL;
+ 
+ 	hcd = platform_get_drvdata(vhcis[pdev_nr].pdev);
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 7e2e7188e7f4..e62e52e8f141 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
+ 	return 0;
+ }
+ 
++static inline int sadb_key_len(const struct sadb_key *key)
++{
++	int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
++
++	return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
++			    sizeof(uint64_t));
++}
++
++static int verify_key_len(const void *p)
++{
++	const struct sadb_key *key = p;
++
++	if (sadb_key_len(key) > key->sadb_key_len)
++		return -EINVAL;
++
++	return 0;
++}
++
+ static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
+ {
+ 	return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
+@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
+ 				return -EINVAL;
+ 			if (ext_hdrs[ext_type-1] != NULL)
+ 				return -EINVAL;
+-			if (ext_type == SADB_EXT_ADDRESS_SRC ||
+-			    ext_type == SADB_EXT_ADDRESS_DST ||
+-			    ext_type == SADB_EXT_ADDRESS_PROXY ||
+-			    ext_type == SADB_X_EXT_NAT_T_OA) {
++			switch (ext_type) {
++			case SADB_EXT_ADDRESS_SRC:
++			case SADB_EXT_ADDRESS_DST:
++			case SADB_EXT_ADDRESS_PROXY:
++			case SADB_X_EXT_NAT_T_OA:
+ 				if (verify_address_len(p))
+ 					return -EINVAL;
+-			}
+-			if (ext_type == SADB_X_EXT_SEC_CTX) {
++				break;
++			case SADB_X_EXT_SEC_CTX:
+ 				if (verify_sec_ctx_len(p))
+ 					return -EINVAL;
++				break;
++			case SADB_EXT_KEY_AUTH:
++			case SADB_EXT_KEY_ENCRYPT:
++				if (verify_key_len(p))
++					return -EINVAL;
++				break;
++			default:
++				break;
+ 			}
+ 			ext_hdrs[ext_type-1] = (void *) p;
+ 		}
+@@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
+ 	if (key != NULL &&
+ 	    sa->sadb_sa_auth != SADB_X_AALG_NULL &&
+-	    ((key->sadb_key_bits+7) / 8 == 0 ||
+-	     (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++	    key->sadb_key_bits == 0)
+ 		return ERR_PTR(-EINVAL);
+ 	key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
+ 	if (key != NULL &&
+ 	    sa->sadb_sa_encrypt != SADB_EALG_NULL &&
+-	    ((key->sadb_key_bits+7) / 8 == 0 ||
+-	     (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
++	    key->sadb_key_bits == 0)
+ 		return ERR_PTR(-EINVAL);
+ 
+ 	x = xfrm_state_alloc(net);
+diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
+index 6ab274b14484..26f93c26db29 100644
+--- a/net/netfilter/nft_ct.c
++++ b/net/netfilter/nft_ct.c
+@@ -842,22 +842,26 @@ static int nft_ct_helper_obj_dump(struct sk_buff *skb,
+ 				  struct nft_object *obj, bool reset)
+ {
+ 	const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
+-	const struct nf_conntrack_helper *helper = priv->helper4;
++	const struct nf_conntrack_helper *helper;
+ 	u16 family;
+ 
++	if (priv->helper4 && priv->helper6) {
++		family = NFPROTO_INET;
++		helper = priv->helper4;
++	} else if (priv->helper6) {
++		family = NFPROTO_IPV6;
++		helper = priv->helper6;
++	} else {
++		family = NFPROTO_IPV4;
++		helper = priv->helper4;
++	}
++
+ 	if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
+ 		return -1;
+ 
+ 	if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
+ 		return -1;
+ 
+-	if (priv->helper4 && priv->helper6)
+-		family = NFPROTO_INET;
+-	else if (priv->helper6)
+-		family = NFPROTO_IPV6;
+-	else
+-		family = NFPROTO_IPV4;
+-
+ 	if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
+ 		return -1;
+ 


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-06-11 21:48 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-06-11 21:48 UTC (permalink / raw
  To: gentoo-commits

commit:     373445e16ce72726343d67d7fca5de454a00a4c3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun 11 21:47:57 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun 11 21:47:57 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=373445e1

Linux patch 4.16.15

 0000_README              |    4 +
 1014_linux-4.16.15.patch | 1544 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1548 insertions(+)

diff --git a/0000_README b/0000_README
index 5691b91..d817caf 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-4.16.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.14
 
+Patch:  1014_linux-4.16.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.16.15.patch b/1014_linux-4.16.15.patch
new file mode 100644
index 0000000..6820a0f
--- /dev/null
+++ b/1014_linux-4.16.15.patch
@@ -0,0 +1,1544 @@
+diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
+index 2a3278d5cf35..fa951b820b25 100644
+--- a/Documentation/networking/netdev-FAQ.txt
++++ b/Documentation/networking/netdev-FAQ.txt
+@@ -179,6 +179,15 @@ A: No.  See above answer.  In short, if you think it really belongs in
+    dash marker line as described in Documentation/process/submitting-patches.rst to
+    temporarily embed that information into the patch that you send.
+ 
++Q: Are all networking bug fixes backported to all stable releases?
++
++A: Due to capacity, Dave could only take care of the backports for the last
++   2 stable releases. For earlier stable releases, each stable branch maintainer
++   is supposed to take care of them. If you find any patch is missing from an
++   earlier stable branch, please notify stable@vger.kernel.org with either a
++   commit ID or a formal patch backported, and CC Dave and other relevant
++   networking developers.
++
+ Q: Someone said that the comment style and coding convention is different
+    for the networking content.  Is this true?
+ 
+diff --git a/Makefile b/Makefile
+index a043442e442f..e45c66b27241 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
+index e394799979a6..6d9b9453707c 100644
+--- a/drivers/gpu/drm/drm_file.c
++++ b/drivers/gpu/drm/drm_file.c
+@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
+ 		return -ENOMEM;
+ 
+ 	filp->private_data = priv;
++	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
+ 	priv->filp = filp;
+ 	priv->pid = get_pid(task_pid(current));
+ 	priv->minor = minor;
+diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
+index 944a7f338099..1b25d8bc153a 100644
+--- a/drivers/isdn/hardware/eicon/diva.c
++++ b/drivers/isdn/hardware/eicon/diva.c
+@@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
+ **  Receive and process command from user mode utility
+ */
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+-			    int length,
++			    int length, void *mptr,
+ 			    divas_xdi_copy_from_user_fn_t cp_fn)
+ {
+-	diva_xdi_um_cfg_cmd_t msg;
++	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ 	diva_os_xdi_adapter_t *a = NULL;
+ 	diva_os_spin_lock_magic_t old_irql;
+ 	struct list_head *tmp;
+@@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+ 			 length, sizeof(diva_xdi_um_cfg_cmd_t)))
+ 			return NULL;
+ 	}
+-	if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
++	if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
+ 		DBG_ERR(("A: A(?) open, write error"))
+ 			return NULL;
+ 	}
+ 	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+ 	list_for_each(tmp, &adapter_queue) {
+ 		a = list_entry(tmp, diva_os_xdi_adapter_t, link);
+-		if (a->controller == (int)msg.adapter)
++		if (a->controller == (int)msg->adapter)
+ 			break;
+ 		a = NULL;
+ 	}
+ 	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
+ 
+ 	if (!a) {
+-		DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
++		DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
+ 			}
+ 
+ 	return (a);
+@@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
+ 
+ int
+ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+-	       int length, divas_xdi_copy_from_user_fn_t cp_fn)
++	       int length, void *mptr,
++	       divas_xdi_copy_from_user_fn_t cp_fn)
+ {
++	diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
+ 	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
+ 	void *data;
+ 
+@@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+ 			return (-2);
+ 	}
+ 
+-	length = (*cp_fn) (os_handle, data, src, length);
++	if (msg) {
++		*(diva_xdi_um_cfg_cmd_t *)data = *msg;
++		length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
++				   src + sizeof(*msg), length - sizeof(*msg));
++	} else {
++		length = (*cp_fn) (os_handle, data, src, length);
++	}
+ 	if (length > 0) {
+ 		if ((*(a->interface.cmd_proc))
+ 		    (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
+diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
+index b067032093a8..1ad76650fbf9 100644
+--- a/drivers/isdn/hardware/eicon/diva.h
++++ b/drivers/isdn/hardware/eicon/diva.h
+@@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
+ 		  int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
+ 
+ int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
+-		   int length, divas_xdi_copy_from_user_fn_t cp_fn);
++		   int length, void *msg,
++		   divas_xdi_copy_from_user_fn_t cp_fn);
+ 
+ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
+-			    int length,
++			    int length, void *msg,
+ 			    divas_xdi_copy_from_user_fn_t cp_fn);
+ 
+ void diva_xdi_close_adapter(void *adapter, void *os_handle);
+diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
+index b9980e84f9db..b6a3950b2564 100644
+--- a/drivers/isdn/hardware/eicon/divasmain.c
++++ b/drivers/isdn/hardware/eicon/divasmain.c
+@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
+ static ssize_t divas_write(struct file *file, const char __user *buf,
+ 			   size_t count, loff_t *ppos)
+ {
++	diva_xdi_um_cfg_cmd_t msg;
+ 	int ret = -EINVAL;
+ 
+ 	if (!file->private_data) {
+ 		file->private_data = diva_xdi_open_adapter(file, buf,
+-							   count,
++							   count, &msg,
+ 							   xdi_copy_from_user);
+-	}
+-	if (!file->private_data) {
+-		return (-ENODEV);
++		if (!file->private_data)
++			return (-ENODEV);
++		ret = diva_xdi_write(file->private_data, file,
++				     buf, count, &msg, xdi_copy_from_user);
++	} else {
++		ret = diva_xdi_write(file->private_data, file,
++				     buf, count, NULL, xdi_copy_from_user);
+ 	}
+ 
+-	ret = diva_xdi_write(file->private_data, file,
+-			     buf, count, xdi_copy_from_user);
+ 	switch (ret) {
+ 	case -1:		/* Message should be removed from rx mailbox first */
+ 		ret = -EBUSY;
+@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
+ static ssize_t divas_read(struct file *file, char __user *buf,
+ 			  size_t count, loff_t *ppos)
+ {
++	diva_xdi_um_cfg_cmd_t msg;
+ 	int ret = -EINVAL;
+ 
+ 	if (!file->private_data) {
+ 		file->private_data = diva_xdi_open_adapter(file, buf,
+-							   count,
++							   count, &msg,
+ 							   xdi_copy_from_user);
+ 	}
+ 	if (!file->private_data) {
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 63e02a54d537..06e8e7a81994 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -684,7 +684,8 @@ static int b53_switch_reset(struct b53_device *dev)
+ 	 * still use this driver as a library and need to perform the reset
+ 	 * earlier.
+ 	 */
+-	if (dev->chip_id == BCM58XX_DEVICE_ID) {
++	if (dev->chip_id == BCM58XX_DEVICE_ID ||
++	    dev->chip_id == BCM583XX_DEVICE_ID) {
+ 		b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, &reg);
+ 		reg |= SW_RST | EN_SW_RST | EN_CH_RST;
+ 		b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg);
+@@ -1867,6 +1868,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
+ 		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ 		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ 	},
++	{
++		.chip_id = BCM583XX_DEVICE_ID,
++		.dev_name = "BCM583xx/11360",
++		.vlans = 4096,
++		.enabled_ports = 0x103,
++		.arl_entries = 4,
++		.cpu_port = B53_CPU_PORT,
++		.vta_regs = B53_VTA_REGS,
++		.duplex_reg = B53_DUPLEX_STAT_GE,
++		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
++		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
++	},
+ 	{
+ 		.chip_id = BCM7445_DEVICE_ID,
+ 		.dev_name = "BCM7445",
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index d954cf36ecd8..f91acda37572 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -61,6 +61,7 @@ enum {
+ 	BCM53018_DEVICE_ID = 0x53018,
+ 	BCM53019_DEVICE_ID = 0x53019,
+ 	BCM58XX_DEVICE_ID = 0x5800,
++	BCM583XX_DEVICE_ID = 0x58300,
+ 	BCM7445_DEVICE_ID = 0x7445,
+ 	BCM7278_DEVICE_ID = 0x7278,
+ };
+@@ -180,6 +181,7 @@ static inline int is5301x(struct b53_device *dev)
+ static inline int is58xx(struct b53_device *dev)
+ {
+ 	return dev->chip_id == BCM58XX_DEVICE_ID ||
++		dev->chip_id == BCM583XX_DEVICE_ID ||
+ 		dev->chip_id == BCM7445_DEVICE_ID ||
+ 		dev->chip_id == BCM7278_DEVICE_ID;
+ }
+diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
+index c37ffd1b6833..8247481eaa06 100644
+--- a/drivers/net/dsa/b53/b53_srab.c
++++ b/drivers/net/dsa/b53/b53_srab.c
+@@ -364,7 +364,7 @@ static const struct of_device_id b53_srab_of_match[] = {
+ 	{ .compatible = "brcm,bcm53018-srab" },
+ 	{ .compatible = "brcm,bcm53019-srab" },
+ 	{ .compatible = "brcm,bcm5301x-srab" },
+-	{ .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID },
++	{ .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ 	{ .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ 	{ .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ 	{ .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID },
+@@ -372,7 +372,7 @@ static const struct of_device_id b53_srab_of_match[] = {
+ 	{ .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ 	{ .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ 	{ .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID },
+-	{ .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID },
++	{ .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID },
+ 	{ .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID },
+ 	{ /* sentinel */ },
+ };
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+index 7dd83d0ef0a0..22243c480a05 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+ 	 * slots for the highest priority.
+ 	 */
+ 	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+-		   NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
++		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ 	/* Mapping between the CREDIT_WEIGHT registers and actual client
+ 	 * numbers
+ 	 */
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index b91109d967fa..3179599dd797 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -2704,11 +2704,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	pci_set_master(pdev);
+ 
+ 	/* Query PCI controller on system for DMA addressing
+-	 * limitation for the device.  Try 64-bit first, and
++	 * limitation for the device.  Try 47-bit first, and
+ 	 * fail to 32-bit.
+ 	 */
+ 
+-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
++	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ 	if (err) {
+ 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ 		if (err) {
+@@ -2722,10 +2722,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 			goto err_out_release_regions;
+ 		}
+ 	} else {
+-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
++		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ 		if (err) {
+ 			dev_err(dev, "Unable to obtain %u-bit DMA "
+-				"for consistent allocations, aborting\n", 64);
++				"for consistent allocations, aborting\n", 47);
+ 			goto err_out_release_regions;
+ 		}
+ 		using_dac = 1;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 5774fb6f8aa0..4d764c3ee155 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3309,7 +3309,9 @@ void be_detect_error(struct be_adapter *adapter)
+ 				if ((val & POST_STAGE_FAT_LOG_START)
+ 				     != POST_STAGE_FAT_LOG_START &&
+ 				    (val & POST_STAGE_ARMFW_UE)
+-				     != POST_STAGE_ARMFW_UE)
++				     != POST_STAGE_ARMFW_UE &&
++				    (val & POST_STAGE_RECOVERABLE_ERR)
++				     != POST_STAGE_RECOVERABLE_ERR)
+ 					return;
+ 			}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index 3aaf4bad6c5a..427e7a31862c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+ 	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ 	struct mlx4_qp *qp;
+ 
+-	spin_lock(&qp_table->lock);
++	spin_lock_irq(&qp_table->lock);
+ 
+ 	qp = __mlx4_qp_lookup(dev, qpn);
+ 
+-	spin_unlock(&qp_table->lock);
++	spin_unlock_irq(&qp_table->lock);
+ 	return qp;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index e5c3ab46a24a..f63b317f7b32 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -635,6 +635,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+ 	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+ }
+ 
++static __be32 mlx5e_get_fcs(struct sk_buff *skb)
++{
++	int last_frag_sz, bytes_in_prev, nr_frags;
++	u8 *fcs_p1, *fcs_p2;
++	skb_frag_t *last_frag;
++	__be32 fcs_bytes;
++
++	if (!skb_is_nonlinear(skb))
++		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
++
++	nr_frags = skb_shinfo(skb)->nr_frags;
++	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
++	last_frag_sz = skb_frag_size(last_frag);
++
++	/* If all FCS data is in last frag */
++	if (last_frag_sz >= ETH_FCS_LEN)
++		return *(__be32 *)(skb_frag_address(last_frag) +
++				   last_frag_sz - ETH_FCS_LEN);
++
++	fcs_p2 = (u8 *)skb_frag_address(last_frag);
++	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
++
++	/* Find where the other part of the FCS is - Linear or another frag */
++	if (nr_frags == 1) {
++		fcs_p1 = skb_tail_pointer(skb);
++	} else {
++		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
++
++		fcs_p1 = skb_frag_address(prev_frag) +
++			    skb_frag_size(prev_frag);
++	}
++	fcs_p1 -= bytes_in_prev;
++
++	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
++	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
++
++	return fcs_bytes;
++}
++
+ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ 				     struct mlx5_cqe64 *cqe,
+ 				     struct mlx5e_rq *rq,
+@@ -663,6 +702,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ 			skb->csum = csum_partial(skb->data + ETH_HLEN,
+ 						 network_depth - ETH_HLEN,
+ 						 skb->csum);
++		if (unlikely(netdev->features & NETIF_F_RXFCS))
++			skb->csum = csum_add(skb->csum,
++					     (__force __wsum)mlx5e_get_fcs(skb));
+ 		rq->stats.csum_complete++;
+ 		return;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index bf400c75fcc8..c54762729bdf 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -4870,6 +4870,11 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
+ 				       "spectrum: Can not put a VLAN on an OVS port");
+ 			return -EINVAL;
+ 		}
++		if (is_vlan_dev(upper_dev) &&
++		    vlan_dev_vlan_id(upper_dev) == 1) {
++			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
++			return -EINVAL;
++		}
+ 		break;
+ 	case NETDEV_CHANGEUPPER:
+ 		upper_dev = info->upper_dev;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+index 00f41c145d4d..820b226d6ff8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+@@ -77,7 +77,7 @@
+ #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+ 
+ /* ILT entry structure */
+-#define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
++#define ILT_ENTRY_PHY_ADDR_MASK		(~0ULL >> 12)
+ #define ILT_ENTRY_PHY_ADDR_SHIFT	0
+ #define ILT_ENTRY_VALID_MASK		0x1ULL
+ #define ILT_ENTRY_VALID_SHIFT		52
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index f4c0b02ddad8..59fbf74dcada 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1674,8 +1674,8 @@ static int netsec_probe(struct platform_device *pdev)
+ 	if (ret)
+ 		goto unreg_napi;
+ 
+-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+-		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
++	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
++		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
+ 
+ 	ret = register_netdev(ndev);
+ 	if (ret) {
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index abceea802ea1..38828ab77eb9 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1873,7 +1873,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->txchan)) {
+ 		dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ 		rc = PTR_ERR(priv->txchan);
+-		goto no_cpdma_chan;
++		goto err_free_dma;
+ 	}
+ 
+ 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+@@ -1881,14 +1881,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->rxchan)) {
+ 		dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ 		rc = PTR_ERR(priv->rxchan);
+-		goto no_cpdma_chan;
++		goto err_free_txchan;
+ 	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (!res) {
+ 		dev_err(&pdev->dev, "error getting irq res\n");
+ 		rc = -ENOENT;
+-		goto no_cpdma_chan;
++		goto err_free_rxchan;
+ 	}
+ 	ndev->irq = res->start;
+ 
+@@ -1914,7 +1914,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ 		pm_runtime_put_noidle(&pdev->dev);
+ 		dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ 			__func__, rc);
+-		goto no_cpdma_chan;
++		goto err_napi_del;
+ 	}
+ 
+ 	/* register the network device */
+@@ -1924,7 +1924,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ 		dev_err(&pdev->dev, "error in register_netdev\n");
+ 		rc = -ENODEV;
+ 		pm_runtime_put(&pdev->dev);
+-		goto no_cpdma_chan;
++		goto err_napi_del;
+ 	}
+ 
+ 
+@@ -1937,11 +1937,13 @@ static int davinci_emac_probe(struct platform_device *pdev)
+ 
+ 	return 0;
+ 
+-no_cpdma_chan:
+-	if (priv->txchan)
+-		cpdma_chan_destroy(priv->txchan);
+-	if (priv->rxchan)
+-		cpdma_chan_destroy(priv->rxchan);
++err_napi_del:
++	netif_napi_del(&priv->napi);
++err_free_rxchan:
++	cpdma_chan_destroy(priv->rxchan);
++err_free_txchan:
++	cpdma_chan_destroy(priv->txchan);
++err_free_dma:
+ 	cpdma_ctlr_destroy(priv->dma);
+ no_pdata:
+ 	if (of_phy_is_fixed_link(np))
+diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
+index 6838129839ca..e757b09f1889 100644
+--- a/drivers/net/phy/bcm-cygnus.c
++++ b/drivers/net/phy/bcm-cygnus.c
+@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
+ 		return rc;
+ 
+ 	/* make rcal=100, since rdb default is 000 */
+-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
++	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+ 	if (rc < 0)
+ 		return rc;
+ 
+ 	/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
+-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
++	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+ 	if (rc < 0)
+ 		return rc;
+ 
+ 	/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
+-	rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
++	rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
+index 5ad130c3da43..d5e0833d69b9 100644
+--- a/drivers/net/phy/bcm-phy-lib.c
++++ b/drivers/net/phy/bcm-phy-lib.c
+@@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
+ 	/* The register must be written to both the Shadow Register Select and
+ 	 * the Shadow Read Register Selector
+ 	 */
+-	phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
++	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
+ 		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
+ 	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ }
+diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
+index 7c73808cbbde..81cceaa412fe 100644
+--- a/drivers/net/phy/bcm-phy-lib.h
++++ b/drivers/net/phy/bcm-phy-lib.h
+@@ -14,11 +14,18 @@
+ #ifndef _LINUX_BCM_PHY_LIB_H
+ #define _LINUX_BCM_PHY_LIB_H
+ 
++#include <linux/brcmphy.h>
+ #include <linux/phy.h>
+ 
+ int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
+ int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+ 
++static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
++					u16 reg, u16 val)
++{
++	return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
++}
++
+ int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
+ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
+ 
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 421feb8f92fe..90eb3e12a4f8 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
+ static void r_rc_cal_reset(struct phy_device *phydev)
+ {
+ 	/* Reset R_CAL/RC_CAL Engine */
+-	bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
++	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+ 
+ 	/* Disable Reset R_AL/RC_CAL Engine */
+-	bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
++	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+ }
+ 
+ static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 3175f7410baf..8f3863cd0094 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team,
+ static void __team_compute_features(struct team *team)
+ {
+ 	struct team_port *port;
+-	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
++	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
++					  NETIF_F_ALL_FOR_ALL;
+ 	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
+ 	unsigned short max_hard_header_len = ETH_HLEN;
+ 	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index ffae19714ffd..24e645c86ae7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1632,7 +1632,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	else
+ 		*skb_xdp = 0;
+ 
+-	preempt_disable();
++	local_bh_disable();
+ 	rcu_read_lock();
+ 	xdp_prog = rcu_dereference(tun->xdp_prog);
+ 	if (xdp_prog && !*skb_xdp) {
+@@ -1657,7 +1657,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 			if (err)
+ 				goto err_redirect;
+ 			rcu_read_unlock();
+-			preempt_enable();
++			local_bh_enable();
+ 			return NULL;
+ 		case XDP_TX:
+ 			xdp_xmit = true;
+@@ -1679,7 +1679,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	skb = build_skb(buf, buflen);
+ 	if (!skb) {
+ 		rcu_read_unlock();
+-		preempt_enable();
++		local_bh_enable();
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+@@ -1692,12 +1692,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 		skb->dev = tun->dev;
+ 		generic_xdp_tx(skb, xdp_prog);
+ 		rcu_read_unlock();
+-		preempt_enable();
++		local_bh_enable();
+ 		return NULL;
+ 	}
+ 
+ 	rcu_read_unlock();
+-	preempt_enable();
++	local_bh_enable();
+ 
+ 	return skb;
+ 
+@@ -1705,7 +1705,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ 	put_page(alloc_frag->page);
+ err_xdp:
+ 	rcu_read_unlock();
+-	preempt_enable();
++	local_bh_enable();
+ 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ 	return NULL;
+ }
+@@ -1901,16 +1901,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 		struct bpf_prog *xdp_prog;
+ 		int ret;
+ 
++		local_bh_disable();
+ 		rcu_read_lock();
+ 		xdp_prog = rcu_dereference(tun->xdp_prog);
+ 		if (xdp_prog) {
+ 			ret = do_xdp_generic(xdp_prog, skb);
+ 			if (ret != XDP_PASS) {
+ 				rcu_read_unlock();
++				local_bh_enable();
+ 				return total_len;
+ 			}
+ 		}
+ 		rcu_read_unlock();
++		local_bh_enable();
+ 	}
+ 
+ 	rcu_read_lock();
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index 7220cd620717..0362acd5cdca 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
+  */
+ static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
+ 	.description = "CDC MBIM",
+-	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
++	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
+ 	.bind = cdc_mbim_bind,
+ 	.unbind = cdc_mbim_unbind,
+ 	.manage_power = cdc_mbim_manage_power,
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 16b0c7db431b..8911e3466e61 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 		void *data;
+ 		u32 act;
+ 
++		/* Transient failure which in theory could occur if
++		 * in-flight packets from before XDP was enabled reach
++		 * the receive path after XDP is loaded.
++		 */
++		if (unlikely(hdr->hdr.gso_type))
++			goto err_xdp;
++
+ 		/* This happens when rx buffer size is underestimated
+ 		 * or headroom is not enough because of the buffer
+ 		 * was refilled before XDP is set. This should only
+@@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 			xdp_page = page;
+ 		}
+ 
+-		/* Transient failure which in theory could occur if
+-		 * in-flight packets from before XDP was enabled reach
+-		 * the receive path after XDP is loaded. In practice I
+-		 * was not able to create this condition.
+-		 */
+-		if (unlikely(hdr->hdr.gso_type))
+-			goto err_xdp;
+-
+ 		/* Allow consuming headroom but reserve enough space to push
+ 		 * the descriptor on if we get an XDP_TX return code.
+ 		 */
+@@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 			}
+ 			*xdp_xmit = true;
+ 			if (unlikely(xdp_page != page))
+-				goto err_xdp;
++				put_page(page);
+ 			rcu_read_unlock();
+ 			goto xdp_xmit;
+ 		case XDP_REDIRECT:
+@@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 			}
+ 			*xdp_xmit = true;
+ 			if (unlikely(xdp_page != page))
+-				goto err_xdp;
++				put_page(page);
+ 			rcu_read_unlock();
+ 			goto xdp_xmit;
+ 		default:
+@@ -875,7 +874,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
+ 	rcu_read_unlock();
+ err_skb:
+ 	put_page(page);
+-	while (--num_buf) {
++	while (num_buf-- > 1) {
+ 		buf = virtqueue_get_buf(rq->vq, &len);
+ 		if (unlikely(!buf)) {
+ 			pr_debug("%s: rx error: %d buffers missing\n",
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index cb694d2a1228..e826933f71da 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -556,6 +556,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
+ static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+ static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+ 
++/*
++ * There is no good way to get notified from vmbus_onoffer_rescind(),
++ * so let's use polling here, since this is not a hot path.
++ */
++static int wait_for_response(struct hv_device *hdev,
++			     struct completion *comp)
++{
++	while (true) {
++		if (hdev->channel->rescind) {
++			dev_warn_once(&hdev->device, "The device is gone.\n");
++			return -ENODEV;
++		}
++
++		if (wait_for_completion_timeout(comp, HZ / 10))
++			break;
++	}
++
++	return 0;
++}
++
+ /**
+  * devfn_to_wslot() - Convert from Linux PCI slot to Windows
+  * @devfn:	The Linux representation of PCI slot
+@@ -1568,7 +1588,8 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ 	if (ret)
+ 		goto error;
+ 
+-	wait_for_completion(&comp_pkt.host_event);
++	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
++		goto error;
+ 
+ 	hpdev->desc = *desc;
+ 	refcount_set(&hpdev->refs, 1);
+@@ -2061,15 +2082,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+ 				sizeof(struct pci_version_request),
+ 				(unsigned long)pkt, VM_PKT_DATA_INBAND,
+ 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++		if (!ret)
++			ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ 		if (ret) {
+ 			dev_err(&hdev->device,
+-				"PCI Pass-through VSP failed sending version reqquest: %#x",
++				"PCI Pass-through VSP failed to request version: %d",
+ 				ret);
+ 			goto exit;
+ 		}
+ 
+-		wait_for_completion(&comp_pkt.host_event);
+-
+ 		if (comp_pkt.completion_status >= 0) {
+ 			pci_protocol_version = pci_protocol_versions[i];
+ 			dev_info(&hdev->device,
+@@ -2278,11 +2300,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
+ 	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
+ 			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++	if (!ret)
++		ret = wait_for_response(hdev, &comp_pkt.host_event);
++
+ 	if (ret)
+ 		goto exit;
+ 
+-	wait_for_completion(&comp_pkt.host_event);
+-
+ 	if (comp_pkt.completion_status < 0) {
+ 		dev_err(&hdev->device,
+ 			"PCI Pass-through VSP failed D0 Entry with status %x\n",
+@@ -2322,11 +2345,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
+ 
+ 	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
+ 			       0, VM_PKT_DATA_INBAND, 0);
+-	if (ret)
+-		return ret;
++	if (!ret)
++		ret = wait_for_response(hdev, &comp);
+ 
+-	wait_for_completion(&comp);
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+@@ -2396,11 +2418,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
+ 				size_res, (unsigned long)pkt,
+ 				VM_PKT_DATA_INBAND,
+ 				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
++		if (!ret)
++			ret = wait_for_response(hdev, &comp_pkt.host_event);
+ 		if (ret)
+ 			break;
+ 
+-		wait_for_completion(&comp_pkt.host_event);
+-
+ 		if (comp_pkt.completion_status < 0) {
+ 			ret = -EPROTO;
+ 			dev_err(&hdev->device,
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 12bcfbac2cc9..d3c90ce5d4c4 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -101,7 +101,9 @@ struct vhost_net_virtqueue {
+ 	/* vhost zerocopy support fields below: */
+ 	/* last used idx for outstanding DMA zerocopy buffers */
+ 	int upend_idx;
+-	/* first used idx for DMA done zerocopy buffers */
++	/* For TX, first used idx for DMA done zerocopy buffers
++	 * For RX, number of batched heads
++	 */
+ 	int done_idx;
+ 	/* an array of userspace buffers info */
+ 	struct ubuf_info *ubuf_info;
+@@ -620,6 +622,18 @@ static int sk_has_rx_data(struct sock *sk)
+ 	return skb_queue_empty(&sk->sk_receive_queue);
+ }
+ 
++static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
++{
++	struct vhost_virtqueue *vq = &nvq->vq;
++	struct vhost_dev *dev = vq->dev;
++
++	if (!nvq->done_idx)
++		return;
++
++	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
++	nvq->done_idx = 0;
++}
++
+ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ {
+ 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
+@@ -629,6 +643,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
+ 	int len = peek_head_len(rvq, sk);
+ 
+ 	if (!len && vq->busyloop_timeout) {
++		/* Flush batched heads first */
++		vhost_rx_signal_used(rvq);
+ 		/* Both tx vq and rx socket were polled here */
+ 		mutex_lock_nested(&vq->mutex, 1);
+ 		vhost_disable_notify(&net->dev, vq);
+@@ -756,7 +772,7 @@ static void handle_rx(struct vhost_net *net)
+ 	};
+ 	size_t total_len = 0;
+ 	int err, mergeable;
+-	s16 headcount, nheads = 0;
++	s16 headcount;
+ 	size_t vhost_hlen, sock_hlen;
+ 	size_t vhost_len, sock_len;
+ 	struct socket *sock;
+@@ -784,8 +800,8 @@ static void handle_rx(struct vhost_net *net)
+ 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
+ 		sock_len += sock_hlen;
+ 		vhost_len = sock_len + vhost_hlen;
+-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
+-					&in, vq_log, &log,
++		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
++					vhost_len, &in, vq_log, &log,
+ 					likely(mergeable) ? UIO_MAXIOV : 1);
+ 		/* On error, stop handling until the next kick. */
+ 		if (unlikely(headcount < 0))
+@@ -856,12 +872,9 @@ static void handle_rx(struct vhost_net *net)
+ 			vhost_discard_vq_desc(vq, headcount);
+ 			goto out;
+ 		}
+-		nheads += headcount;
+-		if (nheads > VHOST_RX_BATCH) {
+-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+-						    nheads);
+-			nheads = 0;
+-		}
++		nvq->done_idx += headcount;
++		if (nvq->done_idx > VHOST_RX_BATCH)
++			vhost_rx_signal_used(nvq);
+ 		if (unlikely(vq_log))
+ 			vhost_log_write(vq, vq_log, log, vhost_len);
+ 		total_len += vhost_len;
+@@ -872,9 +885,7 @@ static void handle_rx(struct vhost_net *net)
+ 	}
+ 	vhost_net_enable_vq(net, vq);
+ out:
+-	if (nheads)
+-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+-					    nheads);
++	vhost_rx_signal_used(nvq);
+ 	mutex_unlock(&vq->mutex);
+ }
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index be6a4b6a76c6..68242f50c303 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -981,6 +981,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ {
+ 	int ret = 0;
+ 
++	mutex_lock(&dev->mutex);
+ 	vhost_dev_lock_vqs(dev);
+ 	switch (msg->type) {
+ 	case VHOST_IOTLB_UPDATE:
+@@ -1016,6 +1017,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
+ 	}
+ 
+ 	vhost_dev_unlock_vqs(dev);
++	mutex_unlock(&dev->mutex);
++
+ 	return ret;
+ }
+ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index 8606c9113d3f..a3339ff732a0 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -918,6 +918,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+ 	return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+ }
+ 
++static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
++{
++	return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
++}
++
+ /*
+  *	Prototypes exported by ipv6
+  */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 03ca089cce0f..799217d6eea2 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1315,6 +1315,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
+ 	return 0;
+ }
+ 
++static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
++{
++	if (S_ISREG(inode->i_mode))
++		return MAX_LFS_FILESIZE;
++
++	if (S_ISBLK(inode->i_mode))
++		return MAX_LFS_FILESIZE;
++
++	/* Special "we do even unsigned file positions" case */
++	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
++		return 0;
++
++	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
++	return ULONG_MAX;
++}
++
++static inline bool file_mmap_ok(struct file *file, struct inode *inode,
++				unsigned long pgoff, unsigned long len)
++{
++	u64 maxsize = file_mmap_size_max(file, inode);
++
++	if (maxsize && len > maxsize)
++		return false;
++	maxsize -= len;
++	if (pgoff > maxsize >> PAGE_SHIFT)
++		return false;
++	return true;
++}
++
+ /*
+  * The caller must hold down_write(&current->mm->mmap_sem).
+  */
+@@ -1389,6 +1418,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ 		struct inode *inode = file_inode(file);
+ 		unsigned long flags_mask;
+ 
++		if (!file_mmap_ok(file, inode, pgoff, len))
++			return -EOVERFLOW;
++
+ 		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
+ 
+ 		switch (flags & MAP_TYPE) {
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 559db9ea8d86..ec3d47ebd919 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -1334,7 +1334,7 @@ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+ 	keys->ports.src = fl6->fl6_sport;
+ 	keys->ports.dst = fl6->fl6_dport;
+ 	keys->keyid.keyid = fl6->fl6_gre_key;
+-	keys->tags.flow_label = (__force u32)fl6->flowlabel;
++	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
+ 	keys->basic.ip_proto = fl6->flowi6_proto;
+ 
+ 	return flow_hash_from_keys(keys);
+diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
+index 60a5ad2c33ee..82690745f94a 100644
+--- a/net/core/net-sysfs.c
++++ b/net/core/net-sysfs.c
+@@ -1214,9 +1214,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ 	cpumask_var_t mask;
+ 	unsigned long index;
+ 
+-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+-		return -ENOMEM;
+-
+ 	index = get_netdev_queue_index(queue);
+ 
+ 	if (dev->num_tc) {
+@@ -1226,6 +1223,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
+ 			return -EINVAL;
+ 	}
+ 
++	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
++		return -ENOMEM;
++
+ 	rcu_read_lock();
+ 	dev_maps = rcu_dereference(dev->xps_maps);
+ 	if (dev_maps) {
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index bc290413a49d..824b32936e75 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2245,6 +2245,10 @@ static int do_setlink(const struct sk_buff *skb,
+ 	const struct net_device_ops *ops = dev->netdev_ops;
+ 	int err;
+ 
++	err = validate_linkmsg(dev, tb);
++	if (err < 0)
++		return err;
++
+ 	if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_IF_NETNSID]) {
+ 		struct net *net = rtnl_link_get_net_capable(skb, dev_net(dev),
+ 							    tb, CAP_NET_ADMIN);
+@@ -2608,10 +2612,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ 		goto errout;
+ 	}
+ 
+-	err = validate_linkmsg(dev, tb);
+-	if (err < 0)
+-		goto errout;
+-
+ 	err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
+ errout:
+ 	return err;
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 84cd4e3fd01b..0d56e36a6db7 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -283,9 +283,7 @@ int dccp_disconnect(struct sock *sk, int flags)
+ 
+ 	dccp_clear_xmit_timers(sk);
+ 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+-	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ 	dp->dccps_hc_rx_ccid = NULL;
+-	dp->dccps_hc_tx_ccid = NULL;
+ 
+ 	__skb_queue_purge(&sk->sk_receive_queue);
+ 	__skb_queue_purge(&sk->sk_write_queue);
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index f05afaf3235c..aa597b2c1429 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -643,6 +643,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
+ 	[RTA_ENCAP]		= { .type = NLA_NESTED },
+ 	[RTA_UID]		= { .type = NLA_U32 },
+ 	[RTA_MARK]		= { .type = NLA_U32 },
++	[RTA_TABLE]		= { .type = NLA_U32 },
+ };
+ 
+ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 7d36a950d961..19f7d8cd4875 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -717,6 +717,8 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
+ 			nla_strlcpy(tmp, nla, sizeof(tmp));
+ 			val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
+ 		} else {
++			if (nla_len(nla) != sizeof(u32))
++				return false;
+ 			val = nla_get_u32(nla);
+ 		}
+ 
+@@ -1043,6 +1045,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ 			if (val == TCP_CA_UNSPEC)
+ 				return -EINVAL;
+ 		} else {
++			if (nla_len(nla) != sizeof(u32))
++				return -EINVAL;
+ 			val = nla_get_u32(nla);
+ 		}
+ 		if (type == RTAX_ADVMSS && val > 65535 - 40)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 74c962b9b09c..d89d8c59b5ed 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -511,8 +511,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 	int err;
+ 	int copied;
+ 
+-	WARN_ON_ONCE(sk->sk_family == AF_INET6);
+-
+ 	err = -EAGAIN;
+ 	skb = sock_dequeue_err_skb(sk);
+ 	if (!skb)
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 57478d68248d..c4e33f4141d8 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -344,7 +344,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+ 
+ 	if (tdev) {
+ 		hlen = tdev->hard_header_len + tdev->needed_headroom;
+-		mtu = tdev->mtu;
++		mtu = min(tdev->mtu, IP_MAX_MTU);
+ 	}
+ 
+ 	dev->needed_headroom = t_hlen + hlen;
+@@ -379,7 +379,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
+ 	nt = netdev_priv(dev);
+ 	t_hlen = nt->hlen + sizeof(struct iphdr);
+ 	dev->min_mtu = ETH_MIN_MTU;
+-	dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
++	dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ 	ip_tunnel_add(itn, nt);
+ 	return nt;
+ 
+@@ -948,7 +948,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+-	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
++	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+ 
+ 	if (new_mtu < ETH_MIN_MTU)
+ 		return -EINVAL;
+@@ -1119,7 +1119,7 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
+ 
+ 	mtu = ip_tunnel_bind_dev(dev);
+ 	if (tb[IFLA_MTU]) {
+-		unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
++		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+ 
+ 		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
+ 			    (unsigned int)(max - sizeof(struct iphdr)));
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index b05689bbba31..9669722f6f57 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -356,6 +356,7 @@ static const struct rhashtable_params ipmr_rht_params = {
+ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ {
+ 	struct mr_table *mrt;
++	int err;
+ 
+ 	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
+ 	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
+@@ -371,7 +372,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ 	write_pnet(&mrt->net, net);
+ 	mrt->id = id;
+ 
+-	rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++	err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
++	if (err) {
++		kfree(mrt);
++		return ERR_PTR(err);
++	}
+ 	INIT_LIST_HEAD(&mrt->mfc_cache_list);
+ 	INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+ 
+diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
+index 0cd46bffa469..fc3923932eda 100644
+--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
++++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
+@@ -213,7 +213,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 	enum flow_offload_tuple_dir dir;
+ 	struct flow_offload *flow;
+ 	struct net_device *outdev;
+-	const struct rtable *rt;
++	struct rtable *rt;
+ 	struct iphdr *iph;
+ 	__be32 nexthop;
+ 
+@@ -234,7 +234,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 	dir = tuplehash->tuple.dir;
+ 	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+ 
+-	rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
++	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+ 	if (unlikely(nf_flow_exceeds_mtu(skb, rt)))
+ 		return NF_ACCEPT;
+ 
+@@ -251,6 +251,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ 
+ 	skb->dev = outdev;
+ 	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
++	skb_dst_set_noref(skb, &rt->dst);
+ 	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
+ 
+ 	return NF_STOLEN;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 072333760a52..f39ea066977d 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -507,7 +507,8 @@ int ip6_forward(struct sk_buff *skb)
+ 	   send redirects to source routed frames.
+ 	   We don't send redirects to frames decapsulated from IPsec.
+ 	 */
+-	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
++	if (IP6CB(skb)->iif == dst->dev->ifindex &&
++	    opt->srcrt == 0 && !skb_sec_path(skb)) {
+ 		struct in6_addr *target = NULL;
+ 		struct inet_peer *peer;
+ 		struct rt6_info *rt;
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 179313b0926c..58b4ffd7168e 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1688,8 +1688,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
+ 		if (new_mtu < ETH_MIN_MTU)
+ 			return -EINVAL;
+ 	}
+-	if (new_mtu > 0xFFF8 - dev->hard_header_len)
+-		return -EINVAL;
++	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
++		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
++			return -EINVAL;
++	} else {
++		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
++			return -EINVAL;
++	}
+ 	dev->mtu = new_mtu;
+ 	return 0;
+ }
+@@ -1837,7 +1842,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
+ 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ 		dev->mtu -= 8;
+ 	dev->min_mtu = ETH_MIN_MTU;
+-	dev->max_mtu = 0xFFF8 - dev->hard_header_len;
++	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
+ 
+ 	return 0;
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 9f6cace9c817..bab166a6fbb3 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -1800,7 +1800,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
+ 		ret = 0;
+ 		if (!ip6mr_new_table(net, v))
+ 			ret = -ENOMEM;
+-		raw6_sk(sk)->ip6mr_table = v;
++		else
++			raw6_sk(sk)->ip6mr_table = v;
+ 		rtnl_unlock();
+ 		return ret;
+ 	}
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index ba5e04c6ae17..65956d0f8a1f 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1576,6 +1576,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+ 	   ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
+ 	bool ret;
+ 
++	if (netif_is_l3_master(skb->dev)) {
++		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
++		if (!dev)
++			return;
++	}
++
+ 	if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
+ 		ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
+ 			  dev->name);
+diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
+index 207cb35569b1..2d6652146bba 100644
+--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
++++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
+@@ -243,6 +243,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ 
+ 	skb->dev = outdev;
+ 	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
++	skb_dst_set_noref(skb, &rt->dst);
+ 	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+ 
+ 	return NF_STOLEN;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1aee1a537cb1..8f749742f11f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1850,7 +1850,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
+ 	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ 	keys->addrs.v6addrs.src = key_iph->saddr;
+ 	keys->addrs.v6addrs.dst = key_iph->daddr;
+-	keys->tags.flow_label = ip6_flowinfo(key_iph);
++	keys->tags.flow_label = ip6_flowlabel(key_iph);
+ 	keys->basic.ip_proto = key_iph->nexthdr;
+ }
+ 
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index 5fe139484919..bf4763fd68c2 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 	tot_len = hdrlen + sizeof(*hdr);
+ 
+-	err = skb_cow_head(skb, tot_len);
++	err = skb_cow_head(skb, tot_len + skb->mac_len);
+ 	if (unlikely(err))
+ 		return err;
+ 
+@@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
+ 
+ 	hdrlen = (osrh->hdrlen + 1) << 3;
+ 
+-	err = skb_cow_head(skb, hdrlen);
++	err = skb_cow_head(skb, hdrlen + skb->mac_len);
+ 	if (unlikely(err))
+ 		return err;
+ 
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index e85791854c87..5d176c532f0c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1371,7 +1371,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
+ 	dev->hard_header_len	= LL_MAX_HEADER + t_hlen;
+ 	dev->mtu		= ETH_DATA_LEN - t_hlen;
+ 	dev->min_mtu		= IPV6_MIN_MTU;
+-	dev->max_mtu		= 0xFFF8 - t_hlen;
++	dev->max_mtu		= IP6_MAX_MTU - t_hlen;
+ 	dev->flags		= IFF_NOARP;
+ 	netif_keep_dst(dev);
+ 	dev->addr_len		= 4;
+@@ -1583,7 +1583,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
+ 	if (tb[IFLA_MTU]) {
+ 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+ 
+-		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
++		if (mtu >= IPV6_MIN_MTU &&
++		    mtu <= IP6_MAX_MTU - dev->hard_header_len)
+ 			dev->mtu = mtu;
+ 	}
+ 
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index dc76bc346829..d3601d421571 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1671,7 +1671,7 @@ static struct file *kcm_clone(struct socket *osock)
+ 	__module_get(newsock->ops->owner);
+ 
+ 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
+-			 &kcm_proto, true);
++			 &kcm_proto, false);
+ 	if (!newsk) {
+ 		sock_release(newsock);
+ 		return ERR_PTR(-ENOMEM);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 0c4530ad74be..b7185d600844 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -428,16 +428,6 @@ static void pppol2tp_put_sk(struct rcu_head *head)
+  */
+ static void pppol2tp_session_close(struct l2tp_session *session)
+ {
+-	struct pppol2tp_session *ps;
+-
+-	ps = l2tp_session_priv(session);
+-	mutex_lock(&ps->sk_lock);
+-	ps->__sk = rcu_dereference_protected(ps->sk,
+-					     lockdep_is_held(&ps->sk_lock));
+-	RCU_INIT_POINTER(ps->sk, NULL);
+-	if (ps->__sk)
+-		call_rcu(&ps->rcu, pppol2tp_put_sk);
+-	mutex_unlock(&ps->sk_lock);
+ }
+ 
+ /* Really kill the session socket. (Called from sock_put() if
+@@ -480,15 +470,24 @@ static int pppol2tp_release(struct socket *sock)
+ 	sock_orphan(sk);
+ 	sock->sk = NULL;
+ 
+-	/* If the socket is associated with a session,
+-	 * l2tp_session_delete will call pppol2tp_session_close which
+-	 * will drop the session's ref on the socket.
+-	 */
+ 	session = pppol2tp_sock_to_session(sk);
+ 	if (session) {
++		struct pppol2tp_session *ps;
++
+ 		l2tp_session_delete(session);
+-		/* drop the ref obtained by pppol2tp_sock_to_session */
+-		sock_put(sk);
++
++		ps = l2tp_session_priv(session);
++		mutex_lock(&ps->sk_lock);
++		ps->__sk = rcu_dereference_protected(ps->sk,
++						     lockdep_is_held(&ps->sk_lock));
++		RCU_INIT_POINTER(ps->sk, NULL);
++		mutex_unlock(&ps->sk_lock);
++		call_rcu(&ps->rcu, pppol2tp_put_sk);
++
++		/* Rely on the sock_put() call at the end of the function for
++		 * dropping the reference held by pppol2tp_sock_to_session().
++		 * The last reference will be dropped by pppol2tp_put_sk().
++		 */
+ 	}
+ 
+ 	release_sock(sk);
+@@ -742,7 +741,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 		 */
+ 		mutex_lock(&ps->sk_lock);
+ 		if (rcu_dereference_protected(ps->sk,
+-					      lockdep_is_held(&ps->sk_lock))) {
++					      lockdep_is_held(&ps->sk_lock)) ||
++		    ps->__sk) {
+ 			mutex_unlock(&ps->sk_lock);
+ 			error = -EEXIST;
+ 			goto end;
+@@ -803,7 +803,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ out_no_ppp:
+ 	/* This is how we get the session context from the socket. */
+-	sock_hold(sk);
+ 	sk->sk_user_data = session;
+ 	rcu_assign_pointer(ps->sk, sk);
+ 	mutex_unlock(&ps->sk_lock);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c6a2dd890de3..c9432a0ccd56 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2911,7 +2911,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		if (unlikely(offset < 0))
+ 			goto out_free;
+ 	} else if (reserve) {
+-		skb_push(skb, reserve);
++		skb_reserve(skb, -reserve);
+ 	}
+ 
+ 	/* Returns -EFAULT on error */
+@@ -4284,7 +4284,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 			goto out;
+ 		if (po->tp_version >= TPACKET_V3 &&
+ 		    req->tp_block_size <=
+-			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
++		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+ 			goto out;
+ 		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+ 					po->tp_reserve))
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index c2c732aad87c..86d2d5977f56 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -1587,7 +1587,7 @@ int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
+ 		return ret;
+ 	ok_count = ret;
+ 
+-	if (!exts)
++	if (!exts || ok_count)
+ 		return ok_count;
+ 	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
+ 	if (ret < 0)
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index 7d0ce2c40f93..2c0c557c0007 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -974,7 +974,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
+ 	return 0;
+ 
+ errout_idr:
+-	if (fnew->handle)
++	if (!fold)
+ 		idr_remove(&head->handle_idr, fnew->handle);
+ errout:
+ 	tcf_exts_destroy(&fnew->exts);
+diff --git a/net/sctp/transport.c b/net/sctp/transport.c
+index 47f82bd794d9..03fc2c427aca 100644
+--- a/net/sctp/transport.c
++++ b/net/sctp/transport.c
+@@ -634,7 +634,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans)
+ 	    trans->state != SCTP_PF)
+ 		timeout += trans->hbinterval;
+ 
+-	return timeout;
++	return max_t(unsigned long, timeout, HZ / 5);
+ }
+ 
+ /* Reset transport variables to their initial values */
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index df26c7b0fe13..1a24660bd2ec 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -745,7 +745,7 @@ int conf_write(const char *name)
+ 	struct menu *menu;
+ 	const char *basename;
+ 	const char *str;
+-	char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
++	char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
+ 	char *env;
+ 
+ 	dirname[0] = 0;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-06-05 11:23 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-06-05 11:23 UTC (permalink / raw
  To: gentoo-commits

commit:     3e7962438b88fe111422c8cfb753db35134db8ce
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jun  5 11:23:14 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jun  5 11:23:14 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3e796243

Linux patch 4.16.14

 0000_README              |    4 +
 1013_linux-4.16.14.patch | 2085 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2089 insertions(+)

diff --git a/0000_README b/0000_README
index f199583..5691b91 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-4.16.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.13
 
+Patch:  1013_linux-4.16.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.16.14.patch b/1013_linux-4.16.14.patch
new file mode 100644
index 0000000..91e0c4e
--- /dev/null
+++ b/1013_linux-4.16.14.patch
@@ -0,0 +1,2085 @@
+diff --git a/Makefile b/Makefile
+index 146e527a5e06..a043442e442f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 57028d49c202..cdcfe4639a83 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -721,6 +721,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 	if (value & ~known_bits)
+ 		return -EOPNOTSUPP;
+ 
++	/* Setting FRE without FR is not supported.  */
++	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
++		return -EOPNOTSUPP;
++
+ 	/* Avoid inadvertently triggering emulation */
+ 	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+ 	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 8d098b9f395c..0c0c23c9c9f5 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -818,7 +818,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 				break;
+ 			}
+ #endif
+-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
++			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ 			break;
+ 		case PC:
+ 			tmp = regs->cp0_epc;
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index 656a137c1fe2..f30c381d3e1c 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -109,7 +109,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 						addr & 1);
+ 				break;
+ 			}
+-			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
++			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ 			break;
+ 		case PC:
+ 			tmp = regs->cp0_epc;
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index 12bc2863a4d6..c8e038800591 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
+ 	[SMCA_SMU]	= { "smu",		"System Management Unit" },
+ };
+ 
++static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
++{
++	[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
++};
++
+ const char *smca_get_name(enum smca_bank_types t)
+ {
+ 	if (t >= N_SMCA_BANK_TYPES)
+@@ -431,52 +436,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
+ 	wrmsr(MSR_CU_DEF_ERR, low, high);
+ }
+ 
+-static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
+-			     unsigned int bank, unsigned int block)
++static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
++				  unsigned int block)
+ {
+-	u32 addr = 0, offset = 0;
++	u32 low, high;
++	u32 addr = 0;
+ 
+-	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
++	if (smca_get_bank_type(bank) == SMCA_RESERVED)
+ 		return addr;
+ 
+-	/* Get address from already initialized block. */
+-	if (per_cpu(threshold_banks, cpu)) {
+-		struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
++	if (!block)
++		return MSR_AMD64_SMCA_MCx_MISC(bank);
+ 
+-		if (bankp && bankp->blocks) {
+-			struct threshold_block *blockp = &bankp->blocks[block];
++	/* Check our cache first: */
++	if (smca_bank_addrs[bank][block] != -1)
++		return smca_bank_addrs[bank][block];
+ 
+-			if (blockp)
+-				return blockp->address;
+-		}
+-	}
++	/*
++	 * For SMCA enabled processors, BLKPTR field of the first MISC register
++	 * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
++	 */
++	if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
++		goto out;
+ 
+-	if (mce_flags.smca) {
+-		if (smca_get_bank_type(bank) == SMCA_RESERVED)
+-			return addr;
++	if (!(low & MCI_CONFIG_MCAX))
++		goto out;
+ 
+-		if (!block) {
+-			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
+-		} else {
+-			/*
+-			 * For SMCA enabled processors, BLKPTR field of the
+-			 * first MISC register (MCx_MISC0) indicates presence of
+-			 * additional MISC register set (MISC1-4).
+-			 */
+-			u32 low, high;
++	if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
++	    (low & MASK_BLKPTR_LO))
++		addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+ 
+-			if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
+-				return addr;
++out:
++	smca_bank_addrs[bank][block] = addr;
++	return addr;
++}
+ 
+-			if (!(low & MCI_CONFIG_MCAX))
+-				return addr;
++static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high,
++			     unsigned int bank, unsigned int block)
++{
++	u32 addr = 0, offset = 0;
+ 
+-			if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
+-			    (low & MASK_BLKPTR_LO))
+-				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
+-		}
++	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+ 		return addr;
+-	}
++
++	if (mce_flags.smca)
++		return smca_get_block_address(cpu, bank, block);
+ 
+ 	/* Fall back to method we used for older processors: */
+ 	switch (block) {
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+index 44de0874629f..416abebb8b86 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+@@ -166,10 +166,10 @@ void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+ 		cz_dpm_powerup_uvd(hwmgr);
+ 		cgs_set_clockgating_state(hwmgr->device,
+ 						AMD_IP_BLOCK_TYPE_UVD,
+-						AMD_PG_STATE_UNGATE);
++						AMD_CG_STATE_UNGATE);
+ 		cgs_set_powergating_state(hwmgr->device,
+ 						AMD_IP_BLOCK_TYPE_UVD,
+-						AMD_CG_STATE_UNGATE);
++						AMD_PG_STATE_UNGATE);
+ 		cz_dpm_update_uvd_dpm(hwmgr, false);
+ 	}
+ 
+@@ -197,11 +197,11 @@ void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+ 		cgs_set_clockgating_state(
+ 					hwmgr->device,
+ 					AMD_IP_BLOCK_TYPE_VCE,
+-					AMD_PG_STATE_UNGATE);
++					AMD_CG_STATE_UNGATE);
+ 		cgs_set_powergating_state(
+ 					hwmgr->device,
+ 					AMD_IP_BLOCK_TYPE_VCE,
+-					AMD_CG_STATE_UNGATE);
++					AMD_PG_STATE_UNGATE);
+ 		cz_dpm_update_vce_dpm(hwmgr);
+ 		cz_enable_disable_vce_dpm(hwmgr, true);
+ 	}
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+index 69a0678ace98..402aa9cb1f78 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+@@ -162,7 +162,7 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+ 				AMD_CG_STATE_UNGATE);
+ 		cgs_set_powergating_state(hwmgr->device,
+ 						AMD_IP_BLOCK_TYPE_UVD,
+-						AMD_CG_STATE_UNGATE);
++						AMD_PG_STATE_UNGATE);
+ 		smu7_update_uvd_dpm(hwmgr, false);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index adf79be42c1e..9ffa66713104 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -1141,6 +1141,7 @@ int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE])
+ 	static const u16 psr_setup_time_us[] = {
+ 		PSR_SETUP_TIME(330),
+ 		PSR_SETUP_TIME(275),
++		PSR_SETUP_TIME(220),
+ 		PSR_SETUP_TIME(165),
+ 		PSR_SETUP_TIME(110),
+ 		PSR_SETUP_TIME(55),
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 7ed6f7b69556..3ba99c551f61 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -567,6 +567,36 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ 	return NOTIFY_OK;
+ }
+ 
++static int
++intel_lvds_connector_register(struct drm_connector *connector)
++{
++	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
++	int ret;
++
++	ret = intel_connector_register(connector);
++	if (ret)
++		return ret;
++
++	lvds->lid_notifier.notifier_call = intel_lid_notify;
++	if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
++		DRM_DEBUG_KMS("lid notifier registration failed\n");
++		lvds->lid_notifier.notifier_call = NULL;
++	}
++
++	return 0;
++}
++
++static void
++intel_lvds_connector_unregister(struct drm_connector *connector)
++{
++	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
++
++	if (lvds->lid_notifier.notifier_call)
++		acpi_lid_notifier_unregister(&lvds->lid_notifier);
++
++	intel_connector_unregister(connector);
++}
++
+ /**
+  * intel_lvds_destroy - unregister and free LVDS structures
+  * @connector: connector to free
+@@ -579,9 +609,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
+ 	struct intel_lvds_connector *lvds_connector =
+ 		to_lvds_connector(connector);
+ 
+-	if (lvds_connector->lid_notifier.notifier_call)
+-		acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
+-
+ 	if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+ 		kfree(lvds_connector->base.edid);
+ 
+@@ -602,8 +629,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ 	.fill_modes = drm_helper_probe_single_connector_modes,
+ 	.atomic_get_property = intel_digital_connector_atomic_get_property,
+ 	.atomic_set_property = intel_digital_connector_atomic_set_property,
+-	.late_register = intel_connector_register,
+-	.early_unregister = intel_connector_unregister,
++	.late_register = intel_lvds_connector_register,
++	.early_unregister = intel_lvds_connector_unregister,
+ 	.destroy = intel_lvds_destroy,
+ 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+@@ -820,6 +847,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
+ 		},
+ 	},
++	{
++		.callback = intel_no_lvds_dmi_callback,
++		.ident = "Radiant P845",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
++		},
++	},
+ 
+ 	{ }	/* terminating entry */
+ };
+@@ -1138,12 +1173,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
+ 
+ 	lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
+ 
+-	lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
+-	if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
+-		DRM_DEBUG_KMS("lid notifier registration failed\n");
+-		lvds_connector->lid_notifier.notifier_call = NULL;
+-	}
+-
+ 	return;
+ 
+ failed:
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index 97000996b8dc..21d746bdc922 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -328,9 +328,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
+ {
+ 	struct rpc_channel channel;
+ 	char *msg, *reply = NULL;
+-	size_t msg_len, reply_len = 0;
+-	int ret = 0;
+-
++	size_t reply_len = 0;
+ 
+ 	if (!vmw_msg_enabled)
+ 		return -ENODEV;
+@@ -338,24 +336,20 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
+ 	if (!guest_info_param || !length)
+ 		return -EINVAL;
+ 
+-	msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
+-	msg = kzalloc(msg_len, GFP_KERNEL);
++	msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
+ 	if (!msg) {
+ 		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+ 		return -ENOMEM;
+ 	}
+ 
+-	sprintf(msg, "info-get %s", guest_info_param);
++	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
++		goto out_open;
+ 
+-	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+-	    vmw_send_msg(&channel, msg) ||
+-	    vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
+-	    vmw_close_channel(&channel)) {
+-		DRM_ERROR("Failed to get %s", guest_info_param);
+-
+-		ret = -EINVAL;
+-	}
++	if (vmw_send_msg(&channel, msg) ||
++	    vmw_recv_msg(&channel, (void *) &reply, &reply_len))
++		goto out_msg;
+ 
++	vmw_close_channel(&channel);
+ 	if (buffer && reply && reply_len > 0) {
+ 		/* Remove reply code, which are the first 2 characters of
+ 		 * the reply
+@@ -372,7 +366,17 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
+ 	kfree(reply);
+ 	kfree(msg);
+ 
+-	return ret;
++	return 0;
++
++out_msg:
++	vmw_close_channel(&channel);
++	kfree(reply);
++out_open:
++	*length = 0;
++	kfree(msg);
++	DRM_ERROR("Failed to get %s", guest_info_param);
++
++	return -EINVAL;
+ }
+ 
+ 
+@@ -388,7 +392,6 @@ int vmw_host_log(const char *log)
+ {
+ 	struct rpc_channel channel;
+ 	char *msg;
+-	int msg_len;
+ 	int ret = 0;
+ 
+ 
+@@ -398,24 +401,28 @@ int vmw_host_log(const char *log)
+ 	if (!log)
+ 		return ret;
+ 
+-	msg_len = strlen(log) + strlen("log ") + 1;
+-	msg = kzalloc(msg_len, GFP_KERNEL);
++	msg = kasprintf(GFP_KERNEL, "log %s", log);
+ 	if (!msg) {
+ 		DRM_ERROR("Cannot allocate memory for log message\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	sprintf(msg, "log %s", log);
++	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM))
++		goto out_open;
+ 
+-	if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+-	    vmw_send_msg(&channel, msg) ||
+-	    vmw_close_channel(&channel)) {
+-		DRM_ERROR("Failed to send log\n");
++	if (vmw_send_msg(&channel, msg))
++		goto out_msg;
+ 
+-		ret = -EINVAL;
+-	}
++	vmw_close_channel(&channel);
++	kfree(msg);
+ 
++	return 0;
++
++out_msg:
++	vmw_close_channel(&channel);
++out_open:
+ 	kfree(msg);
++	DRM_ERROR("Failed to send log\n");
+ 
+-	return ret;
++	return -EINVAL;
+ }
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index dfb57eaa9f22..58ac786634dc 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -741,8 +741,8 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
+ 		/* Reset the page to write-back before releasing */
+ 		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
+ #endif
+-		dma_free_coherent(msc_dev(msc), size, win->block[i].bdesc,
+-				  win->block[i].addr);
++		dma_free_coherent(msc_dev(msc)->parent->parent, size,
++				  win->block[i].bdesc, win->block[i].addr);
+ 	}
+ 	kfree(win);
+ 
+@@ -777,7 +777,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+ 		/* Reset the page to write-back before releasing */
+ 		set_memory_wb((unsigned long)win->block[i].bdesc, 1);
+ #endif
+-		dma_free_coherent(msc_dev(win->msc), PAGE_SIZE,
++		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ 				  win->block[i].bdesc, win->block[i].addr);
+ 	}
+ 
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index f129869e05a9..736862967e32 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -27,6 +27,7 @@
+ #include <linux/stm.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
++#include <linux/vmalloc.h>
+ #include "stm.h"
+ 
+ #include <uapi/linux/stm.h>
+@@ -682,7 +683,7 @@ static void stm_device_release(struct device *dev)
+ {
+ 	struct stm_device *stm = to_stm_device(dev);
+ 
+-	kfree(stm);
++	vfree(stm);
+ }
+ 
+ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+@@ -699,7 +700,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ 		return -EINVAL;
+ 
+ 	nmasters = stm_data->sw_end - stm_data->sw_start + 1;
+-	stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
++	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
+ 	if (!stm)
+ 		return -ENOMEM;
+ 
+@@ -752,7 +753,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
+ 	/* matches device_initialize() above */
+ 	put_device(&stm->dev);
+ err_free:
+-	kfree(stm);
++	vfree(stm);
+ 
+ 	return err;
+ }
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index 72bc2b71765a..47bbed3afc8f 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -159,6 +159,7 @@ config AT91_SAMA5D2_ADC
+ 	depends on ARCH_AT91 || COMPILE_TEST
+ 	depends on HAS_IOMEM
+ 	depends on HAS_DMA
++	select IIO_BUFFER
+ 	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build support for Atmel SAMA5D2 ADC which is
+diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
+index 801afb61310b..d4bbe5b53318 100644
+--- a/drivers/iio/adc/ad7793.c
++++ b/drivers/iio/adc/ad7793.c
+@@ -348,55 +348,6 @@ static const u16 ad7793_sample_freq_avail[16] = {0, 470, 242, 123, 62, 50, 39,
+ static const u16 ad7797_sample_freq_avail[16] = {0, 0, 0, 123, 62, 50, 0,
+ 					33, 0, 17, 16, 12, 10, 8, 6, 4};
+ 
+-static ssize_t ad7793_read_frequency(struct device *dev,
+-		struct device_attribute *attr,
+-		char *buf)
+-{
+-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+-	struct ad7793_state *st = iio_priv(indio_dev);
+-
+-	return sprintf(buf, "%d\n",
+-	       st->chip_info->sample_freq_avail[AD7793_MODE_RATE(st->mode)]);
+-}
+-
+-static ssize_t ad7793_write_frequency(struct device *dev,
+-		struct device_attribute *attr,
+-		const char *buf,
+-		size_t len)
+-{
+-	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+-	struct ad7793_state *st = iio_priv(indio_dev);
+-	long lval;
+-	int i, ret;
+-
+-	ret = kstrtol(buf, 10, &lval);
+-	if (ret)
+-		return ret;
+-
+-	if (lval == 0)
+-		return -EINVAL;
+-
+-	for (i = 0; i < 16; i++)
+-		if (lval == st->chip_info->sample_freq_avail[i])
+-			break;
+-	if (i == 16)
+-		return -EINVAL;
+-
+-	ret = iio_device_claim_direct_mode(indio_dev);
+-	if (ret)
+-		return ret;
+-	st->mode &= ~AD7793_MODE_RATE(-1);
+-	st->mode |= AD7793_MODE_RATE(i);
+-	ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
+-	iio_device_release_direct_mode(indio_dev);
+-
+-	return len;
+-}
+-
+-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+-		ad7793_read_frequency,
+-		ad7793_write_frequency);
+-
+ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ 	"470 242 123 62 50 39 33 19 17 16 12 10 8 6 4");
+ 
+@@ -424,7 +375,6 @@ static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
+ 		ad7793_show_scale_available, NULL, 0);
+ 
+ static struct attribute *ad7793_attributes[] = {
+-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+ 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ 	&iio_dev_attr_in_m_in_scale_available.dev_attr.attr,
+ 	NULL
+@@ -435,7 +385,6 @@ static const struct attribute_group ad7793_attribute_group = {
+ };
+ 
+ static struct attribute *ad7797_attributes[] = {
+-	&iio_dev_attr_sampling_frequency.dev_attr.attr,
+ 	&iio_const_attr_sampling_frequency_available_ad7797.dev_attr.attr,
+ 	NULL
+ };
+@@ -505,6 +454,10 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
+ 			*val -= offset;
+ 		}
+ 		return IIO_VAL_INT;
++	case IIO_CHAN_INFO_SAMP_FREQ:
++		*val = st->chip_info
++			       ->sample_freq_avail[AD7793_MODE_RATE(st->mode)];
++		return IIO_VAL_INT;
+ 	}
+ 	return -EINVAL;
+ }
+@@ -542,6 +495,26 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
+ 				break;
+ 			}
+ 		break;
++	case IIO_CHAN_INFO_SAMP_FREQ:
++		if (!val) {
++			ret = -EINVAL;
++			break;
++		}
++
++		for (i = 0; i < 16; i++)
++			if (val == st->chip_info->sample_freq_avail[i])
++				break;
++
++		if (i == 16) {
++			ret = -EINVAL;
++			break;
++		}
++
++		st->mode &= ~AD7793_MODE_RATE(-1);
++		st->mode |= AD7793_MODE_RATE(i);
++		ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
++				st->mode);
++		break;
+ 	default:
+ 		ret = -EINVAL;
+ 	}
+diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
+index 4eff8351ce29..8729d6524b4d 100644
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -333,6 +333,27 @@ static const struct iio_chan_spec at91_adc_channels[] = {
+ 				+ AT91_SAMA5D2_DIFF_CHAN_CNT + 1),
+ };
+ 
++static int at91_adc_chan_xlate(struct iio_dev *indio_dev, int chan)
++{
++	int i;
++
++	for (i = 0; i < indio_dev->num_channels; i++) {
++		if (indio_dev->channels[i].scan_index == chan)
++			return i;
++	}
++	return -EINVAL;
++}
++
++static inline struct iio_chan_spec const *
++at91_adc_chan_get(struct iio_dev *indio_dev, int chan)
++{
++	int index = at91_adc_chan_xlate(indio_dev, chan);
++
++	if (index < 0)
++		return NULL;
++	return indio_dev->channels + index;
++}
++
+ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+ {
+ 	struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+@@ -350,8 +371,10 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
+ 	at91_adc_writel(st, AT91_SAMA5D2_TRGR, status);
+ 
+ 	for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
+-		struct iio_chan_spec const *chan = indio->channels + bit;
++		struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+ 
++		if (!chan)
++			continue;
+ 		if (state) {
+ 			at91_adc_writel(st, AT91_SAMA5D2_CHER,
+ 					BIT(chan->channel));
+@@ -448,7 +471,11 @@ static int at91_adc_dma_start(struct iio_dev *indio_dev)
+ 
+ 	for_each_set_bit(bit, indio_dev->active_scan_mask,
+ 			 indio_dev->num_channels) {
+-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
++		struct iio_chan_spec const *chan =
++					 at91_adc_chan_get(indio_dev, bit);
++
++		if (!chan)
++			continue;
+ 
+ 		st->dma_st.rx_buf_sz += chan->scan_type.storagebits / 8;
+ 	}
+@@ -526,8 +553,11 @@ static int at91_adc_buffer_predisable(struct iio_dev *indio_dev)
+ 	 */
+ 	for_each_set_bit(bit, indio_dev->active_scan_mask,
+ 			 indio_dev->num_channels) {
+-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
++		struct iio_chan_spec const *chan =
++					at91_adc_chan_get(indio_dev, bit);
+ 
++		if (!chan)
++			continue;
+ 		if (st->dma_st.dma_chan)
+ 			at91_adc_readl(st, chan->address);
+ 	}
+@@ -587,8 +617,11 @@ static void at91_adc_trigger_handler_nodma(struct iio_dev *indio_dev,
+ 
+ 	for_each_set_bit(bit, indio_dev->active_scan_mask,
+ 			 indio_dev->num_channels) {
+-		struct iio_chan_spec const *chan = indio_dev->channels + bit;
++		struct iio_chan_spec const *chan =
++					at91_adc_chan_get(indio_dev, bit);
+ 
++		if (!chan)
++			continue;
+ 		st->buffer[i] = at91_adc_readl(st, chan->address);
+ 		i++;
+ 	}
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 01422d11753c..b28a716a23b2 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -144,6 +144,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ 	 * Leave as soon as if exact resolution if reached.
+ 	 * Otherwise the higher resolution below 32 bits is kept.
+ 	 */
++	fl->res = 0;
+ 	for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ 		for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ 			if (fast)
+@@ -193,7 +194,7 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ 		}
+ 	}
+ 
+-	if (!fl->fosr)
++	if (!fl->res)
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -770,7 +771,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ 	struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ 	struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ 	struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
+-	unsigned int spi_freq = adc->spi_freq;
++	unsigned int spi_freq;
+ 	int ret = -EINVAL;
+ 
+ 	switch (mask) {
+@@ -784,8 +785,18 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ 	case IIO_CHAN_INFO_SAMP_FREQ:
+ 		if (!val)
+ 			return -EINVAL;
+-		if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
++
++		switch (ch->src) {
++		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL:
+ 			spi_freq = adc->dfsdm->spi_master_freq;
++			break;
++		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING:
++		case DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING:
++			spi_freq = adc->dfsdm->spi_master_freq / 2;
++			break;
++		default:
++			spi_freq = adc->spi_freq;
++		}
+ 
+ 		if (spi_freq % val)
+ 			dev_warn(&indio_dev->dev,
+diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
+index 05e0c353e089..b32bf57910ca 100644
+--- a/drivers/iio/buffer/industrialio-buffer-dma.c
++++ b/drivers/iio/buffer/industrialio-buffer-dma.c
+@@ -587,7 +587,7 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_set_bytes_per_datum);
+  * Should be used as the set_length callback for iio_buffer_access_ops
+  * struct for DMA buffers.
+  */
+-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length)
++int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
+ {
+ 	/* Avoid an invalid state */
+ 	if (length < 2)
+diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
+index 047fe757ab97..70c302a93d7f 100644
+--- a/drivers/iio/buffer/kfifo_buf.c
++++ b/drivers/iio/buffer/kfifo_buf.c
+@@ -22,11 +22,18 @@ struct iio_kfifo {
+ #define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
+ 
+ static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
+-				int bytes_per_datum, int length)
++			size_t bytes_per_datum, unsigned int length)
+ {
+ 	if ((length == 0) || (bytes_per_datum == 0))
+ 		return -EINVAL;
+ 
++	/*
++	 * Make sure we don't overflow an unsigned int after kfifo rounds up to
++	 * the next power of 2.
++	 */
++	if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
++		return -EINVAL;
++
+ 	return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
+ 			     bytes_per_datum, GFP_KERNEL);
+ }
+@@ -67,7 +74,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+ 	return 0;
+ }
+ 
+-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
++static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
+ {
+ 	/* Avoid an invalid state */
+ 	if (length < 2)
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index cfb6588565ba..4905a997a7ec 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -178,14 +178,14 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ #ifdef CONFIG_PM
+ 	int ret;
+ 
+-	atomic_set(&st->user_requested_state, state);
+-
+ 	if (atomic_add_unless(&st->runtime_pm_enable, 1, 1))
+ 		pm_runtime_enable(&st->pdev->dev);
+ 
+-	if (state)
++	if (state) {
++		atomic_inc(&st->user_requested_state);
+ 		ret = pm_runtime_get_sync(&st->pdev->dev);
+-	else {
++	} else {
++		atomic_dec(&st->user_requested_state);
+ 		pm_runtime_mark_last_busy(&st->pdev->dev);
+ 		pm_runtime_use_autosuspend(&st->pdev->dev);
+ 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index e9a409d7f4e2..21fbee68b8ed 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -434,7 +434,7 @@ static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
+ 		return -EINVAL;
+ 
+ 	if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID)
+-		return -EAGAIN;
++		return -EINVAL;
+ 
+ 	memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
+ 	if (attr) {
+diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
+index 29f99529b187..cfcb32559925 100644
+--- a/drivers/input/mouse/elan_i2c_smbus.c
++++ b/drivers/input/mouse/elan_i2c_smbus.c
+@@ -130,7 +130,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
+ 					bool max_baseline, u8 *value)
+ {
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client,
+ 					  max_baseline ?
+@@ -149,7 +149,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
+ 				  bool iap, u8 *version)
+ {
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client,
+ 					  iap ? ETP_SMBUS_IAP_VERSION_CMD :
+@@ -170,7 +170,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
+ 				     u8 *clickpad)
+ {
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client,
+ 					  ETP_SMBUS_SM_VERSION_CMD, val);
+@@ -188,7 +188,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
+ static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
+ {
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client,
+ 					  ETP_SMBUS_UNIQUEID_CMD, val);
+@@ -205,7 +205,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
+ 				   bool iap, u16 *csum)
+ {
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client,
+ 					  iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
+@@ -226,7 +226,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
+ {
+ 	int ret;
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
+ 	if (ret != 3) {
+@@ -246,7 +246,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
+ {
+ 	int ret;
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_RESOLUTION_CMD, val);
+ 	if (ret != 3) {
+@@ -267,7 +267,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
+ {
+ 	int ret;
+ 	int error;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	ret = i2c_smbus_read_block_data(client, ETP_SMBUS_XY_TRACENUM_CMD, val);
+ 	if (ret != 3) {
+@@ -294,7 +294,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
+ {
+ 	int error;
+ 	u16 constant;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
+ 	if (error < 0) {
+@@ -345,7 +345,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
+ 	int len;
+ 	int error;
+ 	enum tp_mode mode;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 	u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
+ 	u16 password;
+ 
+@@ -419,7 +419,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
+ 	struct device *dev = &client->dev;
+ 	int error;
+ 	u16 result;
+-	u8 val[3];
++	u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
+ 
+ 	/*
+ 	 * Due to the limitation of smbus protocol limiting
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index a246fc686bb7..6c4bbd38700e 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -172,6 +172,12 @@ static const char * const smbus_pnp_ids[] = {
+ 	"LEN0048", /* X1 Carbon 3 */
+ 	"LEN0046", /* X250 */
+ 	"LEN004a", /* W541 */
++	"LEN0071", /* T480 */
++	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
++	"LEN0073", /* X1 Carbon G5 (Elantech) */
++	"LEN0092", /* X1 Carbon 6 */
++	"LEN0096", /* X280 */
++	"LEN0097", /* X280 -> ALPS trackpoint */
+ 	"LEN200f", /* T450s */
+ 	NULL
+ };
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+index a6884e73d2ab..7ddee980048b 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+@@ -372,16 +372,15 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
+ 
+ 	/*
+ 	 * Determine IFS values
+-	 * - Use TXOP_BACKOFF for probe and management frames except beacons
++	 * - Use TXOP_BACKOFF for management frames except beacons
+ 	 * - Use TXOP_SIFS for fragment bursts
+ 	 * - Use TXOP_HTTXOP for everything else
+ 	 *
+ 	 * Note: rt2800 devices won't use CTS protection (if used)
+ 	 * for frames not transmitted with TXOP_HTTXOP
+ 	 */
+-	if ((ieee80211_is_mgmt(hdr->frame_control) &&
+-	     !ieee80211_is_beacon(hdr->frame_control)) ||
+-	    (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
++	if (ieee80211_is_mgmt(hdr->frame_control) &&
++	    !ieee80211_is_beacon(hdr->frame_control))
+ 		txdesc->u.ht.txop = TXOP_BACKOFF;
+ 	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
+ 		txdesc->u.ht.txop = TXOP_SIFS;
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+index 9cff6bc4049c..cf551785eb08 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+@@ -299,9 +299,6 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ 			writeVal = 0x00000000;
+ 		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+ 			writeVal = writeVal - 0x06060606;
+-		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+-			 TXHIGHPWRLEVEL_BT2)
+-			writeVal = writeVal;
+ 		*(p_outwriteval + rf) = writeVal;
+ 	}
+ }
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f5259912f049..df3d5051539d 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1446,8 +1446,8 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ 	if (ns->lba_shift == 0)
+ 		ns->lba_shift = 9;
+ 	ns->noiob = le16_to_cpu(id->noiob);
+-	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+ 	ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms);
++	ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT);
+ 	/* the PI implementation requires metadata equal t10 pi tuple size */
+ 	if (ns->ms == sizeof(struct t10_pi_tuple))
+ 		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 95e5c5ea40af..495432f3341b 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ 		return -EINVAL;
+ 
+ 	chip = &pctrl->chip;
+-	chip->base = -1;
++	chip->base = 0;
+ 	chip->ngpio = ngpio;
+ 	chip->label = dev_name(pctrl->dev);
+ 	chip->parent = pctrl->dev;
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index 36f6190931bc..456ce9f19569 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -51,6 +51,8 @@ struct srp_internal {
+ 	struct transport_container rport_attr_cont;
+ };
+ 
++static int scsi_is_srp_rport(const struct device *dev);
++
+ #define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
+ 
+ #define	dev_to_rport(d)	container_of(d, struct srp_rport, dev)
+@@ -60,9 +62,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
+ 	return dev_to_shost(r->dev.parent);
+ }
+ 
++static int find_child_rport(struct device *dev, void *data)
++{
++	struct device **child = data;
++
++	if (scsi_is_srp_rport(dev)) {
++		WARN_ON_ONCE(*child);
++		*child = dev;
++	}
++	return 0;
++}
++
+ static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
+ {
+-	return transport_class_to_srp_rport(&shost->shost_gendev);
++	struct device *child = NULL;
++
++	WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
++					   find_child_rport) < 0);
++	return child ? dev_to_rport(child) : NULL;
+ }
+ 
+ /**
+@@ -600,7 +617,8 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
+ 	struct srp_rport *rport = shost_to_rport(shost);
+ 
+ 	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
+-	return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
++	return rport && rport->fast_io_fail_tmo < 0 &&
++		rport->dev_loss_tmo < 0 &&
+ 		i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
+ 		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+ }
+diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c
+index 8d8659463b3e..feeb17cebc25 100644
+--- a/drivers/soc/lantiq/gphy.c
++++ b/drivers/soc/lantiq/gphy.c
+@@ -30,7 +30,6 @@ struct xway_gphy_priv {
+ 	struct clk *gphy_clk_gate;
+ 	struct reset_control *gphy_reset;
+ 	struct reset_control *gphy_reset2;
+-	struct notifier_block gphy_reboot_nb;
+ 	void __iomem *membase;
+ 	char *fw_name;
+ };
+@@ -64,24 +63,6 @@ static const struct of_device_id xway_gphy_match[] = {
+ };
+ MODULE_DEVICE_TABLE(of, xway_gphy_match);
+ 
+-static struct xway_gphy_priv *to_xway_gphy_priv(struct notifier_block *nb)
+-{
+-	return container_of(nb, struct xway_gphy_priv, gphy_reboot_nb);
+-}
+-
+-static int xway_gphy_reboot_notify(struct notifier_block *reboot_nb,
+-				   unsigned long code, void *unused)
+-{
+-	struct xway_gphy_priv *priv = to_xway_gphy_priv(reboot_nb);
+-
+-	if (priv) {
+-		reset_control_assert(priv->gphy_reset);
+-		reset_control_assert(priv->gphy_reset2);
+-	}
+-
+-	return NOTIFY_DONE;
+-}
+-
+ static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv,
+ 			  dma_addr_t *dev_addr)
+ {
+@@ -205,14 +186,6 @@ static int xway_gphy_probe(struct platform_device *pdev)
+ 	reset_control_deassert(priv->gphy_reset);
+ 	reset_control_deassert(priv->gphy_reset2);
+ 
+-	/* assert the gphy reset because it can hang after a reboot: */
+-	priv->gphy_reboot_nb.notifier_call = xway_gphy_reboot_notify;
+-	priv->gphy_reboot_nb.priority = -1;
+-
+-	ret = register_reboot_notifier(&priv->gphy_reboot_nb);
+-	if (ret)
+-		dev_warn(dev, "Failed to register reboot notifier\n");
+-
+ 	platform_set_drvdata(pdev, priv);
+ 
+ 	return ret;
+@@ -220,21 +193,12 @@ static int xway_gphy_probe(struct platform_device *pdev)
+ 
+ static int xway_gphy_remove(struct platform_device *pdev)
+ {
+-	struct device *dev = &pdev->dev;
+ 	struct xway_gphy_priv *priv = platform_get_drvdata(pdev);
+-	int ret;
+-
+-	reset_control_assert(priv->gphy_reset);
+-	reset_control_assert(priv->gphy_reset2);
+ 
+ 	iowrite32be(0, priv->membase);
+ 
+ 	clk_disable_unprepare(priv->gphy_clk_gate);
+ 
+-	ret = unregister_reboot_notifier(&priv->gphy_reboot_nb);
+-	if (ret)
+-		dev_warn(dev, "Failed to unregister reboot notifier\n");
+-
+ 	return 0;
+ }
+ 
+diff --git a/fs/aio.c b/fs/aio.c
+index 63c0437ab135..3dbfbac2a668 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -643,9 +643,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
+ 	while (!list_empty(&ctx->active_reqs)) {
+ 		req = list_first_entry(&ctx->active_reqs,
+ 				       struct aio_kiocb, ki_list);
+-
+-		list_del_init(&req->ki_list);
+ 		kiocb_cancel(req);
++		list_del_init(&req->ki_list);
+ 	}
+ 
+ 	spin_unlock_irq(&ctx->ctx_lock);
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index c02781a4c091..5a116b221f11 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -53,6 +53,23 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
+ STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
+ 		xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+ 
++/*
++ * Size of the AGFL.  For CRC-enabled filesystes we steal a couple of slots in
++ * the beginning of the block for a proper header with the location information
++ * and CRC.
++ */
++unsigned int
++xfs_agfl_size(
++	struct xfs_mount	*mp)
++{
++	unsigned int		size = mp->m_sb.sb_sectsize;
++
++	if (xfs_sb_version_hascrc(&mp->m_sb))
++		size -= sizeof(struct xfs_agfl);
++
++	return size / sizeof(xfs_agblock_t);
++}
++
+ unsigned int
+ xfs_refc_block(
+ 	struct xfs_mount	*mp)
+@@ -550,7 +567,7 @@ xfs_agfl_verify(
+ 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
+ 		return __this_address;
+ 
+-	for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
++	for (i = 0; i < xfs_agfl_size(mp); i++) {
+ 		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
+ 		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
+ 			return __this_address;
+@@ -2053,6 +2070,93 @@ xfs_alloc_space_available(
+ 	return true;
+ }
+ 
++/*
++ * Check the agfl fields of the agf for inconsistency or corruption. The purpose
++ * is to detect an agfl header padding mismatch between current and early v5
++ * kernels. This problem manifests as a 1-slot size difference between the
++ * on-disk flcount and the active [first, last] range of a wrapped agfl. This
++ * may also catch variants of agfl count corruption unrelated to padding. Either
++ * way, we'll reset the agfl and warn the user.
++ *
++ * Return true if a reset is required before the agfl can be used, false
++ * otherwise.
++ */
++static bool
++xfs_agfl_needs_reset(
++	struct xfs_mount	*mp,
++	struct xfs_agf		*agf)
++{
++	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
++	uint32_t		l = be32_to_cpu(agf->agf_fllast);
++	uint32_t		c = be32_to_cpu(agf->agf_flcount);
++	int			agfl_size = xfs_agfl_size(mp);
++	int			active;
++
++	/* no agfl header on v4 supers */
++	if (!xfs_sb_version_hascrc(&mp->m_sb))
++		return false;
++
++	/*
++	 * The agf read verifier catches severe corruption of these fields.
++	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
++	 * the verifier allows it.
++	 */
++	if (f >= agfl_size || l >= agfl_size)
++		return true;
++	if (c > agfl_size)
++		return true;
++
++	/*
++	 * Check consistency between the on-disk count and the active range. An
++	 * agfl padding mismatch manifests as an inconsistent flcount.
++	 */
++	if (c && l >= f)
++		active = l - f + 1;
++	else if (c)
++		active = agfl_size - f + l + 1;
++	else
++		active = 0;
++
++	return active != c;
++}
++
++/*
++ * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
++ * agfl content cannot be trusted. Warn the user that a repair is required to
++ * recover leaked blocks.
++ *
++ * The purpose of this mechanism is to handle filesystems affected by the agfl
++ * header padding mismatch problem. A reset keeps the filesystem online with a
++ * relatively minor free space accounting inconsistency rather than suffer the
++ * inevitable crash from use of an invalid agfl block.
++ */
++static void
++xfs_agfl_reset(
++	struct xfs_trans	*tp,
++	struct xfs_buf		*agbp,
++	struct xfs_perag	*pag)
++{
++	struct xfs_mount	*mp = tp->t_mountp;
++	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
++
++	ASSERT(pag->pagf_agflreset);
++	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
++
++	xfs_warn(mp,
++	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
++	       "Please unmount and run xfs_repair.",
++	         pag->pag_agno, pag->pagf_flcount);
++
++	agf->agf_flfirst = 0;
++	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
++	agf->agf_flcount = 0;
++	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
++				    XFS_AGF_FLCOUNT);
++
++	pag->pagf_flcount = 0;
++	pag->pagf_agflreset = false;
++}
++
+ /*
+  * Decide whether to use this allocation group for this allocation.
+  * If so, fix up the btree freelist's size.
+@@ -2114,6 +2218,10 @@ xfs_alloc_fix_freelist(
+ 		}
+ 	}
+ 
++	/* reset a padding mismatched agfl before final free space check */
++	if (pag->pagf_agflreset)
++		xfs_agfl_reset(tp, agbp, pag);
++
+ 	/* If there isn't enough total space or single-extent, reject it. */
+ 	need = xfs_alloc_min_freelist(mp, pag);
+ 	if (!xfs_alloc_space_available(args, need, flags))
+@@ -2266,10 +2374,11 @@ xfs_alloc_get_freelist(
+ 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
+ 	be32_add_cpu(&agf->agf_flfirst, 1);
+ 	xfs_trans_brelse(tp, agflbp);
+-	if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
++	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
+ 		agf->agf_flfirst = 0;
+ 
+ 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
++	ASSERT(!pag->pagf_agflreset);
+ 	be32_add_cpu(&agf->agf_flcount, -1);
+ 	xfs_trans_agflist_delta(tp, -1);
+ 	pag->pagf_flcount--;
+@@ -2377,10 +2486,11 @@ xfs_alloc_put_freelist(
+ 			be32_to_cpu(agf->agf_seqno), &agflbp)))
+ 		return error;
+ 	be32_add_cpu(&agf->agf_fllast, 1);
+-	if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
++	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
+ 		agf->agf_fllast = 0;
+ 
+ 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
++	ASSERT(!pag->pagf_agflreset);
+ 	be32_add_cpu(&agf->agf_flcount, 1);
+ 	xfs_trans_agflist_delta(tp, 1);
+ 	pag->pagf_flcount++;
+@@ -2395,7 +2505,7 @@ xfs_alloc_put_freelist(
+ 
+ 	xfs_alloc_log_agf(tp, agbp, logflags);
+ 
+-	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
++	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
+ 
+ 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
+ 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
+@@ -2428,9 +2538,9 @@ xfs_agf_verify(
+ 	if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
+ 	      XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
+ 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
+-	      be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
+-	      be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
+-	      be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
++	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
++	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
++	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
+ 		return __this_address;
+ 
+ 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
+@@ -2588,6 +2698,7 @@ xfs_alloc_read_agf(
+ 		pag->pagb_count = 0;
+ 		pag->pagb_tree = RB_ROOT;
+ 		pag->pagf_init = 1;
++		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+ 	}
+ #ifdef DEBUG
+ 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
+diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
+index 65a0cafe06e4..a311a2414a6b 100644
+--- a/fs/xfs/libxfs/xfs_alloc.h
++++ b/fs/xfs/libxfs/xfs_alloc.h
+@@ -26,6 +26,8 @@ struct xfs_trans;
+ 
+ extern struct workqueue_struct *xfs_alloc_wq;
+ 
++unsigned int xfs_agfl_size(struct xfs_mount *mp);
++
+ /*
+  * Freespace allocation types.  Argument to xfs_alloc_[v]extent.
+  */
+diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
+index 1acb584fc5f7..42956d8d95ed 100644
+--- a/fs/xfs/libxfs/xfs_format.h
++++ b/fs/xfs/libxfs/xfs_format.h
+@@ -803,24 +803,13 @@ typedef struct xfs_agi {
+ 		&(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
+ 		(__be32 *)(bp)->b_addr)
+ 
+-/*
+- * Size of the AGFL.  For CRC-enabled filesystes we steal a couple of
+- * slots in the beginning of the block for a proper header with the
+- * location information and CRC.
+- */
+-#define XFS_AGFL_SIZE(mp) \
+-	(((mp)->m_sb.sb_sectsize - \
+-	 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
+-		sizeof(struct xfs_agfl) : 0)) / \
+-	  sizeof(xfs_agblock_t))
+-
+ typedef struct xfs_agfl {
+ 	__be32		agfl_magicnum;
+ 	__be32		agfl_seqno;
+ 	uuid_t		agfl_uuid;
+ 	__be64		agfl_lsn;
+ 	__be32		agfl_crc;
+-	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
++	__be32		agfl_bno[];	/* actually xfs_agfl_size(mp) */
+ } __attribute__((packed)) xfs_agfl_t;
+ 
+ #define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
+diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
+index 05c66e05ae20..018aabbd9394 100644
+--- a/fs/xfs/scrub/agheader.c
++++ b/fs/xfs/scrub/agheader.c
+@@ -80,7 +80,7 @@ xfs_scrub_walk_agfl(
+ 	}
+ 
+ 	/* first to the end */
+-	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
++	for (i = flfirst; i < xfs_agfl_size(mp); i++) {
+ 		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
+ 		if (error)
+ 			return error;
+@@ -664,7 +664,7 @@ xfs_scrub_agf(
+ 	if (agfl_last > agfl_first)
+ 		fl_count = agfl_last - agfl_first + 1;
+ 	else
+-		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
++		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
+ 	if (agfl_count != 0 && fl_count != agfl_count)
+ 		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ 
+@@ -791,7 +791,7 @@ xfs_scrub_agfl(
+ 	/* Allocate buffer to ensure uniqueness of AGFL entries. */
+ 	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ 	agflcount = be32_to_cpu(agf->agf_flcount);
+-	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
++	if (agflcount > xfs_agfl_size(sc->mp)) {
+ 		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ 		goto out;
+ 	}
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index 8b4545623e25..523792768080 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -217,7 +217,7 @@ xfs_growfs_data_private(
+ 		}
+ 
+ 		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
+-		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
++		for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
+ 			agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
+ 
+ 		error = xfs_bwrite(bp);
+diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
+index e0792d036be2..d359a88ea249 100644
+--- a/fs/xfs/xfs_mount.h
++++ b/fs/xfs/xfs_mount.h
+@@ -353,6 +353,7 @@ typedef struct xfs_perag {
+ 	char		pagi_inodeok;	/* The agi is ok for inodes */
+ 	uint8_t		pagf_levels[XFS_BTNUM_AGF];
+ 					/* # of levels in bno & cnt btree */
++	bool		pagf_agflreset; /* agfl requires reset before use */
+ 	uint32_t	pagf_flcount;	/* count of blocks in freelist */
+ 	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
+ 	xfs_extlen_t	pagf_longest;	/* longest free space */
+diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
+index 945de08af7ba..a982c0b623d0 100644
+--- a/fs/xfs/xfs_trace.h
++++ b/fs/xfs/xfs_trace.h
+@@ -1477,7 +1477,7 @@ TRACE_EVENT(xfs_extent_busy_trim,
+ 		  __entry->tlen)
+ );
+ 
+-TRACE_EVENT(xfs_agf,
++DECLARE_EVENT_CLASS(xfs_agf_class,
+ 	TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
+ 		 unsigned long caller_ip),
+ 	TP_ARGS(mp, agf, flags, caller_ip),
+@@ -1533,6 +1533,13 @@ TRACE_EVENT(xfs_agf,
+ 		  __entry->longest,
+ 		  (void *)__entry->caller_ip)
+ );
++#define DEFINE_AGF_EVENT(name) \
++DEFINE_EVENT(xfs_agf_class, name, \
++	TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
++		 unsigned long caller_ip), \
++	TP_ARGS(mp, agf, flags, caller_ip))
++DEFINE_AGF_EVENT(xfs_agf);
++DEFINE_AGF_EVENT(xfs_agfl_reset);
+ 
+ TRACE_EVENT(xfs_free_extent,
+ 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
+diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
+index b9e22b7e2f28..d1171db23742 100644
+--- a/include/linux/iio/buffer_impl.h
++++ b/include/linux/iio/buffer_impl.h
+@@ -53,7 +53,7 @@ struct iio_buffer_access_funcs {
+ 	int (*request_update)(struct iio_buffer *buffer);
+ 
+ 	int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
+-	int (*set_length)(struct iio_buffer *buffer, int length);
++	int (*set_length)(struct iio_buffer *buffer, unsigned int length);
+ 
+ 	int (*enable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
+ 	int (*disable)(struct iio_buffer *buffer, struct iio_dev *indio_dev);
+@@ -72,10 +72,10 @@ struct iio_buffer_access_funcs {
+  */
+ struct iio_buffer {
+ 	/** @length: Number of datums in buffer. */
+-	int length;
++	unsigned int length;
+ 
+ 	/**  @bytes_per_datum: Size of individual datum including timestamp. */
+-	int bytes_per_datum;
++	size_t bytes_per_datum;
+ 
+ 	/**
+ 	 * @access: Buffer access functions associated with the
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index 2e08c6f3ac3e..59dabe8e11aa 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2618,7 +2618,7 @@ enum nl80211_attrs {
+ #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+ #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+ 
+-#define NL80211_WIPHY_NAME_MAXLEN		128
++#define NL80211_WIPHY_NAME_MAXLEN		64
+ 
+ #define NL80211_MAX_SUPP_RATES			32
+ #define NL80211_MAX_SUPP_HT_RATES		77
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 20a2300ae4e8..ed025da81714 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -892,7 +892,7 @@ int __trace_bputs(unsigned long ip, const char *str)
+ EXPORT_SYMBOL_GPL(__trace_bputs);
+ 
+ #ifdef CONFIG_TRACER_SNAPSHOT
+-static void tracing_snapshot_instance(struct trace_array *tr)
++void tracing_snapshot_instance(struct trace_array *tr)
+ {
+ 	struct tracer *tracer = tr->current_trace;
+ 	unsigned long flags;
+@@ -948,7 +948,7 @@ static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
+ 					struct trace_buffer *size_buf, int cpu_id);
+ static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
+ 
+-static int alloc_snapshot(struct trace_array *tr)
++int tracing_alloc_snapshot_instance(struct trace_array *tr)
+ {
+ 	int ret;
+ 
+@@ -994,7 +994,7 @@ int tracing_alloc_snapshot(void)
+ 	struct trace_array *tr = &global_trace;
+ 	int ret;
+ 
+-	ret = alloc_snapshot(tr);
++	ret = tracing_alloc_snapshot_instance(tr);
+ 	WARN_ON(ret < 0);
+ 
+ 	return ret;
+@@ -5395,7 +5395,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ 	if (t->use_max_tr && !had_max_tr) {
+-		ret = alloc_snapshot(tr);
++		ret = tracing_alloc_snapshot_instance(tr);
+ 		if (ret < 0)
+ 			goto out;
+ 	}
+@@ -6373,7 +6373,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ 		}
+ #endif
+ 		if (!tr->allocated_snapshot) {
+-			ret = alloc_snapshot(tr);
++			ret = tracing_alloc_snapshot_instance(tr);
+ 			if (ret < 0)
+ 				break;
+ 		}
+@@ -7094,7 +7094,7 @@ ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
+ 		return ret;
+ 
+  out_reg:
+-	ret = alloc_snapshot(tr);
++	ret = tracing_alloc_snapshot_instance(tr);
+ 	if (ret < 0)
+ 		goto out;
+ 
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 2a6d0325a761..6092711bd0aa 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -1812,6 +1812,17 @@ static inline void __init trace_event_init(void) { }
+ static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
+ #endif
+ 
++#ifdef CONFIG_TRACER_SNAPSHOT
++void tracing_snapshot_instance(struct trace_array *tr);
++int tracing_alloc_snapshot_instance(struct trace_array *tr);
++#else
++static inline void tracing_snapshot_instance(struct trace_array *tr) { }
++static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
++{
++	return 0;
++}
++#endif
++
+ extern struct trace_iterator *tracepoint_print_iter;
+ 
+ #endif /* _LINUX_KERNEL_TRACE_H */
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 87411482a46f..ece7b7e8e96d 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -482,9 +482,10 @@ clear_event_triggers(struct trace_array *tr)
+ 	struct trace_event_file *file;
+ 
+ 	list_for_each_entry(file, &tr->events, list) {
+-		struct event_trigger_data *data;
+-		list_for_each_entry_rcu(data, &file->triggers, list) {
++		struct event_trigger_data *data, *n;
++		list_for_each_entry_safe(data, n, &file->triggers, list) {
+ 			trace_event_trigger_enable_disable(file, 0);
++			list_del_rcu(&data->list);
+ 			if (data->ops->free)
+ 				data->ops->free(data->ops, data);
+ 		}
+@@ -641,6 +642,7 @@ event_trigger_callback(struct event_command *cmd_ops,
+ 	trigger_data->count = -1;
+ 	trigger_data->ops = trigger_ops;
+ 	trigger_data->cmd_ops = cmd_ops;
++	trigger_data->private_data = file;
+ 	INIT_LIST_HEAD(&trigger_data->list);
+ 	INIT_LIST_HEAD(&trigger_data->named_list);
+ 
+@@ -1041,7 +1043,12 @@ static struct event_command trigger_traceoff_cmd = {
+ static void
+ snapshot_trigger(struct event_trigger_data *data, void *rec)
+ {
+-	tracing_snapshot();
++	struct trace_event_file *file = data->private_data;
++
++	if (file)
++		tracing_snapshot_instance(file->tr);
++	else
++		tracing_snapshot();
+ }
+ 
+ static void
+@@ -1063,7 +1070,7 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
+ {
+ 	int ret = register_trigger(glob, ops, data, file);
+ 
+-	if (ret > 0 && tracing_alloc_snapshot() != 0) {
++	if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
+ 		unregister_trigger(glob, ops, data, file);
+ 		ret = 0;
+ 	}
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 5a68730eebd6..82e8f5ad7c81 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2432,7 +2432,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ 		__split_huge_page_tail(head, i, lruvec, list);
+ 		/* Some pages can be beyond i_size: drop them from page cache */
+ 		if (head[i].index >= end) {
+-			__ClearPageDirty(head + i);
++			ClearPageDirty(head + i);
+ 			__delete_from_page_cache(head + i, NULL);
+ 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
+ 				shmem_uncharge(head->mapping->host, 1);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index a47621fa8496..f9ae07ef5ce8 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1392,7 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
+ 				return ret;
+ 
+ 			mapping = page_mapping(page);
+-			migrate_dirty = mapping && mapping->a_ops->migratepage;
++			migrate_dirty = !mapping || mapping->a_ops->migratepage;
+ 			unlock_page(page);
+ 			if (!migrate_dirty)
+ 				return ret;
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 8900ea5cbabf..1dde563aff1d 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -1448,7 +1448,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ 				      scontext_len, &context, def_sid);
+ 	if (rc == -EINVAL && force) {
+ 		context.str = str;
+-		context.len = scontext_len;
++		context.len = strlen(str) + 1;
+ 		str = NULL;
+ 	} else if (rc)
+ 		goto out_unlock;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 5409f6f6c48d..3a31b238f885 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -59,6 +59,31 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
+ 	return next;
+ }
+ 
++static struct instruction *next_insn_same_func(struct objtool_file *file,
++					       struct instruction *insn)
++{
++	struct instruction *next = list_next_entry(insn, list);
++	struct symbol *func = insn->func;
++
++	if (!func)
++		return NULL;
++
++	if (&next->list != &file->insn_list && next->func == func)
++		return next;
++
++	/* Check if we're already in the subfunction: */
++	if (func == func->cfunc)
++		return NULL;
++
++	/* Move to the subfunction: */
++	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
++}
++
++#define func_for_each_insn_all(file, func, insn)			\
++	for (insn = find_insn(file, func->sec, func->offset);		\
++	     insn;							\
++	     insn = next_insn_same_func(file, insn))
++
+ #define func_for_each_insn(file, func, insn)				\
+ 	for (insn = find_insn(file, func->sec, func->offset);		\
+ 	     insn && &insn->list != &file->insn_list &&			\
+@@ -149,10 +174,14 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+ 			if (!strcmp(func->name, global_noreturns[i]))
+ 				return 1;
+ 
+-	if (!func->sec)
++	if (!func->len)
+ 		return 0;
+ 
+-	func_for_each_insn(file, func, insn) {
++	insn = find_insn(file, func->sec, func->offset);
++	if (!insn->func)
++		return 0;
++
++	func_for_each_insn_all(file, func, insn) {
+ 		empty = false;
+ 
+ 		if (insn->type == INSN_RETURN)
+@@ -167,35 +196,28 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+ 	 * case, the function's dead-end status depends on whether the target
+ 	 * of the sibling call returns.
+ 	 */
+-	func_for_each_insn(file, func, insn) {
+-		if (insn->sec != func->sec ||
+-		    insn->offset >= func->offset + func->len)
+-			break;
+-
++	func_for_each_insn_all(file, func, insn) {
+ 		if (insn->type == INSN_JUMP_UNCONDITIONAL) {
+ 			struct instruction *dest = insn->jump_dest;
+-			struct symbol *dest_func;
+ 
+ 			if (!dest)
+ 				/* sibling call to another file */
+ 				return 0;
+ 
+-			if (dest->sec != func->sec ||
+-			    dest->offset < func->offset ||
+-			    dest->offset >= func->offset + func->len) {
+-				/* local sibling call */
+-				dest_func = find_symbol_by_offset(dest->sec,
+-								  dest->offset);
+-				if (!dest_func)
+-					continue;
++			if (dest->func && dest->func->pfunc != insn->func->pfunc) {
+ 
++				/* local sibling call */
+ 				if (recursion == 5) {
+-					WARN_FUNC("infinite recursion (objtool bug!)",
+-						  dest->sec, dest->offset);
+-					return -1;
++					/*
++					 * Infinite recursion: two functions
++					 * have sibling calls to each other.
++					 * This is a very rare case.  It means
++					 * they aren't dead ends.
++					 */
++					return 0;
+ 				}
+ 
+-				return __dead_end_function(file, dest_func,
++				return __dead_end_function(file, dest->func,
+ 							   recursion + 1);
+ 			}
+ 		}
+@@ -422,7 +444,7 @@ static void add_ignores(struct objtool_file *file)
+ 			if (!ignore_func(file, func))
+ 				continue;
+ 
+-			func_for_each_insn(file, func, insn)
++			func_for_each_insn_all(file, func, insn)
+ 				insn->ignore = true;
+ 		}
+ 	}
+@@ -782,30 +804,35 @@ static int add_special_section_alts(struct objtool_file *file)
+ 	return ret;
+ }
+ 
+-static int add_switch_table(struct objtool_file *file, struct symbol *func,
+-			    struct instruction *insn, struct rela *table,
+-			    struct rela *next_table)
++static int add_switch_table(struct objtool_file *file, struct instruction *insn,
++			    struct rela *table, struct rela *next_table)
+ {
+ 	struct rela *rela = table;
+ 	struct instruction *alt_insn;
+ 	struct alternative *alt;
++	struct symbol *pfunc = insn->func->pfunc;
++	unsigned int prev_offset = 0;
+ 
+ 	list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+ 		if (rela == next_table)
+ 			break;
+ 
+-		if (rela->sym->sec != insn->sec ||
+-		    rela->addend <= func->offset ||
+-		    rela->addend >= func->offset + func->len)
++		/* Make sure the switch table entries are consecutive: */
++		if (prev_offset && rela->offset != prev_offset + 8)
+ 			break;
+ 
+-		alt_insn = find_insn(file, insn->sec, rela->addend);
+-		if (!alt_insn) {
+-			WARN("%s: can't find instruction at %s+0x%x",
+-			     file->rodata->rela->name, insn->sec->name,
+-			     rela->addend);
+-			return -1;
+-		}
++		/* Detect function pointers from contiguous objects: */
++		if (rela->sym->sec == pfunc->sec &&
++		    rela->addend == pfunc->offset)
++			break;
++
++		alt_insn = find_insn(file, rela->sym->sec, rela->addend);
++		if (!alt_insn)
++			break;
++
++		/* Make sure the jmp dest is in the function or subfunction: */
++		if (alt_insn->func->pfunc != pfunc)
++			break;
+ 
+ 		alt = malloc(sizeof(*alt));
+ 		if (!alt) {
+@@ -815,6 +842,13 @@ static int add_switch_table(struct objtool_file *file, struct symbol *func,
+ 
+ 		alt->insn = alt_insn;
+ 		list_add_tail(&alt->list, &insn->alts);
++		prev_offset = rela->offset;
++	}
++
++	if (!prev_offset) {
++		WARN_FUNC("can't find switch jump table",
++			  insn->sec, insn->offset);
++		return -1;
+ 	}
+ 
+ 	return 0;
+@@ -869,40 +903,21 @@ static struct rela *find_switch_table(struct objtool_file *file,
+ {
+ 	struct rela *text_rela, *rodata_rela;
+ 	struct instruction *orig_insn = insn;
++	unsigned long table_offset;
+ 
+-	text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
+-	if (text_rela && text_rela->sym == file->rodata->sym) {
+-		/* case 1 */
+-		rodata_rela = find_rela_by_dest(file->rodata,
+-						text_rela->addend);
+-		if (rodata_rela)
+-			return rodata_rela;
+-
+-		/* case 2 */
+-		rodata_rela = find_rela_by_dest(file->rodata,
+-						text_rela->addend + 4);
+-		if (!rodata_rela)
+-			return NULL;
+-
+-		file->ignore_unreachables = true;
+-		return rodata_rela;
+-	}
+-
+-	/* case 3 */
+ 	/*
+ 	 * Backward search using the @first_jump_src links, these help avoid
+ 	 * much of the 'in between' code. Which avoids us getting confused by
+ 	 * it.
+ 	 */
+-	for (insn = list_prev_entry(insn, list);
+-
++	for (;
+ 	     &insn->list != &file->insn_list &&
+ 	     insn->sec == func->sec &&
+ 	     insn->offset >= func->offset;
+ 
+ 	     insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+ 
+-		if (insn->type == INSN_JUMP_DYNAMIC)
++		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
+ 			break;
+ 
+ 		/* allow small jumps within the range */
+@@ -918,18 +933,29 @@ static struct rela *find_switch_table(struct objtool_file *file,
+ 		if (!text_rela || text_rela->sym != file->rodata->sym)
+ 			continue;
+ 
++		table_offset = text_rela->addend;
++		if (text_rela->type == R_X86_64_PC32)
++			table_offset += 4;
++
+ 		/*
+ 		 * Make sure the .rodata address isn't associated with a
+ 		 * symbol.  gcc jump tables are anonymous data.
+ 		 */
+-		if (find_symbol_containing(file->rodata, text_rela->addend))
++		if (find_symbol_containing(file->rodata, table_offset))
+ 			continue;
+ 
+-		rodata_rela = find_rela_by_dest(file->rodata, text_rela->addend);
+-		if (!rodata_rela)
+-			continue;
++		rodata_rela = find_rela_by_dest(file->rodata, table_offset);
++		if (rodata_rela) {
++			/*
++			 * Use of RIP-relative switch jumps is quite rare, and
++			 * indicates a rare GCC quirk/bug which can leave dead
++			 * code behind.
++			 */
++			if (text_rela->type == R_X86_64_PC32)
++				file->ignore_unreachables = true;
+ 
+-		return rodata_rela;
++			return rodata_rela;
++		}
+ 	}
+ 
+ 	return NULL;
+@@ -943,7 +969,7 @@ static int add_func_switch_tables(struct objtool_file *file,
+ 	struct rela *rela, *prev_rela = NULL;
+ 	int ret;
+ 
+-	func_for_each_insn(file, func, insn) {
++	func_for_each_insn_all(file, func, insn) {
+ 		if (!last)
+ 			last = insn;
+ 
+@@ -974,8 +1000,7 @@ static int add_func_switch_tables(struct objtool_file *file,
+ 		 * the beginning of another switch table in the same function.
+ 		 */
+ 		if (prev_jump) {
+-			ret = add_switch_table(file, func, prev_jump, prev_rela,
+-					       rela);
++			ret = add_switch_table(file, prev_jump, prev_rela, rela);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -985,7 +1010,7 @@ static int add_func_switch_tables(struct objtool_file *file,
+ 	}
+ 
+ 	if (prev_jump) {
+-		ret = add_switch_table(file, func, prev_jump, prev_rela, NULL);
++		ret = add_switch_table(file, prev_jump, prev_rela, NULL);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1749,15 +1774,13 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
+ 	while (1) {
+ 		next_insn = next_insn_same_sec(file, insn);
+ 
+-
+-		if (file->c_file && func && insn->func && func != insn->func) {
++		if (file->c_file && func && insn->func && func != insn->func->pfunc) {
+ 			WARN("%s() falls through to next function %s()",
+ 			     func->name, insn->func->name);
+ 			return 1;
+ 		}
+ 
+-		if (insn->func)
+-			func = insn->func;
++		func = insn->func ? insn->func->pfunc : NULL;
+ 
+ 		if (func && insn->ignore) {
+ 			WARN_FUNC("BUG: why am I validating an ignored function?",
+@@ -1778,7 +1801,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
+ 
+ 				i = insn;
+ 				save_insn = NULL;
+-				func_for_each_insn_continue_reverse(file, func, i) {
++				func_for_each_insn_continue_reverse(file, insn->func, i) {
+ 					if (i->save) {
+ 						save_insn = i;
+ 						break;
+@@ -1865,7 +1888,7 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
+ 		case INSN_JUMP_UNCONDITIONAL:
+ 			if (insn->jump_dest &&
+ 			    (!func || !insn->jump_dest->func ||
+-			     func == insn->jump_dest->func)) {
++			     insn->jump_dest->func->pfunc == func)) {
+ 				ret = validate_branch(file, insn->jump_dest,
+ 						      state);
+ 				if (ret)
+@@ -2060,7 +2083,7 @@ static int validate_functions(struct objtool_file *file)
+ 
+ 	for_each_sec(file, sec) {
+ 		list_for_each_entry(func, &sec->symbol_list, list) {
+-			if (func->type != STT_FUNC)
++			if (func->type != STT_FUNC || func->pfunc != func)
+ 				continue;
+ 
+ 			insn = find_insn(file, sec, func->offset);
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index c1c338661699..4e60e105583e 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -79,6 +79,19 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
+ 	return NULL;
+ }
+ 
++struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
++{
++	struct section *sec;
++	struct symbol *sym;
++
++	list_for_each_entry(sec, &elf->sections, list)
++		list_for_each_entry(sym, &sec->symbol_list, list)
++			if (!strcmp(sym->name, name))
++				return sym;
++
++	return NULL;
++}
++
+ struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+ {
+ 	struct symbol *sym;
+@@ -203,10 +216,11 @@ static int read_sections(struct elf *elf)
+ 
+ static int read_symbols(struct elf *elf)
+ {
+-	struct section *symtab;
+-	struct symbol *sym;
++	struct section *symtab, *sec;
++	struct symbol *sym, *pfunc;
+ 	struct list_head *entry, *tmp;
+ 	int symbols_nr, i;
++	char *coldstr;
+ 
+ 	symtab = find_section_by_name(elf, ".symtab");
+ 	if (!symtab) {
+@@ -281,6 +295,30 @@ static int read_symbols(struct elf *elf)
+ 		hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx);
+ 	}
+ 
++	/* Create parent/child links for any cold subfunctions */
++	list_for_each_entry(sec, &elf->sections, list) {
++		list_for_each_entry(sym, &sec->symbol_list, list) {
++			if (sym->type != STT_FUNC)
++				continue;
++			sym->pfunc = sym->cfunc = sym;
++			coldstr = strstr(sym->name, ".cold.");
++			if (coldstr) {
++				coldstr[0] = '\0';
++				pfunc = find_symbol_by_name(elf, sym->name);
++				coldstr[0] = '.';
++
++				if (!pfunc) {
++					WARN("%s(): can't find parent function",
++					     sym->name);
++					goto err;
++				}
++
++				sym->pfunc = pfunc;
++				pfunc->cfunc = sym;
++			}
++		}
++	}
++
+ 	return 0;
+ 
+ err:
+diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
+index d86e2ff14466..de5cd2ddded9 100644
+--- a/tools/objtool/elf.h
++++ b/tools/objtool/elf.h
+@@ -61,6 +61,7 @@ struct symbol {
+ 	unsigned char bind, type;
+ 	unsigned long offset;
+ 	unsigned int len;
++	struct symbol *pfunc, *cfunc;
+ };
+ 
+ struct rela {
+@@ -86,6 +87,7 @@ struct elf {
+ struct elf *elf_open(const char *name, int flags);
+ struct section *find_section_by_name(struct elf *elf, const char *name);
+ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
++struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
+ struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
+ struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
+ struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-30 11:44 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-30 11:44 UTC (permalink / raw
  To: gentoo-commits

commit:     8a5950d77db4cc1cc9e4b9b359bdd8d288d2167c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 30 11:44:38 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 30 11:44:38 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8a5950d7

Linux patch 4.16.13

 0000_README              |     4 +
 1012_linux-4.16.13.patch | 10200 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 10204 insertions(+)

diff --git a/0000_README b/0000_README
index 603fb6f..f199583 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-4.16.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.12
 
+Patch:  1012_linux-4.16.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.16.13.patch b/1012_linux-4.16.13.patch
new file mode 100644
index 0000000..8fb1dc5
--- /dev/null
+++ b/1012_linux-4.16.13.patch
@@ -0,0 +1,10200 @@
+diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+index 4ca21c3a6fc9..460ef27b1008 100644
+--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
++++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+@@ -20,6 +20,7 @@ Required properties :
+ 		- "allwinner,sun50i-a64-ccu"
+ 		- "allwinner,sun50i-a64-r-ccu"
+ 		- "allwinner,sun50i-h5-ccu"
++		- "allwinner,sun50i-h6-ccu"
+ 		- "nextthing,gr8-ccu"
+ 
+ - reg: Must contain the registers base address and length
+@@ -31,6 +32,9 @@ Required properties :
+ - #clock-cells : must contain 1
+ - #reset-cells : must contain 1
+ 
++For the main CCU on H6, one more clock is needed:
++- "iosc": the SoC's internal frequency oscillator
++
+ For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
+ - "pll-periph": the SoC's peripheral PLL from the main CCU
+ - "iosc": the SoC's internal frequency oscillator
+diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
+index a6671bd2c85a..ae38a1ee9c29 100644
+--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
++++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
+@@ -102,7 +102,11 @@ Required properties:
+ - clocks: Phandles to device clocks. See [1] for details on clock bindings.
+ - clock-names: the following clocks are required:
+   * "iface"
++  For 28nm HPM/LP, 28nm 8960 PHYs:
+ - vddio-supply: phandle to vdd-io regulator device node
++  For 20nm PHY:
++- vddio-supply: phandle to vdd-io regulator device node
++- vcca-supply: phandle to vcca regulator device node
+ 
+ Optional properties:
+ - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
+diff --git a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+index 47284f85ec80..c3f9826692bc 100644
+--- a/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+@@ -20,7 +20,8 @@ Required subnode-properties:
+ 		gpio: cpuclkoutgrp0, udlclkoutgrp0, i2c1grp0, i2c2grp0,
+ 		      i2c3grp0, i2s0grp0, i2s1grp0, i2srefclkgrp0, spi0grp0,
+ 		      spi1grp0, pciedebuggrp0, uart0grp0, uart0grp1, uart1grp0,
+-		      uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0
++		      uart2grp0, uart2grp1, uart3grp0, uart4grp0, uart5grp0,
++		      uart5nocts
+ 		cpuclkout: cpuclkoutgrp0
+ 		udlclkout: udlclkoutgrp0
+ 		i2c1: i2c1grp0
+@@ -37,7 +38,7 @@ Required subnode-properties:
+ 		uart2: uart2grp0, uart2grp1
+ 		uart3: uart3grp0
+ 		uart4: uart4grp0
+-		uart5: uart5grp0
++		uart5: uart5grp0, uart5nocts
+ 		nand: nandgrp0
+ 		sdio0: sdio0grp0
+ 		sdio1: sdio1grp0
+diff --git a/Makefile b/Makefile
+index ded9e8480d74..146e527a5e06 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+index 3ea1d26e1c68..c457eff25911 100644
+--- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
++++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+@@ -146,7 +146,7 @@
+ 	};
+ 
+ 	eeprom@50 {
+-		compatible = "nxp,24c02";
++		compatible = "nxp,se97b", "atmel,24c02";
+ 		reg = <0x50>;
+ 		pagesize = <16>;
+ 	};
+diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts
+index 9b82cc8843e1..97b227693658 100644
+--- a/arch/arm/boot/dts/at91-tse850-3.dts
++++ b/arch/arm/boot/dts/at91-tse850-3.dts
+@@ -246,7 +246,7 @@
+ 	};
+ 
+ 	eeprom@50 {
+-		compatible = "nxp,24c02", "atmel,24c02";
++		compatible = "nxp,se97b", "atmel,24c02";
+ 		reg = <0x50>;
+ 		pagesize = <16>;
+ 	};
+diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
+index 9d293decf8d3..8d9a0df207a4 100644
+--- a/arch/arm/boot/dts/bcm283x.dtsi
++++ b/arch/arm/boot/dts/bcm283x.dtsi
+@@ -252,7 +252,7 @@
+ 
+ 			jtag_gpio4: jtag_gpio4 {
+ 				brcm,pins = <4 5 6 12 13>;
+-				brcm,function = <BCM2835_FSEL_ALT4>;
++				brcm,function = <BCM2835_FSEL_ALT5>;
+ 			};
+ 			jtag_gpio22: jtag_gpio22 {
+ 				brcm,pins = <22 23 24 25 26 27>;
+@@ -397,8 +397,8 @@
+ 
+ 		i2s: i2s@7e203000 {
+ 			compatible = "brcm,bcm2835-i2s";
+-			reg = <0x7e203000 0x20>,
+-			      <0x7e101098 0x02>;
++			reg = <0x7e203000 0x24>;
++			clocks = <&clocks BCM2835_CLOCK_PCM>;
+ 
+ 			dmas = <&dma 2>,
+ 			       <&dma 3>;
+diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
+index 41c9132eb550..64363f75c01a 100644
+--- a/arch/arm/boot/dts/dra71-evm.dts
++++ b/arch/arm/boot/dts/dra71-evm.dts
+@@ -24,13 +24,13 @@
+ 
+ 		regulator-name = "vddshv8";
+ 		regulator-min-microvolt = <1800000>;
+-		regulator-max-microvolt = <3000000>;
++		regulator-max-microvolt = <3300000>;
+ 		regulator-boot-on;
+ 		vin-supply = <&evm_5v0>;
+ 
+ 		gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+ 		states = <1800000 0x0
+-			  3000000 0x1>;
++			  3300000 0x1>;
+ 	};
+ 
+ 	evm_1v8_sw: fixedregulator-evm_1v8 {
+diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+index ae45af1ad062..3cc1fb9ce441 100644
+--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+@@ -213,37 +213,37 @@
+ &iomuxc {
+ 	pinctrl_enet1: enet1grp {
+ 		fsl,pins = <
+-			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x3
+-			MX7D_PAD_SD2_WP__ENET1_MDC			0x3
+-			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x1
+-			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x1
+-			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x1
+-			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x1
+-			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x1
+-			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x1
+-			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x1
+-			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x1
+-			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x1
+-			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x1
+-			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x1
+-			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x1
++			MX7D_PAD_SD2_CD_B__ENET1_MDIO			0x30
++			MX7D_PAD_SD2_WP__ENET1_MDC			0x30
++			MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC	0x11
++			MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0	0x11
++			MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1	0x11
++			MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2	0x11
++			MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3	0x11
++			MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL	0x11
++			MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC	0x11
++			MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0	0x11
++			MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1	0x11
++			MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2	0x11
++			MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3	0x11
++			MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL	0x11
+ 		>;
+ 	};
+ 
+ 	pinctrl_enet2: enet2grp {
+ 		fsl,pins = <
+-			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x1
+-			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x1
+-			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x1
+-			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x1
+-			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x1
+-			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x1
+-			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x1
+-			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x1
+-			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x1
+-			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x1
+-			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x1
+-			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x1
++			MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC		0x11
++			MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0		0x11
++			MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1		0x11
++			MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2		0x11
++			MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3		0x11
++			MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL		0x11
++			MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC		0x11
++			MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0		0x11
++			MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1		0x11
++			MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2		0x11
++			MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3		0x11
++			MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL		0x11
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/keystone-k2e-clocks.dtsi b/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
+index 5e0e7d232161..f7592155a740 100644
+--- a/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
++++ b/arch/arm/boot/dts/keystone-k2e-clocks.dtsi
+@@ -42,7 +42,7 @@ clocks {
+ 		domain-id = <0>;
+ 	};
+ 
+-	clkhyperlink0: clkhyperlink02350030 {
++	clkhyperlink0: clkhyperlink0@2350030 {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,keystone,psc-clock";
+ 		clocks = <&chipclk12>;
+diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
+index eb374956294f..9a02d03b23c2 100644
+--- a/arch/arm/boot/dts/r8a7791-porter.dts
++++ b/arch/arm/boot/dts/r8a7791-porter.dts
+@@ -425,7 +425,7 @@
+ 		      "dclkin.0", "dclkin.1";
+ 
+ 	ports {
+-		port@1 {
++		port@0 {
+ 			endpoint {
+ 				remote-endpoint = <&adv7511_in>;
+ 			};
+diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
+index c42ca7022e8c..486d4e7433ed 100644
+--- a/arch/arm/boot/dts/socfpga.dtsi
++++ b/arch/arm/boot/dts/socfpga.dtsi
+@@ -831,7 +831,7 @@
+ 		timer@fffec600 {
+ 			compatible = "arm,cortex-a9-twd-timer";
+ 			reg = <0xfffec600 0x100>;
+-			interrupts = <1 13 0xf04>;
++			interrupts = <1 13 0xf01>;
+ 			clocks = <&mpu_periph_clk>;
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
+index 4f2f2eea0755..5df34345a354 100644
+--- a/arch/arm/boot/dts/sun4i-a10.dtsi
++++ b/arch/arm/boot/dts/sun4i-a10.dtsi
+@@ -76,7 +76,7 @@
+ 			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+ 			clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_HDMI0>,
+ 				 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+-				 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
++				 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
+ 				 <&ccu CLK_TCON0_CH1>, <&ccu CLK_HDMI>,
+ 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
+ 			status = "disabled";
+@@ -88,7 +88,7 @@
+ 			allwinner,pipeline = "de_fe0-de_be0-lcd0";
+ 			clocks = <&ccu CLK_AHB_LCD0>, <&ccu CLK_AHB_DE_BE0>,
+ 				 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_DE_BE0>,
+-				 <&ccu CLK_AHB_DE_FE0>, <&ccu CLK_TCON0_CH0>,
++				 <&ccu CLK_DE_FE0>, <&ccu CLK_TCON0_CH0>,
+ 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
+ 			status = "disabled";
+ 		};
+@@ -99,7 +99,7 @@
+ 			allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
+ 			clocks = <&ccu CLK_AHB_TVE0>, <&ccu CLK_AHB_LCD0>,
+ 				 <&ccu CLK_AHB_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
+-				 <&ccu CLK_DE_BE0>, <&ccu CLK_AHB_DE_FE0>,
++				 <&ccu CLK_DE_BE0>, <&ccu CLK_DE_FE0>,
+ 				 <&ccu CLK_TCON0_CH1>, <&ccu CLK_DRAM_TVE0>,
+ 				 <&ccu CLK_DRAM_DE_FE0>, <&ccu CLK_DRAM_DE_BE0>;
+ 			status = "disabled";
+diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+index 0a6f7952bbb1..48b85653ad66 100644
+--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
+@@ -497,8 +497,8 @@
+ 		blsp2_spi5: spi@75ba000{
+ 			compatible = "qcom,spi-qup-v2.2.1";
+ 			reg = <0x075ba000 0x600>;
+-			interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+-			clocks = <&gcc GCC_BLSP2_QUP5_SPI_APPS_CLK>,
++			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
++			clocks = <&gcc GCC_BLSP2_QUP6_SPI_APPS_CLK>,
+ 				 <&gcc GCC_BLSP2_AHB_CLK>;
+ 			clock-names = "core", "iface";
+ 			pinctrl-names = "default", "sleep";
+diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
+index 9ef0797380cb..f9b0b09153e0 100644
+--- a/arch/arm64/include/asm/atomic_lse.h
++++ b/arch/arm64/include/asm/atomic_lse.h
+@@ -117,7 +117,7 @@ static inline void atomic_and(int i, atomic_t *v)
+ 	/* LSE atomics */
+ 	"	mvn	%w[i], %w[i]\n"
+ 	"	stclr	%w[i], %[v]")
+-	: [i] "+r" (w0), [v] "+Q" (v->counter)
++	: [i] "+&r" (w0), [v] "+Q" (v->counter)
+ 	: "r" (x1)
+ 	: __LL_SC_CLOBBERS);
+ }
+@@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
+ 	/* LSE atomics */						\
+ 	"	mvn	%w[i], %w[i]\n"					\
+ 	"	ldclr" #mb "	%w[i], %w[i], %[v]")			\
+-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS, ##cl);					\
+ 									\
+@@ -161,7 +161,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+ 	/* LSE atomics */
+ 	"	neg	%w[i], %w[i]\n"
+ 	"	stadd	%w[i], %[v]")
+-	: [i] "+r" (w0), [v] "+Q" (v->counter)
++	: [i] "+&r" (w0), [v] "+Q" (v->counter)
+ 	: "r" (x1)
+ 	: __LL_SC_CLOBBERS);
+ }
+@@ -180,7 +180,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v)		\
+ 	"	neg	%w[i], %w[i]\n"					\
+ 	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
+ 	"	add	%w[i], %w[i], w30")				\
+-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS , ##cl);					\
+ 									\
+@@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
+ 	/* LSE atomics */						\
+ 	"	neg	%w[i], %w[i]\n"					\
+ 	"	ldadd" #mb "	%w[i], %w[i], %[v]")			\
+-	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (w0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS, ##cl);					\
+ 									\
+@@ -314,7 +314,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
+ 	/* LSE atomics */
+ 	"	mvn	%[i], %[i]\n"
+ 	"	stclr	%[i], %[v]")
+-	: [i] "+r" (x0), [v] "+Q" (v->counter)
++	: [i] "+&r" (x0), [v] "+Q" (v->counter)
+ 	: "r" (x1)
+ 	: __LL_SC_CLOBBERS);
+ }
+@@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
+ 	/* LSE atomics */						\
+ 	"	mvn	%[i], %[i]\n"					\
+ 	"	ldclr" #mb "	%[i], %[i], %[v]")			\
+-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS, ##cl);					\
+ 									\
+@@ -358,7 +358,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+ 	/* LSE atomics */
+ 	"	neg	%[i], %[i]\n"
+ 	"	stadd	%[i], %[v]")
+-	: [i] "+r" (x0), [v] "+Q" (v->counter)
++	: [i] "+&r" (x0), [v] "+Q" (v->counter)
+ 	: "r" (x1)
+ 	: __LL_SC_CLOBBERS);
+ }
+@@ -377,7 +377,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
+ 	"	neg	%[i], %[i]\n"					\
+ 	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
+ 	"	add	%[i], %[i], x30")				\
+-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS, ##cl);					\
+ 									\
+@@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
+ 	/* LSE atomics */						\
+ 	"	neg	%[i], %[i]\n"					\
+ 	"	ldadd" #mb "	%[i], %[i], %[v]")			\
+-	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
++	: [i] "+&r" (x0), [v] "+Q" (v->counter)				\
+ 	: "r" (x1)							\
+ 	: __LL_SC_CLOBBERS, ##cl);					\
+ 									\
+@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ 	"	sub	x30, x30, %[ret]\n"
+ 	"	cbnz	x30, 1b\n"
+ 	"2:")
+-	: [ret] "+r" (x0), [v] "+Q" (v->counter)
++	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ 	:
+ 	: __LL_SC_CLOBBERS, "cc", "memory");
+ 
+@@ -516,7 +516,7 @@ static inline long __cmpxchg_double##name(unsigned long old1,		\
+ 	"	eor	%[old1], %[old1], %[oldval1]\n"			\
+ 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+ 	"	orr	%[old1], %[old1], %[old2]")			\
+-	: [old1] "+r" (x0), [old2] "+r" (x1),				\
++	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
+ 	  [v] "+Q" (*(unsigned long *)ptr)				\
+ 	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+ 	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
+index 66be504edb6c..d894a20b70b2 100644
+--- a/arch/arm64/kernel/arm64ksyms.c
++++ b/arch/arm64/kernel/arm64ksyms.c
+@@ -75,3 +75,11 @@ NOKPROBE_SYMBOL(_mcount);
+ 	/* arm-smccc */
+ EXPORT_SYMBOL(__arm_smccc_smc);
+ EXPORT_SYMBOL(__arm_smccc_hvc);
++
++	/* tishift.S */
++extern long long __ashlti3(long long a, int b);
++EXPORT_SYMBOL(__ashlti3);
++extern long long __ashrti3(long long a, int b);
++EXPORT_SYMBOL(__ashrti3);
++extern long long __lshrti3(long long a, int b);
++EXPORT_SYMBOL(__lshrti3);
+diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
+index d3db9b2cd479..0fdff97794de 100644
+--- a/arch/arm64/lib/tishift.S
++++ b/arch/arm64/lib/tishift.S
+@@ -1,17 +1,6 @@
+-/*
+- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
++/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+  *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
++ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+  */
+ 
+ #include <linux/linkage.h>
+diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c
+index 84938fdbbada..908d58347790 100644
+--- a/arch/m68k/coldfire/device.c
++++ b/arch/m68k/coldfire/device.c
+@@ -135,7 +135,11 @@ static struct platform_device mcf_fec0 = {
+ 	.id			= 0,
+ 	.num_resources		= ARRAY_SIZE(mcf_fec0_resources),
+ 	.resource		= mcf_fec0_resources,
+-	.dev.platform_data	= FEC_PDATA,
++	.dev = {
++		.dma_mask		= &mcf_fec0.dev.coherent_dma_mask,
++		.coherent_dma_mask	= DMA_BIT_MASK(32),
++		.platform_data		= FEC_PDATA,
++	}
+ };
+ 
+ #ifdef MCFFEC_BASE1
+@@ -167,7 +171,11 @@ static struct platform_device mcf_fec1 = {
+ 	.id			= 1,
+ 	.num_resources		= ARRAY_SIZE(mcf_fec1_resources),
+ 	.resource		= mcf_fec1_resources,
+-	.dev.platform_data	= FEC_PDATA,
++	.dev = {
++		.dma_mask		= &mcf_fec1.dev.coherent_dma_mask,
++		.coherent_dma_mask	= DMA_BIT_MASK(32),
++		.platform_data		= FEC_PDATA,
++	}
+ };
+ #endif /* MCFFEC_BASE1 */
+ #endif /* CONFIG_FEC */
+diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
+index b3043c08f769..aee8d7b8f091 100644
+--- a/arch/mips/boot/compressed/uart-16550.c
++++ b/arch/mips/boot/compressed/uart-16550.c
+@@ -18,9 +18,9 @@
+ #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
+ #endif
+ 
+-#if defined(CONFIG_MACH_JZ4740) || defined(CONFIG_MACH_JZ4780)
+-#include <asm/mach-jz4740/base.h>
+-#define PORT(offset) (CKSEG1ADDR(JZ4740_UART0_BASE_ADDR) + (4 * offset))
++#ifdef CONFIG_MACH_INGENIC
++#define INGENIC_UART0_BASE_ADDR	0x10030000
++#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
+ #endif
+ 
+ #ifdef CONFIG_CPU_XLR
+diff --git a/arch/mips/boot/dts/xilfpga/Makefile b/arch/mips/boot/dts/xilfpga/Makefile
+index 9987e0e378c5..69ca00590b8d 100644
+--- a/arch/mips/boot/dts/xilfpga/Makefile
++++ b/arch/mips/boot/dts/xilfpga/Makefile
+@@ -1,4 +1,2 @@
+ # SPDX-License-Identifier: GPL-2.0
+ dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)	+= nexys4ddr.dtb
+-
+-obj-y				+= $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
+index d99f5242169e..b3aec101a65d 100644
+--- a/arch/mips/cavium-octeon/octeon-irq.c
++++ b/arch/mips/cavium-octeon/octeon-irq.c
+@@ -2271,7 +2271,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 
+ 	parent_irq = irq_of_parse_and_map(ciu_node, 0);
+ 	if (!parent_irq) {
+-		pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
++		pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
+ 			ciu_node->name);
+ 		return -EINVAL;
+ 	}
+@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 
+ 	addr = of_get_address(ciu_node, 0, NULL, NULL);
+ 	if (!addr) {
+-		pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
++		pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
+ 		return -EINVAL;
+ 	}
+ 	host_data->raw_reg = (u64)phys_to_virt(
+@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 
+ 	addr = of_get_address(ciu_node, 1, NULL, NULL);
+ 	if (!addr) {
+-		pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
++		pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
+ 		return -EINVAL;
+ 	}
+ 	host_data->en_reg = (u64)phys_to_virt(
+@@ -2299,7 +2299,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 
+ 	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+ 	if (r) {
+-		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
++		pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
+ 			ciu_node->name);
+ 		return r;
+ 	}
+@@ -2309,7 +2309,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 					   &octeon_irq_domain_cib_ops,
+ 					   host_data);
+ 	if (!cib_domain) {
+-		pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
++		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
+ 		return -ENOMEM;
+ 	}
+ 
+diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
+index b51432dd10b6..0dd0d5d460a5 100644
+--- a/arch/mips/generic/Platform
++++ b/arch/mips/generic/Platform
+@@ -16,3 +16,4 @@ all-$(CONFIG_MIPS_GENERIC)	:= vmlinux.gz.itb
+ its-y					:= vmlinux.its.S
+ its-$(CONFIG_FIT_IMAGE_FDT_BOSTON)	+= board-boston.its.S
+ its-$(CONFIG_FIT_IMAGE_FDT_NI169445)	+= board-ni169445.its.S
++its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA)	+= board-xilfpga.its.S
+diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+index aa3800c82332..d99ca862dae3 100644
+--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+@@ -167,7 +167,7 @@
+ #define AR71XX_AHB_DIV_MASK		0x7
+ 
+ #define AR724X_PLL_REG_CPU_CONFIG	0x00
+-#define AR724X_PLL_REG_PCIE_CONFIG	0x18
++#define AR724X_PLL_REG_PCIE_CONFIG	0x10
+ 
+ #define AR724X_PLL_FB_SHIFT		0
+ #define AR724X_PLL_FB_MASK		0x3ff
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 0b23b1ad99e6..8d098b9f395c 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -463,7 +463,7 @@ static int fpr_get_msa(struct task_struct *target,
+ /*
+  * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+  * Choose the appropriate helper for general registers, and then copy
+- * the FCSR register separately.
++ * the FCSR and FIR registers separately.
+  */
+ static int fpr_get(struct task_struct *target,
+ 		   const struct user_regset *regset,
+@@ -471,6 +471,7 @@ static int fpr_get(struct task_struct *target,
+ 		   void *kbuf, void __user *ubuf)
+ {
+ 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
++	const int fir_pos = fcr31_pos + sizeof(u32);
+ 	int err;
+ 
+ 	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+@@ -483,6 +484,12 @@ static int fpr_get(struct task_struct *target,
+ 	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.fpu.fcr31,
+ 				  fcr31_pos, fcr31_pos + sizeof(u32));
++	if (err)
++		return err;
++
++	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++				  &boot_cpu_data.fpu_id,
++				  fir_pos, fir_pos + sizeof(u32));
+ 
+ 	return err;
+ }
+@@ -531,7 +538,8 @@ static int fpr_set_msa(struct task_struct *target,
+ /*
+  * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+  * Choose the appropriate helper for general registers, and then copy
+- * the FCSR register separately.
++ * the FCSR register separately.  Ignore the incoming FIR register
++ * contents though, as the register is read-only.
+  *
+  * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+  * which is supposed to have been guaranteed by the kernel before
+@@ -545,6 +553,7 @@ static int fpr_set(struct task_struct *target,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
++	const int fir_pos = fcr31_pos + sizeof(u32);
+ 	u32 fcr31;
+ 	int err;
+ 
+@@ -572,6 +581,11 @@ static int fpr_set(struct task_struct *target,
+ 		ptrace_setfcr31(target, fcr31);
+ 	}
+ 
++	if (count > 0)
++		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
++						fir_pos,
++						fir_pos + sizeof(u32));
++
+ 	return err;
+ }
+ 
+@@ -793,7 +807,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			fregs = get_fpu_regs(child);
+ 
+ #ifdef CONFIG_32BIT
+-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
++			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ 				/*
+ 				 * The odd registers are actually the high
+ 				 * order bits of the values stored in the even
+@@ -888,7 +902,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 
+ 			init_fp_ctx(child);
+ #ifdef CONFIG_32BIT
+-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
++			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ 				/*
+ 				 * The odd registers are actually the high
+ 				 * order bits of the values stored in the even
+diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
+index 2b9260f92ccd..656a137c1fe2 100644
+--- a/arch/mips/kernel/ptrace32.c
++++ b/arch/mips/kernel/ptrace32.c
+@@ -99,7 +99,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 				break;
+ 			}
+ 			fregs = get_fpu_regs(child);
+-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
++			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ 				/*
+ 				 * The odd registers are actually the high
+ 				 * order bits of the values stored in the even
+@@ -212,7 +212,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 				       sizeof(child->thread.fpu));
+ 				child->thread.fpu.fcr31 = 0;
+ 			}
+-			if (test_thread_flag(TIF_32BIT_FPREGS)) {
++			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ 				/*
+ 				 * The odd registers are actually the high
+ 				 * order bits of the values stored in the even
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 2549fdd27ee1..0f725e9cee8f 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -45,7 +45,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
+ 	{ "cache",	  VCPU_STAT(cache_exits),	 KVM_STAT_VCPU },
+ 	{ "signal",	  VCPU_STAT(signal_exits),	 KVM_STAT_VCPU },
+ 	{ "interrupt",	  VCPU_STAT(int_exits),		 KVM_STAT_VCPU },
+-	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
++	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+ 	{ "tlbmod",	  VCPU_STAT(tlbmod_exits),	 KVM_STAT_VCPU },
+ 	{ "tlbmiss_ld",	  VCPU_STAT(tlbmiss_ld_exits),	 KVM_STAT_VCPU },
+ 	{ "tlbmiss_st",	  VCPU_STAT(tlbmiss_st_exits),	 KVM_STAT_VCPU },
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 6f534b209971..e12dfa48b478 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -851,9 +851,12 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
+ 	/*
+ 	 * Either no secondary cache or the available caches don't have the
+ 	 * subset property so we have to flush the primary caches
+-	 * explicitly
++	 * explicitly.
++	 * If we would need IPI to perform an INDEX-type operation, then
++	 * we have to use the HIT-type alternative as IPI cannot be used
++	 * here due to interrupts possibly being disabled.
+ 	 */
+-	if (size >= dcache_size) {
++	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ 		r4k_blast_dcache();
+ 	} else {
+ 		R4600_HIT_CACHEOP_WAR_IMPL;
+@@ -890,7 +893,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
+ 		return;
+ 	}
+ 
+-	if (size >= dcache_size) {
++	if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) {
+ 		r4k_blast_dcache();
+ 	} else {
+ 		R4600_HIT_CACHEOP_WAR_IMPL;
+diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
+new file mode 100644
+index 000000000000..db0dedab65ee
+--- /dev/null
++++ b/arch/powerpc/include/asm/book3s/64/slice.h
+@@ -0,0 +1,27 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
++#define _ASM_POWERPC_BOOK3S_64_SLICE_H
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#define SLICE_LOW_SHIFT		28
++#define SLICE_LOW_TOP		(0x100000000ul)
++#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
++#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
++
++#define SLICE_HIGH_SHIFT	40
++#define SLICE_NUM_HIGH		(H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
++#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
++
++#else /* CONFIG_PPC_MM_SLICES */
++
++#define get_slice_psize(mm, addr)	((mm)->context.user_psize)
++#define slice_set_user_psize(mm, psize)		\
++do {						\
++	(mm)->context.user_psize = (psize);	\
++	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
++} while (0)
++
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
+diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h
+index c6d3078bd8c3..b8b0be8f1a07 100644
+--- a/arch/powerpc/include/asm/irq_work.h
++++ b/arch/powerpc/include/asm/irq_work.h
+@@ -6,5 +6,6 @@ static inline bool arch_irq_work_has_interrupt(void)
+ {
+ 	return true;
+ }
++extern void arch_irq_work_raise(void);
+ 
+ #endif /* _ASM_POWERPC_IRQ_WORK_H */
+diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
+index 2f806e329648..b324ab46d838 100644
+--- a/arch/powerpc/include/asm/mmu-8xx.h
++++ b/arch/powerpc/include/asm/mmu-8xx.h
+@@ -191,6 +191,12 @@ typedef struct {
+ 	unsigned int id;
+ 	unsigned int active;
+ 	unsigned long vdso_base;
++#ifdef CONFIG_PPC_MM_SLICES
++	u16 user_psize;		/* page size index */
++	u64 low_slices_psize;	/* page size encodings */
++	unsigned char high_slices_psize[0];
++	unsigned long slb_addr_limit;
++#endif
+ } mm_context_t;
+ 
+ #define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
+diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
+new file mode 100644
+index 000000000000..95d532e18092
+--- /dev/null
++++ b/arch/powerpc/include/asm/nohash/32/slice.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
++#define _ASM_POWERPC_NOHASH_32_SLICE_H
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#define SLICE_LOW_SHIFT		28
++#define SLICE_LOW_TOP		(0x100000000ull)
++#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
++#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
++
++#define SLICE_HIGH_SHIFT	0
++#define SLICE_NUM_HIGH		0ul
++#define GET_HIGH_SLICE_INDEX(addr)	(addr & 0)
++
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
+diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
+new file mode 100644
+index 000000000000..ad0d6e3cc1c5
+--- /dev/null
++++ b/arch/powerpc/include/asm/nohash/64/slice.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
++#define _ASM_POWERPC_NOHASH_64_SLICE_H
++
++#ifdef CONFIG_PPC_64K_PAGES
++#define get_slice_psize(mm, addr)	MMU_PAGE_64K
++#else /* CONFIG_PPC_64K_PAGES */
++#define get_slice_psize(mm, addr)	MMU_PAGE_4K
++#endif /* !CONFIG_PPC_64K_PAGES */
++#define slice_set_user_psize(mm, psize)	do { BUG(); } while (0)
++
++#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 8da5d4c1cab2..d5f1c41b7dba 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -344,5 +344,6 @@ typedef struct page *pgtable_t;
+ 
+ #include <asm-generic/memory_model.h>
+ #endif /* __ASSEMBLY__ */
++#include <asm/slice.h>
+ 
+ #endif /* _ASM_POWERPC_PAGE_H */
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index 56234c6fcd61..af04acdb873f 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -86,65 +86,6 @@ extern u64 ppc64_pft_size;
+ 
+ #endif /* __ASSEMBLY__ */
+ 
+-#ifdef CONFIG_PPC_MM_SLICES
+-
+-#define SLICE_LOW_SHIFT		28
+-#define SLICE_HIGH_SHIFT	40
+-
+-#define SLICE_LOW_TOP		(0x100000000ul)
+-#define SLICE_NUM_LOW		(SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+-#define SLICE_NUM_HIGH		(H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
+-
+-#define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
+-#define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
+-
+-#ifndef __ASSEMBLY__
+-struct mm_struct;
+-
+-extern unsigned long slice_get_unmapped_area(unsigned long addr,
+-					     unsigned long len,
+-					     unsigned long flags,
+-					     unsigned int psize,
+-					     int topdown);
+-
+-extern unsigned int get_slice_psize(struct mm_struct *mm,
+-				    unsigned long addr);
+-
+-extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+-extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+-				  unsigned long len, unsigned int psize);
+-
+-#endif /* __ASSEMBLY__ */
+-#else
+-#define slice_init()
+-#ifdef CONFIG_PPC_BOOK3S_64
+-#define get_slice_psize(mm, addr)	((mm)->context.user_psize)
+-#define slice_set_user_psize(mm, psize)		\
+-do {						\
+-	(mm)->context.user_psize = (psize);	\
+-	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
+-} while (0)
+-#else /* !CONFIG_PPC_BOOK3S_64 */
+-#ifdef CONFIG_PPC_64K_PAGES
+-#define get_slice_psize(mm, addr)	MMU_PAGE_64K
+-#else /* CONFIG_PPC_64K_PAGES */
+-#define get_slice_psize(mm, addr)	MMU_PAGE_4K
+-#endif /* !CONFIG_PPC_64K_PAGES */
+-#define slice_set_user_psize(mm, psize)	do { BUG(); } while(0)
+-#endif /* CONFIG_PPC_BOOK3S_64 */
+-
+-#define slice_set_range_psize(mm, start, len, psize)	\
+-	slice_set_user_psize((mm), (psize))
+-#endif /* CONFIG_PPC_MM_SLICES */
+-
+-#ifdef CONFIG_HUGETLB_PAGE
+-
+-#ifdef CONFIG_PPC_MM_SLICES
+-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+-#endif
+-
+-#endif /* !CONFIG_HUGETLB_PAGE */
+-
+ #define VM_DATA_DEFAULT_FLAGS \
+ 	(is_32bit_task() ? \
+ 	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
+diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
+new file mode 100644
+index 000000000000..172711fadb1c
+--- /dev/null
++++ b/arch/powerpc/include/asm/slice.h
+@@ -0,0 +1,42 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_SLICE_H
++#define _ASM_POWERPC_SLICE_H
++
++#ifdef CONFIG_PPC_BOOK3S_64
++#include <asm/book3s/64/slice.h>
++#elif defined(CONFIG_PPC64)
++#include <asm/nohash/64/slice.h>
++#elif defined(CONFIG_PPC_MMU_NOHASH)
++#include <asm/nohash/32/slice.h>
++#endif
++
++#ifdef CONFIG_PPC_MM_SLICES
++
++#ifdef CONFIG_HUGETLB_PAGE
++#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
++#endif
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
++#ifndef __ASSEMBLY__
++
++struct mm_struct;
++
++unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
++				      unsigned long flags, unsigned int psize,
++				      int topdown);
++
++unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
++
++void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
++void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
++			   unsigned long len, unsigned int psize);
++#endif /* __ASSEMBLY__ */
++
++#else /* CONFIG_PPC_MM_SLICES */
++
++#define slice_set_range_psize(mm, start, len, psize)	\
++	slice_set_user_psize((mm), (psize))
++#endif /* CONFIG_PPC_MM_SLICES */
++
++#endif /* _ASM_POWERPC_SLICE_H */
+diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
+index 3f30c994e931..458b928dbd84 100644
+--- a/arch/powerpc/kernel/cpu_setup_power.S
++++ b/arch/powerpc/kernel/cpu_setup_power.S
+@@ -28,6 +28,7 @@ _GLOBAL(__setup_cpu_power7)
+ 	beqlr
+ 	li	r0,0
+ 	mtspr	SPRN_LPID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr	r3,SPRN_LPCR
+ 	li	r4,(LPCR_LPES1 >> LPCR_LPES_SH)
+ 	bl	__init_LPCR_ISA206
+@@ -41,6 +42,7 @@ _GLOBAL(__restore_cpu_power7)
+ 	beqlr
+ 	li	r0,0
+ 	mtspr	SPRN_LPID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr	r3,SPRN_LPCR
+ 	li	r4,(LPCR_LPES1 >> LPCR_LPES_SH)
+ 	bl	__init_LPCR_ISA206
+@@ -57,6 +59,7 @@ _GLOBAL(__setup_cpu_power8)
+ 	beqlr
+ 	li	r0,0
+ 	mtspr	SPRN_LPID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr	r3,SPRN_LPCR
+ 	ori	r3, r3, LPCR_PECEDH
+ 	li	r4,0 /* LPES = 0 */
+@@ -78,6 +81,7 @@ _GLOBAL(__restore_cpu_power8)
+ 	beqlr
+ 	li	r0,0
+ 	mtspr	SPRN_LPID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr   r3,SPRN_LPCR
+ 	ori	r3, r3, LPCR_PECEDH
+ 	li	r4,0 /* LPES = 0 */
+@@ -99,6 +103,7 @@ _GLOBAL(__setup_cpu_power9)
+ 	mtspr	SPRN_PSSCR,r0
+ 	mtspr	SPRN_LPID,r0
+ 	mtspr	SPRN_PID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr	r3,SPRN_LPCR
+ 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE  | LPCR_HEIC)
+ 	or	r3, r3, r4
+@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
+ 	mtspr	SPRN_PSSCR,r0
+ 	mtspr	SPRN_LPID,r0
+ 	mtspr	SPRN_PID,r0
++	mtspr	SPRN_PCR,r0
+ 	mfspr   r3,SPRN_LPCR
+ 	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
+ 	or	r3, r3, r4
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 078553a177de..afe6808d7a41 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -114,6 +114,7 @@ static void __restore_cpu_cpufeatures(void)
+ 	if (hv_mode) {
+ 		mtspr(SPRN_LPID, 0);
+ 		mtspr(SPRN_HFSCR, system_registers.hfscr);
++		mtspr(SPRN_PCR, 0);
+ 	}
+ 	mtspr(SPRN_FSCR, system_registers.fscr);
+ 
+diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
+index 01e1c1997893..2fce278446f5 100644
+--- a/arch/powerpc/kernel/idle_book3s.S
++++ b/arch/powerpc/kernel/idle_book3s.S
+@@ -834,6 +834,8 @@ BEGIN_FTR_SECTION
+ 	mtspr	SPRN_PTCR,r4
+ 	ld	r4,_RPR(r1)
+ 	mtspr	SPRN_RPR,r4
++	ld	r4,_AMOR(r1)
++	mtspr	SPRN_AMOR,r4
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ 
+ 	ld	r4,_TSCR(r1)
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index d73ec518ef80..a6002f9449b1 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -919,6 +919,8 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_PPC64
+ 	if (!radix_enabled())
+ 		init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
++#elif defined(CONFIG_PPC_8xx)
++	init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+ #else
+ #error	"context.addr_limit not initialized."
+ #endif
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 1e48d157196a..578c5e80aa14 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -208,6 +208,12 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
+ 	}
+ 	raw_local_irq_restore(flags);
+ 
++	/*
++	 * system_reset_excption handles debugger, crash dump, panic, for 0x100
++	 */
++	if (TRAP(regs) == 0x100)
++		return;
++
+ 	crash_fadump(regs, "die oops");
+ 
+ 	if (kexec_should_crash(current))
+@@ -272,8 +278,13 @@ void die(const char *str, struct pt_regs *regs, long err)
+ {
+ 	unsigned long flags;
+ 
+-	if (debugger(regs))
+-		return;
++	/*
++	 * system_reset_excption handles debugger, crash dump, panic, for 0x100
++	 */
++	if (TRAP(regs) != 0x100) {
++		if (debugger(regs))
++			return;
++	}
+ 
+ 	flags = oops_begin(regs);
+ 	if (__die(str, regs, err))
+@@ -1612,6 +1623,22 @@ void facility_unavailable_exception(struct pt_regs *regs)
+ 		value = mfspr(SPRN_FSCR);
+ 
+ 	status = value >> 56;
++	if ((hv || status >= 2) &&
++	    (status < ARRAY_SIZE(facility_strings)) &&
++	    facility_strings[status])
++		facility = facility_strings[status];
++
++	/* We should not have taken this interrupt in kernel */
++	if (!user_mode(regs)) {
++		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
++			 facility, status, regs->nip);
++		die("Unexpected facility unavailable exception", regs, SIGABRT);
++	}
++
++	/* We restore the interrupt state now */
++	if (!arch_irq_disabled_regs(regs))
++		local_irq_enable();
++
+ 	if (status == FSCR_DSCR_LG) {
+ 		/*
+ 		 * User is accessing the DSCR register using the problem
+@@ -1678,25 +1705,11 @@ void facility_unavailable_exception(struct pt_regs *regs)
+ 		return;
+ 	}
+ 
+-	if ((hv || status >= 2) &&
+-	    (status < ARRAY_SIZE(facility_strings)) &&
+-	    facility_strings[status])
+-		facility = facility_strings[status];
+-
+-	/* We restore the interrupt state now */
+-	if (!arch_irq_disabled_regs(regs))
+-		local_irq_enable();
+-
+ 	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
+ 		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
+ 
+ out:
+-	if (user_mode(regs)) {
+-		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+-		return;
+-	}
+-
+-	die("Unexpected facility unavailable exception", regs, SIGABRT);
++	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ }
+ #endif
+ 
+diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
+index 849f50cd62f2..cf77d755246d 100644
+--- a/arch/powerpc/mm/8xx_mmu.c
++++ b/arch/powerpc/mm/8xx_mmu.c
+@@ -192,7 +192,7 @@ void set_context(unsigned long id, pgd_t *pgd)
+ 	mtspr(SPRN_M_TW, __pa(pgd) - offset);
+ 
+ 	/* Update context */
+-	mtspr(SPRN_M_CASID, id);
++	mtspr(SPRN_M_CASID, id - 1);
+ 	/* sync */
+ 	mb();
+ }
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 876da2bc1796..590be3fa0ce2 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -553,9 +553,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ 	struct hstate *hstate = hstate_file(file);
+ 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
+ 
++#ifdef CONFIG_PPC_RADIX_MMU
+ 	if (radix_enabled())
+ 		return radix__hugetlb_get_unmapped_area(file, addr, len,
+ 						       pgoff, flags);
++#endif
+ 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
+ }
+ #endif
+diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
+index 4554d6527682..d98f7e5c141b 100644
+--- a/arch/powerpc/mm/mmu_context_nohash.c
++++ b/arch/powerpc/mm/mmu_context_nohash.c
+@@ -331,6 +331,20 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
+ {
+ 	pr_hard("initing context for mm @%p\n", mm);
+ 
++#ifdef	CONFIG_PPC_MM_SLICES
++	if (!mm->context.slb_addr_limit)
++		mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
++
++	/*
++	 * We have MMU_NO_CONTEXT set to be ~0. Hence check
++	 * explicitly against context.id == 0. This ensures that we properly
++	 * initialize context slice details for newly allocated mm's (which will
++	 * have id == 0) and don't alter context slice inherited via fork (which
++	 * will have id != 0).
++	 */
++	if (mm->context.id == 0)
++		slice_set_user_psize(mm, mmu_virtual_psize);
++#endif
+ 	mm->context.id = MMU_NO_CONTEXT;
+ 	mm->context.active = 0;
+ 	return 0;
+@@ -428,8 +442,8 @@ void __init mmu_context_init(void)
+ 	 *      -- BenH
+ 	 */
+ 	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
+-		first_context = 0;
+-		last_context = 15;
++		first_context = 1;
++		last_context = 16;
+ 		no_selective_tlbil = true;
+ 	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+ 		first_context = 1;
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 23ec2c5e3b78..0beca1ba2282 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -73,10 +73,12 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
+ 	unsigned long end = start + len - 1;
+ 
+ 	ret->low_slices = 0;
+-	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++	if (SLICE_NUM_HIGH)
++		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+ 
+ 	if (start < SLICE_LOW_TOP) {
+-		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
++		unsigned long mend = min(end,
++					 (unsigned long)(SLICE_LOW_TOP - 1));
+ 
+ 		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+ 			- (1u << GET_LOW_SLICE_INDEX(start));
+@@ -113,11 +115,13 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
+ 	unsigned long start = slice << SLICE_HIGH_SHIFT;
+ 	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
+ 
++#ifdef CONFIG_PPC64
+ 	/* Hack, so that each addresses is controlled by exactly one
+ 	 * of the high or low area bitmaps, the first high area starts
+ 	 * at 4GB, not 0 */
+ 	if (start == 0)
+ 		start = SLICE_LOW_TOP;
++#endif
+ 
+ 	return !slice_area_is_free(mm, start, end - start);
+ }
+@@ -128,7 +132,8 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+ 	unsigned long i;
+ 
+ 	ret->low_slices = 0;
+-	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++	if (SLICE_NUM_HIGH)
++		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+ 
+ 	for (i = 0; i < SLICE_NUM_LOW; i++)
+ 		if (!slice_low_has_vma(mm, i))
+@@ -151,7 +156,8 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
+ 	u64 lpsizes;
+ 
+ 	ret->low_slices = 0;
+-	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
++	if (SLICE_NUM_HIGH)
++		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+ 
+ 	lpsizes = mm->context.low_slices_psize;
+ 	for (i = 0; i < SLICE_NUM_LOW; i++)
+@@ -180,6 +186,10 @@ static int slice_check_fit(struct mm_struct *mm,
+ 	 */
+ 	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
+ 
++	if (!SLICE_NUM_HIGH)
++		return (mask.low_slices & available.low_slices) ==
++		       mask.low_slices;
++
+ 	bitmap_and(result, mask.high_slices,
+ 		   available.high_slices, slice_count);
+ 
+@@ -189,6 +199,7 @@ static int slice_check_fit(struct mm_struct *mm,
+ 
+ static void slice_flush_segments(void *parm)
+ {
++#ifdef CONFIG_PPC64
+ 	struct mm_struct *mm = parm;
+ 	unsigned long flags;
+ 
+@@ -200,6 +211,7 @@ static void slice_flush_segments(void *parm)
+ 	local_irq_save(flags);
+ 	slb_flush_and_rebolt();
+ 	local_irq_restore(flags);
++#endif
+ }
+ 
+ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
+@@ -388,21 +400,21 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
+ 
+ static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
+ {
+-	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+-
+ 	dst->low_slices |= src->low_slices;
+-	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+-	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
++	if (!SLICE_NUM_HIGH)
++		return;
++	bitmap_or(dst->high_slices, dst->high_slices, src->high_slices,
++		  SLICE_NUM_HIGH);
+ }
+ 
+ static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
+ {
+-	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+-
+ 	dst->low_slices &= ~src->low_slices;
+ 
+-	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+-	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
++	if (!SLICE_NUM_HIGH)
++		return;
++	bitmap_andnot(dst->high_slices, dst->high_slices, src->high_slices,
++		      SLICE_NUM_HIGH);
+ }
+ 
+ #ifdef CONFIG_PPC_64K_PAGES
+@@ -450,14 +462,17 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ 	 * init different masks
+ 	 */
+ 	mask.low_slices = 0;
+-	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
+ 
+ 	/* silence stupid warning */;
+ 	potential_mask.low_slices = 0;
+-	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
+ 
+ 	compat_mask.low_slices = 0;
+-	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
++
++	if (SLICE_NUM_HIGH) {
++		bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);
++		bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);
++		bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);
++	}
+ 
+ 	/* Sanity checks */
+ 	BUG_ON(mm->task_size == 0);
+@@ -595,7 +610,9 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+  convert:
+ 	slice_andnot_mask(&mask, &good_mask);
+ 	slice_andnot_mask(&mask, &compat_mask);
+-	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
++	if (mask.low_slices ||
++	    (SLICE_NUM_HIGH &&
++	     !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH))) {
+ 		slice_convert(mm, mask, psize);
+ 		if (psize > MMU_PAGE_BASE)
+ 			on_each_cpu(slice_flush_segments, mm, 1);
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index f89bbd54ecec..1e55ae2f2afd 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -457,6 +457,16 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+ 				/* invalid entry */
+ 				continue;
+ 
++			/*
++			 * BHRB rolling buffer could very much contain the kernel
++			 * addresses at this point. Check the privileges before
++			 * exporting it to userspace (avoid exposure of regions
++			 * where we could have speculative execution)
++			 */
++			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
++				is_kernel_addr(addr))
++				continue;
++
+ 			/* Branches are read most recent first (ie. mfbhrb 0 is
+ 			 * the most recent branch).
+ 			 * There are two types of valid entries:
+@@ -1226,6 +1236,7 @@ static void power_pmu_disable(struct pmu *pmu)
+ 		 */
+ 		write_mmcr0(cpuhw, val);
+ 		mb();
++		isync();
+ 
+ 		/*
+ 		 * Disable instruction sampling if it was enabled
+@@ -1234,12 +1245,26 @@ static void power_pmu_disable(struct pmu *pmu)
+ 			mtspr(SPRN_MMCRA,
+ 			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ 			mb();
++			isync();
+ 		}
+ 
+ 		cpuhw->disabled = 1;
+ 		cpuhw->n_added = 0;
+ 
+ 		ebb_switch_out(mmcr0);
++
++#ifdef CONFIG_PPC64
++		/*
++		 * These are readable by userspace, may contain kernel
++		 * addresses and are not switched by context switch, so clear
++		 * them now to avoid leaking anything to userspace in general
++		 * including to another process.
++		 */
++		if (ppmu->flags & PPMU_ARCH_207S) {
++			mtspr(SPRN_SDAR, 0);
++			mtspr(SPRN_SIAR, 0);
++		}
++#endif
+ 	}
+ 
+ 	local_irq_restore(flags);
+diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
+index a429d859f15d..5a8b1bf1e819 100644
+--- a/arch/powerpc/platforms/Kconfig.cputype
++++ b/arch/powerpc/platforms/Kconfig.cputype
+@@ -326,6 +326,7 @@ config PPC_BOOK3E_MMU
+ config PPC_MM_SLICES
+ 	bool
+ 	default y if PPC_BOOK3S_64
++	default y if PPC_8xx && HUGETLB_PAGE
+ 	default n
+ 
+ config PPC_HAVE_PMU_SUPPORT
+diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
+index e7b621f619b2..a9a3d62c34d6 100644
+--- a/arch/powerpc/platforms/powernv/npu-dma.c
++++ b/arch/powerpc/platforms/powernv/npu-dma.c
+@@ -417,6 +417,11 @@ struct npu_context {
+ 	void *priv;
+ };
+ 
++struct mmio_atsd_reg {
++	struct npu *npu;
++	int reg;
++};
++
+ /*
+  * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
+  * if none are available.
+@@ -426,7 +431,7 @@ static int get_mmio_atsd_reg(struct npu *npu)
+ 	int i;
+ 
+ 	for (i = 0; i < npu->mmio_atsd_count; i++) {
+-		if (!test_and_set_bit(i, &npu->mmio_atsd_usage))
++		if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
+ 			return i;
+ 	}
+ 
+@@ -435,86 +440,90 @@ static int get_mmio_atsd_reg(struct npu *npu)
+ 
+ static void put_mmio_atsd_reg(struct npu *npu, int reg)
+ {
+-	clear_bit(reg, &npu->mmio_atsd_usage);
++	clear_bit_unlock(reg, &npu->mmio_atsd_usage);
+ }
+ 
+ /* MMIO ATSD register offsets */
+ #define XTS_ATSD_AVA  1
+ #define XTS_ATSD_STAT 2
+ 
+-static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
+-				unsigned long va)
++static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
++				unsigned long launch, unsigned long va)
+ {
+-	int mmio_atsd_reg;
+-
+-	do {
+-		mmio_atsd_reg = get_mmio_atsd_reg(npu);
+-		cpu_relax();
+-	} while (mmio_atsd_reg < 0);
++	struct npu *npu = mmio_atsd_reg->npu;
++	int reg = mmio_atsd_reg->reg;
+ 
+ 	__raw_writeq(cpu_to_be64(va),
+-		npu->mmio_atsd_regs[mmio_atsd_reg] + XTS_ATSD_AVA);
++		npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
+ 	eieio();
+-	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[mmio_atsd_reg]);
+-
+-	return mmio_atsd_reg;
++	__raw_writeq(cpu_to_be64(launch), npu->mmio_atsd_regs[reg]);
+ }
+ 
+-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
++static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
++				unsigned long pid, bool flush)
+ {
++	int i;
+ 	unsigned long launch;
+ 
+-	/* IS set to invalidate matching PID */
+-	launch = PPC_BIT(12);
++	for (i = 0; i <= max_npu2_index; i++) {
++		if (mmio_atsd_reg[i].reg < 0)
++			continue;
++
++		/* IS set to invalidate matching PID */
++		launch = PPC_BIT(12);
+ 
+-	/* PRS set to process-scoped */
+-	launch |= PPC_BIT(13);
++		/* PRS set to process-scoped */
++		launch |= PPC_BIT(13);
+ 
+-	/* AP */
+-	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
++		/* AP */
++		launch |= (u64)
++			mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+ 
+-	/* PID */
+-	launch |= pid << PPC_BITLSHIFT(38);
++		/* PID */
++		launch |= pid << PPC_BITLSHIFT(38);
+ 
+-	/* No flush */
+-	launch |= !flush << PPC_BITLSHIFT(39);
++		/* No flush */
++		launch |= !flush << PPC_BITLSHIFT(39);
+ 
+-	/* Invalidating the entire process doesn't use a va */
+-	return mmio_launch_invalidate(npu, launch, 0);
++		/* Invalidating the entire process doesn't use a va */
++		mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
++	}
+ }
+ 
+-static int mmio_invalidate_va(struct npu *npu, unsigned long va,
+-			unsigned long pid, bool flush)
++static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
++			unsigned long va, unsigned long pid, bool flush)
+ {
++	int i;
+ 	unsigned long launch;
+ 
+-	/* IS set to invalidate target VA */
+-	launch = 0;
++	for (i = 0; i <= max_npu2_index; i++) {
++		if (mmio_atsd_reg[i].reg < 0)
++			continue;
++
++		/* IS set to invalidate target VA */
++		launch = 0;
+ 
+-	/* PRS set to process scoped */
+-	launch |= PPC_BIT(13);
++		/* PRS set to process scoped */
++		launch |= PPC_BIT(13);
+ 
+-	/* AP */
+-	launch |= (u64) mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
++		/* AP */
++		launch |= (u64)
++			mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+ 
+-	/* PID */
+-	launch |= pid << PPC_BITLSHIFT(38);
++		/* PID */
++		launch |= pid << PPC_BITLSHIFT(38);
+ 
+-	/* No flush */
+-	launch |= !flush << PPC_BITLSHIFT(39);
++		/* No flush */
++		launch |= !flush << PPC_BITLSHIFT(39);
+ 
+-	return mmio_launch_invalidate(npu, launch, va);
++		mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
++	}
+ }
+ 
+ #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
+ 
+-struct mmio_atsd_reg {
+-	struct npu *npu;
+-	int reg;
+-};
+-
+ static void mmio_invalidate_wait(
+-	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
++	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
+ {
+ 	struct npu *npu;
+ 	int i, reg;
+@@ -529,16 +538,67 @@ static void mmio_invalidate_wait(
+ 		reg = mmio_atsd_reg[i].reg;
+ 		while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+ 			cpu_relax();
++	}
++}
++
++/*
++ * Acquires all the address translation shootdown (ATSD) registers required to
++ * launch an ATSD on all links this npu_context is active on.
++ */
++static void acquire_atsd_reg(struct npu_context *npu_context,
++			struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
++{
++	int i, j;
++	struct npu *npu;
++	struct pci_dev *npdev;
++	struct pnv_phb *nphb;
+ 
+-		put_mmio_atsd_reg(npu, reg);
++	for (i = 0; i <= max_npu2_index; i++) {
++		mmio_atsd_reg[i].reg = -1;
++		for (j = 0; j < NV_MAX_LINKS; j++) {
++			/*
++			 * There are no ordering requirements with respect to
++			 * the setup of struct npu_context, but to ensure
++			 * consistent behaviour we need to ensure npdev[][] is
++			 * only read once.
++			 */
++			npdev = READ_ONCE(npu_context->npdev[i][j]);
++			if (!npdev)
++				continue;
+ 
++			nphb = pci_bus_to_host(npdev->bus)->private_data;
++			npu = &nphb->npu;
++			mmio_atsd_reg[i].npu = npu;
++			mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
++			while (mmio_atsd_reg[i].reg < 0) {
++				mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
++				cpu_relax();
++			}
++			break;
++		}
++	}
++}
++
++/*
++ * Release previously acquired ATSD registers. To avoid deadlocks the registers
++ * must be released in the same order they were acquired above in
++ * acquire_atsd_reg.
++ */
++static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
++{
++	int i;
++
++	for (i = 0; i <= max_npu2_index; i++) {
+ 		/*
+-		 * The GPU requires two flush ATSDs to ensure all entries have
+-		 * been flushed. We use PID 0 as it will never be used for a
+-		 * process on the GPU.
++		 * We can't rely on npu_context->npdev[][] being the same here
++		 * as when acquire_atsd_reg() was called, hence we use the
++		 * values stored in mmio_atsd_reg during the acquire phase
++		 * rather than re-reading npdev[][].
+ 		 */
+-		if (flush)
+-			mmio_invalidate_pid(npu, 0, true);
++		if (mmio_atsd_reg[i].reg < 0)
++			continue;
++
++		put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
+ 	}
+ }
+ 
+@@ -549,10 +609,6 @@ static void mmio_invalidate_wait(
+ static void mmio_invalidate(struct npu_context *npu_context, int va,
+ 			unsigned long address, bool flush)
+ {
+-	int i, j;
+-	struct npu *npu;
+-	struct pnv_phb *nphb;
+-	struct pci_dev *npdev;
+ 	struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
+ 	unsigned long pid = npu_context->mm->context.id;
+ 
+@@ -568,37 +624,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
+ 	 * Loop over all the NPUs this process is active on and launch
+ 	 * an invalidate.
+ 	 */
+-	for (i = 0; i <= max_npu2_index; i++) {
+-		mmio_atsd_reg[i].reg = -1;
+-		for (j = 0; j < NV_MAX_LINKS; j++) {
+-			npdev = npu_context->npdev[i][j];
+-			if (!npdev)
+-				continue;
+-
+-			nphb = pci_bus_to_host(npdev->bus)->private_data;
+-			npu = &nphb->npu;
+-			mmio_atsd_reg[i].npu = npu;
+-
+-			if (va)
+-				mmio_atsd_reg[i].reg =
+-					mmio_invalidate_va(npu, address, pid,
+-							flush);
+-			else
+-				mmio_atsd_reg[i].reg =
+-					mmio_invalidate_pid(npu, pid, flush);
+-
+-			/*
+-			 * The NPU hardware forwards the shootdown to all GPUs
+-			 * so we only have to launch one shootdown per NPU.
+-			 */
+-			break;
+-		}
++	acquire_atsd_reg(npu_context, mmio_atsd_reg);
++	if (va)
++		mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
++	else
++		mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
++
++	mmio_invalidate_wait(mmio_atsd_reg);
++	if (flush) {
++		/*
++		 * The GPU requires two flush ATSDs to ensure all entries have
++		 * been flushed. We use PID 0 as it will never be used for a
++		 * process on the GPU.
++		 */
++		mmio_invalidate_pid(mmio_atsd_reg, 0, true);
++		mmio_invalidate_wait(mmio_atsd_reg);
++		mmio_invalidate_pid(mmio_atsd_reg, 0, true);
++		mmio_invalidate_wait(mmio_atsd_reg);
+ 	}
+-
+-	mmio_invalidate_wait(mmio_atsd_reg, flush);
+-	if (flush)
+-		/* Wait for the flush to complete */
+-		mmio_invalidate_wait(mmio_atsd_reg, false);
++	release_atsd_reg(mmio_atsd_reg);
+ }
+ 
+ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
+@@ -741,7 +785,16 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
+ 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ 							&nvlink_index)))
+ 		return ERR_PTR(-ENODEV);
+-	npu_context->npdev[npu->index][nvlink_index] = npdev;
++
++	/*
++	 * npdev is a pci_dev pointer setup by the PCI code. We assign it to
++	 * npdev[][] to indicate to the mmu notifiers that an invalidation
++	 * should also be sent over this nvlink. The notifiers don't use any
++	 * other fields in npu_context, so we just need to ensure that when they
++	 * deference npu_context->npdev[][] it is either a valid pointer or
++	 * NULL.
++	 */
++	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
+ 
+ 	if (!nphb->npu.nmmu_flush) {
+ 		/*
+@@ -793,7 +846,7 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
+ 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ 							&nvlink_index)))
+ 		return;
+-	npu_context->npdev[npu->index][nvlink_index] = NULL;
++	WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
+ 	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
+ 				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ 	kref_put(&npu_context->kref, pnv_npu2_release_context);
+diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
+index ca22f1eae050..3161e39eea1d 100644
+--- a/arch/powerpc/platforms/powernv/vas-debug.c
++++ b/arch/powerpc/platforms/powernv/vas-debug.c
+@@ -179,6 +179,7 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
+ {
+ 	struct dentry *d;
+ 
++	vas_init_dbgdir();
+ 	if (!vas_debugfs)
+ 		return;
+ 
+@@ -201,8 +202,18 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
+ 	vinst->dbgdir = NULL;
+ }
+ 
++/*
++ * Set up the "root" VAS debugfs dir. Return if we already set it up
++ * (or failed to) in an earlier instance of VAS.
++ */
+ void vas_init_dbgdir(void)
+ {
++	static bool first_time = true;
++
++	if (!first_time)
++		return;
++
++	first_time = false;
+ 	vas_debugfs = debugfs_create_dir("vas", NULL);
+ 	if (IS_ERR(vas_debugfs))
+ 		vas_debugfs = NULL;
+diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
+index aebbe95c9230..5a2b24cbbc88 100644
+--- a/arch/powerpc/platforms/powernv/vas.c
++++ b/arch/powerpc/platforms/powernv/vas.c
+@@ -160,8 +160,6 @@ static int __init vas_init(void)
+ 	int found = 0;
+ 	struct device_node *dn;
+ 
+-	vas_init_dbgdir();
+-
+ 	platform_driver_register(&vas_driver);
+ 
+ 	for_each_compatible_node(dn, NULL, "ibm,vas") {
+@@ -169,8 +167,10 @@ static int __init vas_init(void)
+ 		found++;
+ 	}
+ 
+-	if (!found)
++	if (!found) {
++		platform_driver_unregister(&vas_driver);
+ 		return -ENODEV;
++	}
+ 
+ 	pr_devel("Found %d instances\n", found);
+ 
+diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
+index 73067805300a..1d4e0ef658d3 100644
+--- a/arch/powerpc/sysdev/mpic.c
++++ b/arch/powerpc/sysdev/mpic.c
+@@ -626,7 +626,7 @@ static inline u32 mpic_physmask(u32 cpumask)
+ 	int i;
+ 	u32 mask = 0;
+ 
+-	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
++	for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
+ 		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
+ 	return mask;
+ }
+diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
+new file mode 100644
+index 000000000000..2b443a3a487f
+--- /dev/null
++++ b/arch/riscv/include/asm/fence.h
+@@ -0,0 +1,12 @@
++#ifndef _ASM_RISCV_FENCE_H
++#define _ASM_RISCV_FENCE_H
++
++#ifdef CONFIG_SMP
++#define RISCV_ACQUIRE_BARRIER		"\tfence r , rw\n"
++#define RISCV_RELEASE_BARRIER		"\tfence rw,  w\n"
++#else
++#define RISCV_ACQUIRE_BARRIER
++#define RISCV_RELEASE_BARRIER
++#endif
++
++#endif	/* _ASM_RISCV_FENCE_H */
+diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
+index 2fd27e8ef1fd..8eb26d1ede81 100644
+--- a/arch/riscv/include/asm/spinlock.h
++++ b/arch/riscv/include/asm/spinlock.h
+@@ -17,6 +17,7 @@
+ 
+ #include <linux/kernel.h>
+ #include <asm/current.h>
++#include <asm/fence.h>
+ 
+ /*
+  * Simple spin lock operations.  These provide no fairness guarantees.
+@@ -28,10 +29,7 @@
+ 
+ static inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
+-	__asm__ __volatile__ (
+-		"amoswap.w.rl x0, x0, %0"
+-		: "=A" (lock->lock)
+-		:: "memory");
++	smp_store_release(&lock->lock, 0);
+ }
+ 
+ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+ 	int tmp = 1, busy;
+ 
+ 	__asm__ __volatile__ (
+-		"amoswap.w.aq %0, %2, %1"
++		"	amoswap.w %0, %2, %1\n"
++		RISCV_ACQUIRE_BARRIER
+ 		: "=r" (busy), "+A" (lock->lock)
+ 		: "r" (tmp)
+ 		: "memory");
+@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ 		"1:	lr.w	%1, %0\n"
+ 		"	bltz	%1, 1b\n"
+ 		"	addi	%1, %1, 1\n"
+-		"	sc.w.aq	%1, %1, %0\n"
++		"	sc.w	%1, %1, %0\n"
+ 		"	bnez	%1, 1b\n"
++		RISCV_ACQUIRE_BARRIER
+ 		: "+A" (lock->lock), "=&r" (tmp)
+ 		:: "memory");
+ }
+@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
+ 		"1:	lr.w	%1, %0\n"
+ 		"	bnez	%1, 1b\n"
+ 		"	li	%1, -1\n"
+-		"	sc.w.aq	%1, %1, %0\n"
++		"	sc.w	%1, %1, %0\n"
+ 		"	bnez	%1, 1b\n"
++		RISCV_ACQUIRE_BARRIER
+ 		: "+A" (lock->lock), "=&r" (tmp)
+ 		:: "memory");
+ }
+@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
+ 		"1:	lr.w	%1, %0\n"
+ 		"	bltz	%1, 1f\n"
+ 		"	addi	%1, %1, 1\n"
+-		"	sc.w.aq	%1, %1, %0\n"
++		"	sc.w	%1, %1, %0\n"
+ 		"	bnez	%1, 1b\n"
++		RISCV_ACQUIRE_BARRIER
+ 		"1:\n"
+ 		: "+A" (lock->lock), "=&r" (busy)
+ 		:: "memory");
+@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ 		"1:	lr.w	%1, %0\n"
+ 		"	bnez	%1, 1f\n"
+ 		"	li	%1, -1\n"
+-		"	sc.w.aq	%1, %1, %0\n"
++		"	sc.w	%1, %1, %0\n"
+ 		"	bnez	%1, 1b\n"
++		RISCV_ACQUIRE_BARRIER
+ 		"1:\n"
+ 		: "+A" (lock->lock), "=&r" (busy)
+ 		:: "memory");
+@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ 	__asm__ __volatile__(
+-		"amoadd.w.rl x0, %1, %0"
++		RISCV_RELEASE_BARRIER
++		"	amoadd.w x0, %1, %0\n"
+ 		: "+A" (lock->lock)
+ 		: "r" (-1)
+ 		: "memory");
+@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
+ 
+ static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+-	__asm__ __volatile__ (
+-		"amoswap.w.rl x0, x0, %0"
+-		: "=A" (lock->lock)
+-		:: "memory");
++	smp_store_release(&lock->lock, 0);
+ }
+ 
+ #endif /* _ASM_RISCV_SPINLOCK_H */
+diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
+index 8961e3970901..969882b54266 100644
+--- a/arch/s390/kvm/vsie.c
++++ b/arch/s390/kvm/vsie.c
+@@ -578,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ 
+ 	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
+ 	if (gpa && (scb_s->ecb & ECB_TE)) {
+-		if (!(gpa & ~0x1fffU)) {
++		if (!(gpa & ~0x1fffUL)) {
+ 			rc = set_validity_icpt(scb_s, 0x0080U);
+ 			goto unpin;
+ 		}
+diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
+index c001f782c5f1..28cc61216b64 100644
+--- a/arch/sh/kernel/entry-common.S
++++ b/arch/sh/kernel/entry-common.S
+@@ -255,7 +255,7 @@ debug_trap:
+ 	mov.l	@r8, r8
+ 	jsr	@r8
+ 	 nop
+-	bra	__restore_all
++	bra	ret_from_exception
+ 	 nop
+ 	CFI_ENDPROC
+ 
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index abad97edf736..28db058d471b 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -83,7 +83,11 @@ ATOMIC_OPS(xor)
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
+ 
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++	return xchg(&v->counter, new);
++}
+ 
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 06086439b7bd..70610604c360 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -1162,16 +1162,13 @@ int x86_perf_event_set_period(struct perf_event *event)
+ 
+ 	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+ 
+-	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
+-	    local64_read(&hwc->prev_count) != (u64)-left) {
+-		/*
+-		 * The hw event starts counting from this event offset,
+-		 * mark it to be able to extra future deltas:
+-		 */
+-		local64_set(&hwc->prev_count, (u64)-left);
++	/*
++	 * The hw event starts counting from this event offset,
++	 * mark it to be able to extra future deltas:
++	 */
++	local64_set(&hwc->prev_count, (u64)-left);
+ 
+-		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+-	}
++	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+ 
+ 	/*
+ 	 * Due to erratum on certan cpu we need
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 1e41d7508d99..39cd0b36c790 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
+ 	int bit, loops;
+ 	u64 status;
+ 	int handled;
++	int pmu_enabled;
+ 
+ 	cpuc = this_cpu_ptr(&cpu_hw_events);
+ 
++	/*
++	 * Save the PMU state.
++	 * It needs to be restored when leaving the handler.
++	 */
++	pmu_enabled = cpuc->enabled;
+ 	/*
+ 	 * No known reason to not always do late ACK,
+ 	 * but just in case do it opt-in.
+@@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
+ 	if (!x86_pmu.late_ack)
+ 		apic_write(APIC_LVTPC, APIC_DM_NMI);
+ 	intel_bts_disable_local();
++	cpuc->enabled = 0;
+ 	__intel_pmu_disable_all();
+ 	handled = intel_pmu_drain_bts_buffer();
+ 	handled += intel_bts_interrupt();
+@@ -2320,7 +2327,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
+ 
+ done:
+ 	/* Only restore PMU state when it's active. See x86_pmu_disable(). */
+-	if (cpuc->enabled)
++	cpuc->enabled = pmu_enabled;
++	if (pmu_enabled)
+ 		__intel_pmu_enable_all(0, true);
+ 	intel_bts_enable_local();
+ 
+@@ -3188,7 +3196,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+  * Therefore the effective (average) period matches the requested period,
+  * despite coarser hardware granularity.
+  */
+-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
++static u64 bdw_limit_period(struct perf_event *event, u64 left)
+ {
+ 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
+ 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 5e526c54247e..cc0eb543cc70 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -1315,17 +1315,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
+ 	return NULL;
+ }
+ 
++/*
++ * Special variant of intel_pmu_save_and_restart() for auto-reload.
++ */
++static int
++intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
++{
++	struct hw_perf_event *hwc = &event->hw;
++	int shift = 64 - x86_pmu.cntval_bits;
++	u64 period = hwc->sample_period;
++	u64 prev_raw_count, new_raw_count;
++	s64 new, old;
++
++	WARN_ON(!period);
++
++	/*
++	 * drain_pebs() only happens when the PMU is disabled.
++	 */
++	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
++
++	prev_raw_count = local64_read(&hwc->prev_count);
++	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
++	local64_set(&hwc->prev_count, new_raw_count);
++
++	/*
++	 * Since the counter increments a negative counter value and
++	 * overflows on the sign switch, giving the interval:
++	 *
++	 *   [-period, 0]
++	 *
++	 * the difference between two consequtive reads is:
++	 *
++	 *   A) value2 - value1;
++	 *      when no overflows have happened in between,
++	 *
++	 *   B) (0 - value1) + (value2 - (-period));
++	 *      when one overflow happened in between,
++	 *
++	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
++	 *      when @n overflows happened in between.
++	 *
++	 * Here A) is the obvious difference, B) is the extension to the
++	 * discrete interval, where the first term is to the top of the
++	 * interval and the second term is from the bottom of the next
++	 * interval and C) the extension to multiple intervals, where the
++	 * middle term is the whole intervals covered.
++	 *
++	 * An equivalent of C, by reduction, is:
++	 *
++	 *   value2 - value1 + n * period
++	 */
++	new = ((s64)(new_raw_count << shift) >> shift);
++	old = ((s64)(prev_raw_count << shift) >> shift);
++	local64_add(new - old + count * period, &event->count);
++
++	perf_event_update_userpage(event);
++
++	return 0;
++}
++
+ static void __intel_pmu_pebs_event(struct perf_event *event,
+ 				   struct pt_regs *iregs,
+ 				   void *base, void *top,
+ 				   int bit, int count)
+ {
++	struct hw_perf_event *hwc = &event->hw;
+ 	struct perf_sample_data data;
+ 	struct pt_regs regs;
+ 	void *at = get_next_pebs_record_by_bit(base, top, bit);
+ 
+-	if (!intel_pmu_save_and_restart(event) &&
+-	    !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
++	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
++		/*
++		 * Now, auto-reload is only enabled in fixed period mode.
++		 * The reload value is always hwc->sample_period.
++		 * May need to change it, if auto-reload is enabled in
++		 * freq mode later.
++		 */
++		intel_pmu_save_and_restart_reload(event, count);
++	} else if (!intel_pmu_save_and_restart(event))
+ 		return;
+ 
+ 	while (count > 1) {
+@@ -1377,8 +1444,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
+ 		return;
+ 
+ 	n = top - at;
+-	if (n <= 0)
++	if (n <= 0) {
++		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
++			intel_pmu_save_and_restart_reload(event, 0);
+ 		return;
++	}
+ 
+ 	__intel_pmu_pebs_event(event, iregs, at, top, 0, n);
+ }
+@@ -1401,8 +1471,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
+ 
+ 	ds->pebs_index = ds->pebs_buffer_base;
+ 
+-	if (unlikely(base >= top))
++	if (unlikely(base >= top)) {
++		/*
++		 * The drain_pebs() could be called twice in a short period
++		 * for auto-reload event in pmu::read(). There are no
++		 * overflows have happened in between.
++		 * It needs to call intel_pmu_save_and_restart_reload() to
++		 * update the event->count for this case.
++		 */
++		for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
++				 x86_pmu.max_pebs_events) {
++			event = cpuc->events[bit];
++			if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
++				intel_pmu_save_and_restart_reload(event, 0);
++		}
+ 		return;
++	}
+ 
+ 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
+ 		struct pebs_record_nhm *p = at;
+diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
+index 39cd0615f04f..5e2ef399ac86 100644
+--- a/arch/x86/events/perf_event.h
++++ b/arch/x86/events/perf_event.h
+@@ -557,7 +557,7 @@ struct x86_pmu {
+ 	struct x86_pmu_quirk *quirks;
+ 	int		perfctr_second_write;
+ 	bool		late_ack;
+-	unsigned	(*limit_period)(struct perf_event *event, unsigned l);
++	u64		(*limit_period)(struct perf_event *event, u64 l);
+ 
+ 	/*
+ 	 * sysfs attrs
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 84137c22fdfa..6690cd3fc8b1 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
+ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
+ {
+ 	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+-	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
++	/*
++	 * Use boot_cpu_has() instead of this_cpu_has() as this function
++	 * might be called during early boot. This should work even after
++	 * boot because all CPU's the have same capabilities:
++	 */
++	VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID));
+ 	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
+ }
+ 
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index b203af0855b5..5071cc7972ea 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1570,7 +1570,7 @@ void setup_local_APIC(void)
+ 	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
+ 	 */
+ 	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
+-	if (!cpu && (pic_mode || !value)) {
++	if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
+ 		value = APIC_DM_EXTINT;
+ 		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
+ 	} else {
+diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
+index 25de5f6ca997..5cd387fcc777 100644
+--- a/arch/x86/kernel/devicetree.c
++++ b/arch/x86/kernel/devicetree.c
+@@ -11,6 +11,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_irq.h>
++#include <linux/libfdt.h>
+ #include <linux/slab.h>
+ #include <linux/pci.h>
+ #include <linux/of_pci.h>
+@@ -194,19 +195,22 @@ static struct of_ioapic_type of_ioapic_type[] =
+ static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+ 			      unsigned int nr_irqs, void *arg)
+ {
+-	struct of_phandle_args *irq_data = (void *)arg;
++	struct irq_fwspec *fwspec = (struct irq_fwspec *)arg;
+ 	struct of_ioapic_type *it;
+ 	struct irq_alloc_info tmp;
++	int type_index;
+ 
+-	if (WARN_ON(irq_data->args_count < 2))
++	if (WARN_ON(fwspec->param_count < 2))
+ 		return -EINVAL;
+-	if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
++
++	type_index = fwspec->param[1];
++	if (type_index >= ARRAY_SIZE(of_ioapic_type))
+ 		return -EINVAL;
+ 
+-	it = &of_ioapic_type[irq_data->args[1]];
++	it = &of_ioapic_type[type_index];
+ 	ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
+ 	tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
+-	tmp.ioapic_pin = irq_data->args[0];
++	tmp.ioapic_pin = fwspec->param[0];
+ 
+ 	return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
+ }
+@@ -270,14 +274,15 @@ static void __init x86_flattree_get_config(void)
+ 
+ 	map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128);
+ 
+-	initial_boot_params = dt = early_memremap(initial_dtb, map_len);
+-	size = of_get_flat_dt_size();
++	dt = early_memremap(initial_dtb, map_len);
++	size = fdt_totalsize(dt);
+ 	if (map_len < size) {
+ 		early_memunmap(dt, map_len);
+-		initial_boot_params = dt = early_memremap(initial_dtb, size);
++		dt = early_memremap(initial_dtb, size);
+ 		map_len = size;
+ 	}
+ 
++	early_init_dt_verify(dt);
+ 	unflatten_and_copy_device_tree();
+ 	early_memunmap(dt, map_len);
+ }
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 3f400004f602..752f361ef453 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -402,8 +402,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 
+ 	/* cpuid 7.0.edx*/
+ 	const u32 kvm_cpuid_7_0_edx_x86_features =
+-		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
+-		F(ARCH_CAPABILITIES);
++		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
++		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+ 
+ 	/* all calls to cpuid_count() should be made on the same cpu */
+ 	get_cpu();
+@@ -490,6 +490,11 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 				entry->ecx &= ~F(PKU);
+ 			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ 			cpuid_mask(&entry->edx, CPUID_7_EDX);
++			/*
++			 * We emulate ARCH_CAPABILITIES in software even
++			 * if the host doesn't support it.
++			 */
++			entry->edx |= F(ARCH_CAPABILITIES);
+ 		} else {
+ 			entry->ebx = 0;
+ 			entry->ecx = 0;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 7cf470a3755f..3773c4625114 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -321,8 +321,16 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
+ 	if (!lapic_in_kernel(vcpu))
+ 		return;
+ 
++	/*
++	 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
++	 * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
++	 * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
++	 * version first and level-triggered interrupts never get EOIed in
++	 * IOAPIC.
++	 */
+ 	feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
+-	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
++	if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
++	    !ioapic_in_kernel(vcpu->kvm))
+ 		v |= APIC_LVR_DIRECTED_EOI;
+ 	kvm_lapic_set_reg(apic, APIC_LVR, v);
+ }
+@@ -1514,11 +1522,23 @@ static bool set_target_expiration(struct kvm_lapic *apic)
+ 
+ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+ {
+-	apic->lapic_timer.tscdeadline +=
+-		nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
++	ktime_t now = ktime_get();
++	u64 tscl = rdtsc();
++	ktime_t delta;
++
++	/*
++	 * Synchronize both deadlines to the same time source or
++	 * differences in the periods (caused by differences in the
++	 * underlying clocks or numerical approximation errors) will
++	 * cause the two to drift apart over time as the errors
++	 * accumulate.
++	 */
+ 	apic->lapic_timer.target_expiration =
+ 		ktime_add_ns(apic->lapic_timer.target_expiration,
+ 				apic->lapic_timer.period);
++	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
++	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
++		nsec_to_cycles(apic->vcpu, delta);
+ }
+ 
+ static void start_sw_period(struct kvm_lapic *apic)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 3deb153bf9d9..11e2147c3824 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2561,6 +2561,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
+ 		return;
+ 	}
+ 
++	WARN_ON_ONCE(vmx->emulation_required);
++
+ 	if (kvm_exception_is_soft(nr)) {
+ 		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ 			     vmx->vcpu.arch.event_exit_inst_len);
+@@ -6854,12 +6856,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ 			goto out;
+ 		}
+ 
+-		if (err != EMULATE_DONE) {
+-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+-			vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+-			vcpu->run->internal.ndata = 0;
+-			return 0;
+-		}
++		if (err != EMULATE_DONE)
++			goto emulation_error;
++
++		if (vmx->emulation_required && !vmx->rmode.vm86_active &&
++		    vcpu->arch.exception.pending)
++			goto emulation_error;
+ 
+ 		if (vcpu->arch.halt_request) {
+ 			vcpu->arch.halt_request = 0;
+@@ -6875,6 +6877,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ 
+ out:
+ 	return ret;
++
++emulation_error:
++	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
++	vcpu->run->internal.ndata = 0;
++	return 0;
+ }
+ 
+ static int __grow_ple_window(int val)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f3df3a934733..999560ff12b5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7777,6 +7777,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ {
+ 	struct msr_data apic_base_msr;
+ 	int mmu_reset_needed = 0;
++	int cpuid_update_needed = 0;
+ 	int pending_vec, max_bits, idx;
+ 	struct desc_ptr dt;
+ 	int ret = -EINVAL;
+@@ -7817,8 +7818,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ 	vcpu->arch.cr0 = sregs->cr0;
+ 
+ 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
++	cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
++				(X86_CR4_OSXSAVE | X86_CR4_PKE));
+ 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
+-	if (sregs->cr4 & (X86_CR4_OSXSAVE | X86_CR4_PKE))
++	if (cpuid_update_needed)
+ 		kvm_update_cpuid(vcpu);
+ 
+ 	idx = srcu_read_lock(&vcpu->kvm->srcu);
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 85cf12219dea..94c41044a578 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -298,9 +298,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
+ 
+ 	/*
+ 	 * The .rodata section needs to be read-only. Using the pfn
+-	 * catches all aliases.
++	 * catches all aliases.  This also includes __ro_after_init,
++	 * so do not enforce until kernel_set_to_readonly is true.
+ 	 */
+-	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
++	if (kernel_set_to_readonly &&
++	    within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+ 		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
+ 		pgprot_val(forbidden) |= _PAGE_RW;
+ 
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 34cda7e0551b..c03c85e4fb6a 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/mm.h>
+ #include <linux/gfp.h>
++#include <linux/hugetlb.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+ #include <asm/tlb.h>
+@@ -636,6 +637,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
+ 	    (mtrr != MTRR_TYPE_WRBACK))
+ 		return 0;
+ 
++	/* Bail out if we are we on a populated non-leaf entry: */
++	if (pud_present(*pud) && !pud_huge(*pud))
++		return 0;
++
+ 	prot = pgprot_4k_2_large(prot);
+ 
+ 	set_pte((pte_t *)pud, pfn_pte(
+@@ -664,6 +669,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
+ 		return 0;
+ 	}
+ 
++	/* Bail out if we are we on a populated non-leaf entry: */
++	if (pmd_present(*pmd) && !pmd_huge(*pmd))
++		return 0;
++
+ 	prot = pgprot_4k_2_large(prot);
+ 
+ 	set_pte((pte_t *)pmd, pfn_pte(
+diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
+index 754431031282..552c1f725b6c 100644
+--- a/drivers/acpi/acpi_pad.c
++++ b/drivers/acpi/acpi_pad.c
+@@ -110,6 +110,7 @@ static void round_robin_cpu(unsigned int tsk_index)
+ 		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+ 	if (cpumask_empty(tmp)) {
+ 		mutex_unlock(&round_robin_lock);
++		free_cpumask_var(tmp);
+ 		return;
+ 	}
+ 	for_each_cpu(cpu, tmp) {
+@@ -127,6 +128,8 @@ static void round_robin_cpu(unsigned int tsk_index)
+ 	mutex_unlock(&round_robin_lock);
+ 
+ 	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
++
++	free_cpumask_var(tmp);
+ }
+ 
+ static void exit_round_robin(unsigned int tsk_index)
+diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
+index 4b2b0b44a16b..a65c186114eb 100644
+--- a/drivers/acpi/acpica/evevent.c
++++ b/drivers/acpi/acpica/evevent.c
+@@ -204,6 +204,7 @@ u32 acpi_ev_fixed_event_detect(void)
+ 	u32 fixed_status;
+ 	u32 fixed_enable;
+ 	u32 i;
++	acpi_status status;
+ 
+ 	ACPI_FUNCTION_NAME(ev_fixed_event_detect);
+ 
+@@ -211,8 +212,12 @@ u32 acpi_ev_fixed_event_detect(void)
+ 	 * Read the fixed feature status and enable registers, as all the cases
+ 	 * depend on their values. Ignore errors here.
+ 	 */
+-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
+-	(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
++	status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
++	status |=
++	    acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
++	if (ACPI_FAILURE(status)) {
++		return (int_status);
++	}
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
+ 			  "Fixed Event Block: Enable %08X Status %08X\n",
+diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
+index c2d883b8c45e..a18e61081013 100644
+--- a/drivers/acpi/acpica/nseval.c
++++ b/drivers/acpi/acpica/nseval.c
+@@ -308,6 +308,14 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
+ 		/* Map AE_CTRL_RETURN_VALUE to AE_OK, we are done with it */
+ 
+ 		status = AE_OK;
++	} else if (ACPI_FAILURE(status)) {
++
++		/* If return_object exists, delete it */
++
++		if (info->return_object) {
++			acpi_ut_remove_reference(info->return_object);
++			info->return_object = NULL;
++		}
+ 	}
+ 
+ 	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
+index dbc51bc5fdd6..5ca895da3b10 100644
+--- a/drivers/acpi/acpica/psargs.c
++++ b/drivers/acpi/acpica/psargs.c
+@@ -890,6 +890,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
+ 						      ACPI_POSSIBLE_METHOD_CALL);
+ 
+ 			if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) {
++
++				/* Free method call op and corresponding namestring sub-ob */
++
++				acpi_ps_free_op(arg->common.value.arg);
+ 				acpi_ps_free_op(arg);
+ 				arg = NULL;
+ 				walk_state->arg_count = 1;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 1ff17799769d..1d396b6e6000 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
++	{ PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 0df21f046fc6..d4fb9e0c29ee 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4493,6 +4493,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
+ 	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },
+ 
++	/* Some Sandisk SSDs lock up hard with NCQ enabled.  Reported on
++	   SD7SN6S256G and SD8SN8U256G */
++	{ "SanDisk SD[78]SN*G",	NULL,		ATA_HORKAGE_NONCQ, },
++
+ 	/* devices which puke on READ_NATIVE_MAX */
+ 	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
+ 	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
+@@ -4553,6 +4557,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
++	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 7dd36ace6152..103b5a38ee38 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -524,7 +524,7 @@ static int fw_add_devm_name(struct device *dev, const char *name)
+ 
+ 	fwn = fw_find_devm_name(dev, name);
+ 	if (fwn)
+-		return 1;
++		return 0;
+ 
+ 	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
+ 			   GFP_KERNEL);
+@@ -552,6 +552,7 @@ static int assign_fw(struct firmware *fw, struct device *device,
+ 		     unsigned int opt_flags)
+ {
+ 	struct fw_priv *fw_priv = fw->priv;
++	int ret;
+ 
+ 	mutex_lock(&fw_lock);
+ 	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
+@@ -568,8 +569,13 @@ static int assign_fw(struct firmware *fw, struct device *device,
+ 	 */
+ 	/* don't cache firmware handled without uevent */
+ 	if (device && (opt_flags & FW_OPT_UEVENT) &&
+-	    !(opt_flags & FW_OPT_NOCACHE))
+-		fw_add_devm_name(device, fw_priv->fw_name);
++	    !(opt_flags & FW_OPT_NOCACHE)) {
++		ret = fw_add_devm_name(device, fw_priv->fw_name);
++		if (ret) {
++			mutex_unlock(&fw_lock);
++			return ret;
++		}
++	}
+ 
+ 	/*
+ 	 * After caching firmware image is started, let it piggyback
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 02a497e7c785..e5e067091572 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1923,10 +1923,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
+ 
+ 	dev->power.wakeup_path = false;
+ 
+-	if (dev->power.no_pm_callbacks) {
+-		ret = 1;	/* Let device go direct_complete */
++	if (dev->power.no_pm_callbacks)
+ 		goto unlock;
+-	}
+ 
+ 	if (dev->pm_domain)
+ 		callback = dev->pm_domain->ops.prepare;
+@@ -1960,7 +1958,8 @@ static int device_prepare(struct device *dev, pm_message_t state)
+ 	 */
+ 	spin_lock_irq(&dev->power.lock);
+ 	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+-		pm_runtime_suspended(dev) && ret > 0 &&
++		((pm_runtime_suspended(dev) && ret > 0) ||
++		 dev->power.no_pm_callbacks) &&
+ 		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
+ 	spin_unlock_irq(&dev->power.lock);
+ 	return 0;
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 453116fd4362..c7b7c5fa73ab 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -99,7 +99,7 @@ bool regmap_cached(struct regmap *map, unsigned int reg)
+ 	int ret;
+ 	unsigned int val;
+ 
+-	if (map->cache == REGCACHE_NONE)
++	if (map->cache_type == REGCACHE_NONE)
+ 		return false;
+ 
+ 	if (!map->cache_ops)
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index f040aba48d50..27e9686b6d3a 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -184,7 +184,7 @@ static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
+ {
+ 	int i;
+ 	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
+-	char interrupts[20];
++	char interrupts[25];
+ 	char *ints = interrupts;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
+diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
+index 287a09611c0f..763f06603131 100644
+--- a/drivers/block/null_blk.c
++++ b/drivers/block/null_blk.c
+@@ -72,6 +72,7 @@ enum nullb_device_flags {
+ 	NULLB_DEV_FL_CACHE	= 3,
+ };
+ 
++#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
+ /*
+  * nullb_page is a page in memory for nullb devices.
+  *
+@@ -86,10 +87,10 @@ enum nullb_device_flags {
+  */
+ struct nullb_page {
+ 	struct page *page;
+-	unsigned long bitmap;
++	DECLARE_BITMAP(bitmap, MAP_SZ);
+ };
+-#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
+-#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
++#define NULLB_PAGE_LOCK (MAP_SZ - 1)
++#define NULLB_PAGE_FREE (MAP_SZ - 2)
+ 
+ struct nullb_device {
+ 	struct nullb *nullb;
+@@ -728,7 +729,7 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+ 	if (!t_page->page)
+ 		goto out_freepage;
+ 
+-	t_page->bitmap = 0;
++	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
+ 	return t_page;
+ out_freepage:
+ 	kfree(t_page);
+@@ -738,13 +739,20 @@ static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+ 
+ static void null_free_page(struct nullb_page *t_page)
+ {
+-	__set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
+-	if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
++	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
++	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
+ 		return;
+ 	__free_page(t_page->page);
+ 	kfree(t_page);
+ }
+ 
++static bool null_page_empty(struct nullb_page *page)
++{
++	int size = MAP_SZ - 2;
++
++	return find_first_bit(page->bitmap, size) == size;
++}
++
+ static void null_free_sector(struct nullb *nullb, sector_t sector,
+ 	bool is_cache)
+ {
+@@ -759,9 +767,9 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
+ 
+ 	t_page = radix_tree_lookup(root, idx);
+ 	if (t_page) {
+-		__clear_bit(sector_bit, &t_page->bitmap);
++		__clear_bit(sector_bit, t_page->bitmap);
+ 
+-		if (!t_page->bitmap) {
++		if (null_page_empty(t_page)) {
+ 			ret = radix_tree_delete_item(root, idx, t_page);
+ 			WARN_ON(ret != t_page);
+ 			null_free_page(ret);
+@@ -832,7 +840,7 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
+ 	t_page = radix_tree_lookup(root, idx);
+ 	WARN_ON(t_page && t_page->page->index != idx);
+ 
+-	if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
++	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
+ 		return t_page;
+ 
+ 	return NULL;
+@@ -895,10 +903,10 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
+ 
+ 	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
+ 
+-	__clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
+-	if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
++	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
++	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
+ 		null_free_page(c_page);
+-		if (t_page && t_page->bitmap == 0) {
++		if (t_page && null_page_empty(t_page)) {
+ 			ret = radix_tree_delete_item(&nullb->dev->data,
+ 				idx, t_page);
+ 			null_free_page(t_page);
+@@ -914,11 +922,11 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
+ 
+ 	for (i = 0; i < PAGE_SECTORS;
+ 			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
+-		if (test_bit(i, &c_page->bitmap)) {
++		if (test_bit(i, c_page->bitmap)) {
+ 			offset = (i << SECTOR_SHIFT);
+ 			memcpy(dst + offset, src + offset,
+ 				nullb->dev->blocksize);
+-			__set_bit(i, &t_page->bitmap);
++			__set_bit(i, t_page->bitmap);
+ 		}
+ 	}
+ 
+@@ -955,10 +963,10 @@ static int null_make_cache_space(struct nullb *nullb, unsigned long n)
+ 		 * We found the page which is being flushed to disk by other
+ 		 * threads
+ 		 */
+-		if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
++		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
+ 			c_pages[i] = NULL;
+ 		else
+-			__set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
++			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
+ 	}
+ 
+ 	one_round = 0;
+@@ -1011,7 +1019,7 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
+ 		kunmap_atomic(dst);
+ 		kunmap_atomic(src);
+ 
+-		__set_bit(sector & SECTOR_MASK, &t_page->bitmap);
++		__set_bit(sector & SECTOR_MASK, t_page->bitmap);
+ 
+ 		if (is_fua)
+ 			null_free_sector(nullb, sector, true);
+@@ -1802,10 +1810,6 @@ static int __init null_init(void)
+ 	struct nullb *nullb;
+ 	struct nullb_device *dev;
+ 
+-	/* check for nullb_page.bitmap */
+-	if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
+-		return -EINVAL;
+-
+ 	if (g_bs > PAGE_SIZE) {
+ 		pr_warn("null_blk: invalid block size\n");
+ 		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
+index 7b8c6368beb7..a026211afb51 100644
+--- a/drivers/block/paride/pcd.c
++++ b/drivers/block/paride/pcd.c
+@@ -230,6 +230,8 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
+ 	struct pcd_unit *cd = bdev->bd_disk->private_data;
+ 	int ret;
+ 
++	check_disk_change(bdev);
++
+ 	mutex_lock(&pcd_mutex);
+ 	ret = cdrom_open(&cd->info, bdev, mode);
+ 	mutex_unlock(&pcd_mutex);
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 5f7d86509f2f..bfc566d3f31a 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -1152,9 +1152,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
+ 
+ 	cd_dbg(CD_OPEN, "entering cdrom_open\n");
+ 
+-	/* open is event synchronization point, check events first */
+-	check_disk_change(bdev);
+-
+ 	/* if this was a O_NONBLOCK open and we should honor the flags,
+ 	 * do a quick open without drive/disc integrity checks. */
+ 	cdi->use_count++;
+diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
+index 6495b03f576c..ae3a7537cf0f 100644
+--- a/drivers/cdrom/gdrom.c
++++ b/drivers/cdrom/gdrom.c
+@@ -497,6 +497,9 @@ static const struct cdrom_device_ops gdrom_ops = {
+ static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
+ {
+ 	int ret;
++
++	check_disk_change(bdev);
++
+ 	mutex_lock(&gdrom_mutex);
+ 	ret = cdrom_open(gd.cd_info, bdev, mode);
+ 	mutex_unlock(&gdrom_mutex);
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index 7a84cec30c3a..6767d965c36c 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -163,6 +163,8 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
+ 
+ 	/* Clock is optional on most platforms */
+ 	priv->clk = devm_clk_get(dev, NULL);
++	if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
++		return -EPROBE_DEFER;
+ 
+ 	priv->rng.name = pdev->name;
+ 	priv->rng.init = bcm2835_rng_init;
+diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
+index 63d84e6f1891..83c695938a2d 100644
+--- a/drivers/char/hw_random/stm32-rng.c
++++ b/drivers/char/hw_random/stm32-rng.c
+@@ -21,6 +21,7 @@
+ #include <linux/of_address.h>
+ #include <linux/of_platform.h>
+ #include <linux/pm_runtime.h>
++#include <linux/reset.h>
+ #include <linux/slab.h>
+ 
+ #define RNG_CR 0x00
+@@ -46,6 +47,7 @@ struct stm32_rng_private {
+ 	struct hwrng rng;
+ 	void __iomem *base;
+ 	struct clk *clk;
++	struct reset_control *rst;
+ };
+ 
+ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+@@ -140,6 +142,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
+ 	if (IS_ERR(priv->clk))
+ 		return PTR_ERR(priv->clk);
+ 
++	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
++	if (!IS_ERR(priv->rst)) {
++		reset_control_assert(priv->rst);
++		udelay(2);
++		reset_control_deassert(priv->rst);
++	}
++
+ 	dev_set_drvdata(dev, priv);
+ 
+ 	priv->rng.name = dev_driver_string(dev),
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index f929e72bdac8..16d7fb563718 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -761,7 +761,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			ssif_info->ssif_state = SSIF_NORMAL;
+ 			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			pr_warn(PFX "Error getting flags: %d %d, %x\n",
+-			       result, len, data[2]);
++			       result, len, (len >= 3) ? data[2] : 0);
+ 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ 			/*
+@@ -783,7 +783,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 		if ((result < 0) || (len < 3) || (data[2] != 0)) {
+ 			/* Error clearing flags */
+ 			pr_warn(PFX "Error clearing flags: %d %d, %x\n",
+-			       result, len, data[2]);
++			       result, len, (len >= 3) ? data[2] : 0);
+ 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ 			   || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
+ 			pr_warn(PFX "Invalid response clearing flags: %x %x\n",
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index dcb1cb9a4572..8b432d6e846d 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -167,9 +167,19 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 		NSEC_PER_USEC;
+ 	policy->shared_type = cpu->shared_type;
+ 
+-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
++	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
++		int i;
++
+ 		cpumask_copy(policy->cpus, cpu->shared_cpu_map);
+-	else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
++
++		for_each_cpu(i, policy->cpus) {
++			if (unlikely(i == policy->cpu))
++				continue;
++
++			memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
++			       sizeof(cpu->perf_caps));
++		}
++	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
+ 		/* Support only SW_ANY for now. */
+ 		pr_debug("Unsupported CPU co-ord type\n");
+ 		return -EFAULT;
+@@ -233,8 +243,13 @@ static int __init cppc_cpufreq_init(void)
+ 	return ret;
+ 
+ out:
+-	for_each_possible_cpu(i)
+-		kfree(all_cpu_data[i]);
++	for_each_possible_cpu(i) {
++		cpu = all_cpu_data[i];
++		if (!cpu)
++			break;
++		free_cpumask_var(cpu->shared_cpu_map);
++		kfree(cpu);
++	}
+ 
+ 	kfree(all_cpu_data);
+ 	return -ENODEV;
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index de33ebf008ad..8814c572e263 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -1327,14 +1327,14 @@ static int cpufreq_online(unsigned int cpu)
+ 	return 0;
+ 
+ out_exit_policy:
++	for_each_cpu(j, policy->real_cpus)
++		remove_cpu_dev_symlink(policy, get_cpu_device(j));
++
+ 	up_write(&policy->rwsem);
+ 
+ 	if (cpufreq_driver->exit)
+ 		cpufreq_driver->exit(policy);
+ 
+-	for_each_cpu(j, policy->real_cpus)
+-		remove_cpu_dev_symlink(policy, get_cpu_device(j));
+-
+ out_free_policy:
+ 	cpufreq_policy_free(policy);
+ 	return ret;
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index d7327fd5f445..de1fd59fe136 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -1510,7 +1510,7 @@ static void pl330_dotask(unsigned long data)
+ /* Returns 1 if state was updated, 0 otherwise */
+ static int pl330_update(struct pl330_dmac *pl330)
+ {
+-	struct dma_pl330_desc *descdone, *tmp;
++	struct dma_pl330_desc *descdone;
+ 	unsigned long flags;
+ 	void __iomem *regs;
+ 	u32 val;
+@@ -1588,7 +1588,9 @@ static int pl330_update(struct pl330_dmac *pl330)
+ 	}
+ 
+ 	/* Now that we are in no hurry, do the callbacks */
+-	list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
++	while (!list_empty(&pl330->req_done)) {
++		descdone = list_first_entry(&pl330->req_done,
++					    struct dma_pl330_desc, rqd);
+ 		list_del(&descdone->rqd);
+ 		spin_unlock_irqrestore(&pl330->lock, flags);
+ 		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
+diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
+index d076940e0c69..4cc58904ee52 100644
+--- a/drivers/dma/qcom/bam_dma.c
++++ b/drivers/dma/qcom/bam_dma.c
+@@ -393,6 +393,7 @@ struct bam_device {
+ 	struct device_dma_parameters dma_parms;
+ 	struct bam_chan *channels;
+ 	u32 num_channels;
++	u32 num_ees;
+ 
+ 	/* execution environment ID, from DT */
+ 	u32 ee;
+@@ -1128,15 +1129,19 @@ static int bam_init(struct bam_device *bdev)
+ 	u32 val;
+ 
+ 	/* read revision and configuration information */
+-	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
+-	val &= NUM_EES_MASK;
++	if (!bdev->num_ees) {
++		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
++		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
++	}
+ 
+ 	/* check that configured EE is within range */
+-	if (bdev->ee >= val)
++	if (bdev->ee >= bdev->num_ees)
+ 		return -EINVAL;
+ 
+-	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
+-	bdev->num_channels = val & BAM_NUM_PIPES_MASK;
++	if (!bdev->num_channels) {
++		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
++		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
++	}
+ 
+ 	if (bdev->controlled_remotely)
+ 		return 0;
+@@ -1232,6 +1237,18 @@ static int bam_dma_probe(struct platform_device *pdev)
+ 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+ 						"qcom,controlled-remotely");
+ 
++	if (bdev->controlled_remotely) {
++		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
++					   &bdev->num_channels);
++		if (ret)
++			dev_err(bdev->dev, "num-channels unspecified in dt\n");
++
++		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
++					   &bdev->num_ees);
++		if (ret)
++			dev_err(bdev->dev, "num-ees unspecified in dt\n");
++	}
++
+ 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+ 	if (IS_ERR(bdev->bamclk))
+ 		return PTR_ERR(bdev->bamclk);
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index d0cacdb0713e..2a2ccd9c78e4 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1301,8 +1301,17 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
+ 	 * If the cookie doesn't correspond to the currently running transfer
+ 	 * then the descriptor hasn't been processed yet, and the residue is
+ 	 * equal to the full descriptor size.
++	 * Also, a client driver is possible to call this function before
++	 * rcar_dmac_isr_channel_thread() runs. In this case, the "desc.running"
++	 * will be the next descriptor, and the done list will appear. So, if
++	 * the argument cookie matches the done list's cookie, we can assume
++	 * the residue is zero.
+ 	 */
+ 	if (cookie != desc->async_tx.cookie) {
++		list_for_each_entry(desc, &chan->desc.done, node) {
++			if (cookie == desc->async_tx.cookie)
++				return 0;
++		}
+ 		list_for_each_entry(desc, &chan->desc.pending, node) {
+ 			if (cookie == desc->async_tx.cookie)
+ 				return desc->size;
+@@ -1677,8 +1686,8 @@ static const struct dev_pm_ops rcar_dmac_pm = {
+ 	 *   - Wait for the current transfer to complete and stop the device,
+ 	 *   - Resume transfers, if any.
+ 	 */
+-	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+-				     pm_runtime_force_resume)
++	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++				      pm_runtime_force_resume)
+ 	SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
+ 			   NULL)
+ };
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index e763e1484331..c3be8ef9243f 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -186,7 +186,7 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
+ 	char *s;
+ 	int is_ff = 1, is_00 = 1, i;
+ 
+-	if (dmi_ident[slot] || dm->length <= index + 16)
++	if (dmi_ident[slot] || dm->length < index + 16)
+ 		return;
+ 
+ 	d = (u8 *) dm + index;
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 1cc41c3d6315..86a1ad17a32e 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -54,6 +54,9 @@ static struct ptdump_info efi_ptdump_info = {
+ 
+ static int __init ptdump_init(void)
+ {
++	if (!efi_enabled(EFI_RUNTIME_SERVICES))
++		return 0;
++
+ 	return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables");
+ }
+ device_initcall(ptdump_init);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+index 2a519f9062ee..e515ca01ffb2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+@@ -26,6 +26,7 @@
+ #define AMDGPU_AMDKFD_H_INCLUDED
+ 
+ #include <linux/types.h>
++#include <linux/mm.h>
+ #include <linux/mmu_context.h>
+ #include <kgd_kfd_interface.h>
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index a162d87ca0c8..b552a9416e92 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -321,14 +321,45 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
+ {
+ 	unsigned i;
+ 	int r, ret = 0;
++	long tmo_gfx, tmo_mm;
++
++	tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
++	if (amdgpu_sriov_vf(adev)) {
++		/* for MM engines in hypervisor side they are not scheduled together
++		 * with CP and SDMA engines, so even in exclusive mode MM engine could
++		 * still running on other VF thus the IB TEST TIMEOUT for MM engines
++		 * under SR-IOV should be set to a long time. 8 sec should be enough
++		 * for the MM comes back to this VF.
++		 */
++		tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
++	}
++
++	if (amdgpu_sriov_runtime(adev)) {
++		/* for CP & SDMA engines since they are scheduled together so
++		 * need to make the timeout width enough to cover the time
++		 * cost waiting for it coming back under RUNTIME only
++		*/
++		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
++	}
+ 
+ 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ 		struct amdgpu_ring *ring = adev->rings[i];
++		long tmo;
+ 
+ 		if (!ring || !ring->ready)
+ 			continue;
+ 
+-		r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
++		/* MM engine need more time */
++		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
++			ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
++			ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
++			ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
++			ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
++			tmo = tmo_mm;
++		else
++			tmo = tmo_gfx;
++
++		r = amdgpu_ring_test_ib(ring, tmo);
+ 		if (r) {
+ 			ring->ready = false;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index c06479615e8a..d7bbccd67eb9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -2954,7 +2954,13 @@ static int gfx_v9_0_hw_fini(void *handle)
+ 		gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+ 
+ 	if (amdgpu_sriov_vf(adev)) {
+-		pr_debug("For SRIOV client, shouldn't do anything.\n");
++		gfx_v9_0_cp_gfx_enable(adev, false);
++		/* must disable polling for SRIOV when hw finished, otherwise
++		 * CPC engine may still keep fetching WB address which is already
++		 * invalid after sw finished and trigger DMAR reading error in
++		 * hypervisor side.
++		 */
++		WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
+ 		return 0;
+ 	}
+ 	gfx_v9_0_cp_enable(adev, false);
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index fa63c564cf91..7657cc5784a5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -719,14 +719,17 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
+ 		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
+ 		       upper_32_bits(wptr_gpu_addr));
+ 		wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
+-		if (ring->use_pollmem)
++		if (ring->use_pollmem) {
++			/*wptr polling is not enogh fast, directly clean the wptr register */
++			WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ 			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ 						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ 						       ENABLE, 1);
+-		else
++		} else {
+ 			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ 						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ 						       ENABLE, 0);
++		}
+ 		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
+ 
+ 		/* enable DMA RB */
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 4d07ffebfd31..6d1dd64f50c3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -35,6 +35,7 @@
+ #include "core_types.h"
+ #include "set_mode_types.h"
+ #include "virtual/virtual_stream_encoder.h"
++#include "dpcd_defs.h"
+ 
+ #include "dce80/dce80_resource.h"
+ #include "dce100/dce100_resource.h"
+@@ -2428,7 +2429,8 @@ static void set_vsc_info_packet(
+ 	unsigned int vscPacketRevision = 0;
+ 	unsigned int i;
+ 
+-	if (stream->sink->link->psr_enabled) {
++	/*VSC packet set to 2 when DP revision >= 1.2*/
++	if (stream->sink->link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ 		vscPacketRevision = 2;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
+index b1ab4ab09532..60373d7eb220 100644
+--- a/drivers/gpu/drm/bridge/sii902x.c
++++ b/drivers/gpu/drm/bridge/sii902x.c
+@@ -137,7 +137,9 @@ static int sii902x_get_modes(struct drm_connector *connector)
+ 	struct sii902x *sii902x = connector_to_sii902x(connector);
+ 	struct regmap *regmap = sii902x->regmap;
+ 	u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
++	struct device *dev = &sii902x->i2c->dev;
+ 	unsigned long timeout;
++	unsigned int retries;
+ 	unsigned int status;
+ 	struct edid *edid;
+ 	int num = 0;
+@@ -159,7 +161,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
+ 		 time_before(jiffies, timeout));
+ 
+ 	if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+-		dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
++		dev_err(dev, "failed to acquire the i2c bus\n");
+ 		return -ETIMEDOUT;
+ 	}
+ 
+@@ -179,9 +181,19 @@ static int sii902x_get_modes(struct drm_connector *connector)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
++	/*
++	 * Sometimes the I2C bus can stall after failure to use the
++	 * EDID channel. Retry a few times to see if things clear
++	 * up, else continue anyway.
++	 */
++	retries = 5;
++	do {
++		ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
++				  &status);
++		retries--;
++	} while (ret && retries);
+ 	if (ret)
+-		return ret;
++		dev_err(dev, "failed to read status (%d)\n", ret);
+ 
+ 	ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+ 				 SII902X_SYS_CTRL_DDC_BUS_REQ |
+@@ -201,7 +213,7 @@ static int sii902x_get_modes(struct drm_connector *connector)
+ 
+ 	if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ 		      SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+-		dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
++		dev_err(dev, "failed to release the i2c bus\n");
+ 		return -ETIMEDOUT;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
+index 32d9bcf5be7f..f0d3ed5f2528 100644
+--- a/drivers/gpu/drm/drm_vblank.c
++++ b/drivers/gpu/drm/drm_vblank.c
+@@ -271,7 +271,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
+ 	store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
+ }
+ 
+-static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
++static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+ {
+ 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ 
+@@ -292,11 +292,11 @@ static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
+  * This is mostly useful for hardware that can obtain the scanout position, but
+  * doesn't have a hardware frame counter.
+  */
+-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
++u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	unsigned int pipe = drm_crtc_index(crtc);
+-	u32 vblank;
++	u64 vblank;
+ 	unsigned long flags;
+ 
+ 	WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp,
+@@ -1055,7 +1055,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
+ {
+ 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ 	int ret;
+-	u32 last;
++	u64 last;
+ 
+ 	if (WARN_ON(pipe >= dev->num_crtcs))
+ 		return;
+diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
+index f9ad0e960263..2751b9107fc5 100644
+--- a/drivers/gpu/drm/meson/meson_drv.c
++++ b/drivers/gpu/drm/meson/meson_drv.c
+@@ -189,40 +189,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
+ 	regs = devm_ioremap_resource(dev, res);
+-	if (IS_ERR(regs))
+-		return PTR_ERR(regs);
++	if (IS_ERR(regs)) {
++		ret = PTR_ERR(regs);
++		goto free_drm;
++	}
+ 
+ 	priv->io_base = regs;
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
+ 	/* Simply ioremap since it may be a shared register zone */
+ 	regs = devm_ioremap(dev, res->start, resource_size(res));
+-	if (!regs)
+-		return -EADDRNOTAVAIL;
++	if (!regs) {
++		ret = -EADDRNOTAVAIL;
++		goto free_drm;
++	}
+ 
+ 	priv->hhi = devm_regmap_init_mmio(dev, regs,
+ 					  &meson_regmap_config);
+ 	if (IS_ERR(priv->hhi)) {
+ 		dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
+-		return PTR_ERR(priv->hhi);
++		ret = PTR_ERR(priv->hhi);
++		goto free_drm;
+ 	}
+ 
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ 	/* Simply ioremap since it may be a shared register zone */
+ 	regs = devm_ioremap(dev, res->start, resource_size(res));
+-	if (!regs)
+-		return -EADDRNOTAVAIL;
++	if (!regs) {
++		ret = -EADDRNOTAVAIL;
++		goto free_drm;
++	}
+ 
+ 	priv->dmc = devm_regmap_init_mmio(dev, regs,
+ 					  &meson_regmap_config);
+ 	if (IS_ERR(priv->dmc)) {
+ 		dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+-		return PTR_ERR(priv->dmc);
++		ret = PTR_ERR(priv->dmc);
++		goto free_drm;
+ 	}
+ 
+ 	priv->vsync_irq = platform_get_irq(pdev, 0);
+ 
+-	drm_vblank_init(drm, 1);
++	ret = drm_vblank_init(drm, 1);
++	if (ret)
++		goto free_drm;
++
+ 	drm_mode_config_init(drm);
+ 	drm->mode_config.max_width = 3840;
+ 	drm->mode_config.max_height = 2160;
+diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
+index 04300b2da1b1..f2a38727fa85 100644
+--- a/drivers/gpu/drm/omapdrm/dss/dss.c
++++ b/drivers/gpu/drm/omapdrm/dss/dss.c
+@@ -1300,88 +1300,18 @@ static const struct soc_device_attribute dss_soc_devices[] = {
+ 
+ static int dss_bind(struct device *dev)
+ {
+-	struct platform_device *pdev = to_platform_device(dev);
+-	struct resource *dss_mem;
+-	u32 rev;
+ 	int r;
+ 
+-	dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
+-	dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
+-	if (IS_ERR(dss.base))
+-		return PTR_ERR(dss.base);
+-
+-	r = dss_get_clocks();
++	r = component_bind_all(dev, NULL);
+ 	if (r)
+ 		return r;
+ 
+-	r = dss_setup_default_clock();
+-	if (r)
+-		goto err_setup_clocks;
+-
+-	r = dss_video_pll_probe(pdev);
+-	if (r)
+-		goto err_pll_init;
+-
+-	r = dss_init_ports(pdev);
+-	if (r)
+-		goto err_init_ports;
+-
+-	pm_runtime_enable(&pdev->dev);
+-
+-	r = dss_runtime_get();
+-	if (r)
+-		goto err_runtime_get;
+-
+-	dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+-
+-	/* Select DPLL */
+-	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
+-
+-	dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
+-
+-#ifdef CONFIG_OMAP2_DSS_VENC
+-	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
+-	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
+-	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
+-#endif
+-	dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+-	dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+-	dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+-	dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+-	dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
+-
+-	rev = dss_read_reg(DSS_REVISION);
+-	pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+-
+-	dss_runtime_put();
+-
+-	r = component_bind_all(&pdev->dev, NULL);
+-	if (r)
+-		goto err_component;
+-
+-	dss_debugfs_create_file("dss", dss_dump_regs);
+-
+ 	pm_set_vt_switch(0);
+ 
+ 	omapdss_gather_components(dev);
+ 	omapdss_set_is_initialized(true);
+ 
+ 	return 0;
+-
+-err_component:
+-err_runtime_get:
+-	pm_runtime_disable(&pdev->dev);
+-	dss_uninit_ports(pdev);
+-err_init_ports:
+-	if (dss.video1_pll)
+-		dss_video_pll_uninit(dss.video1_pll);
+-
+-	if (dss.video2_pll)
+-		dss_video_pll_uninit(dss.video2_pll);
+-err_pll_init:
+-err_setup_clocks:
+-	dss_put_clocks();
+-	return r;
+ }
+ 
+ static void dss_unbind(struct device *dev)
+@@ -1391,18 +1321,6 @@ static void dss_unbind(struct device *dev)
+ 	omapdss_set_is_initialized(false);
+ 
+ 	component_unbind_all(&pdev->dev, NULL);
+-
+-	if (dss.video1_pll)
+-		dss_video_pll_uninit(dss.video1_pll);
+-
+-	if (dss.video2_pll)
+-		dss_video_pll_uninit(dss.video2_pll);
+-
+-	dss_uninit_ports(pdev);
+-
+-	pm_runtime_disable(&pdev->dev);
+-
+-	dss_put_clocks();
+ }
+ 
+ static const struct component_master_ops dss_component_ops = {
+@@ -1434,10 +1352,46 @@ static int dss_add_child_component(struct device *dev, void *data)
+ 	return 0;
+ }
+ 
++static int dss_probe_hardware(void)
++{
++	u32 rev;
++	int r;
++
++	r = dss_runtime_get();
++	if (r)
++		return r;
++
++	dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
++
++	/* Select DPLL */
++	REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
++
++	dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
++
++#ifdef CONFIG_OMAP2_DSS_VENC
++	REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);	/* venc dac demen */
++	REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);	/* venc clock 4x enable */
++	REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);	/* venc clock mode = normal */
++#endif
++	dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
++	dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
++	dss.dispc_clk_source = DSS_CLK_SRC_FCK;
++	dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
++	dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
++
++	rev = dss_read_reg(DSS_REVISION);
++	pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
++
++	dss_runtime_put();
++
++	return 0;
++}
++
+ static int dss_probe(struct platform_device *pdev)
+ {
+ 	const struct soc_device_attribute *soc;
+ 	struct component_match *match = NULL;
++	struct resource *dss_mem;
+ 	int r;
+ 
+ 	dss.pdev = pdev;
+@@ -1458,20 +1412,69 @@ static int dss_probe(struct platform_device *pdev)
+ 	else
+ 		dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ 
+-	r = dss_initialize_debugfs();
++	/* Map I/O registers, get and setup clocks. */
++	dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
++	if (IS_ERR(dss.base))
++		return PTR_ERR(dss.base);
++
++	r = dss_get_clocks();
+ 	if (r)
+ 		return r;
+ 
+-	/* add all the child devices as components */
++	r = dss_setup_default_clock();
++	if (r)
++		goto err_put_clocks;
++
++	/* Setup the video PLLs and the DPI and SDI ports. */
++	r = dss_video_pll_probe(pdev);
++	if (r)
++		goto err_put_clocks;
++
++	r = dss_init_ports(pdev);
++	if (r)
++		goto err_uninit_plls;
++
++	/* Enable runtime PM and probe the hardware. */
++	pm_runtime_enable(&pdev->dev);
++
++	r = dss_probe_hardware();
++	if (r)
++		goto err_pm_runtime_disable;
++
++	/* Initialize debugfs. */
++	r = dss_initialize_debugfs();
++	if (r)
++		goto err_pm_runtime_disable;
++
++	dss_debugfs_create_file("dss", dss_dump_regs);
++
++	/* Add all the child devices as components. */
+ 	device_for_each_child(&pdev->dev, &match, dss_add_child_component);
+ 
+ 	r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
+-	if (r) {
+-		dss_uninitialize_debugfs();
+-		return r;
+-	}
++	if (r)
++		goto err_uninit_debugfs;
+ 
+ 	return 0;
++
++err_uninit_debugfs:
++	dss_uninitialize_debugfs();
++
++err_pm_runtime_disable:
++	pm_runtime_disable(&pdev->dev);
++	dss_uninit_ports(pdev);
++
++err_uninit_plls:
++	if (dss.video1_pll)
++		dss_video_pll_uninit(dss.video1_pll);
++	if (dss.video2_pll)
++		dss_video_pll_uninit(dss.video2_pll);
++
++err_put_clocks:
++	dss_put_clocks();
++
++	return r;
+ }
+ 
+ static int dss_remove(struct platform_device *pdev)
+@@ -1480,6 +1483,18 @@ static int dss_remove(struct platform_device *pdev)
+ 
+ 	dss_uninitialize_debugfs();
+ 
++	pm_runtime_disable(&pdev->dev);
++
++	dss_uninit_ports(pdev);
++
++	if (dss.video1_pll)
++		dss_video_pll_uninit(dss.video1_pll);
++
++	if (dss.video2_pll)
++		dss_video_pll_uninit(dss.video2_pll);
++
++	dss_put_clocks();
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 5591984a392b..f9649bded63f 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -1597,7 +1597,7 @@ static const struct panel_desc ontat_yx700wv03 = {
+ 		.width = 154,
+ 		.height = 83,
+ 	},
+-	.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
++	.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ };
+ 
+ static const struct drm_display_mode ortustech_com43h4m85ulc_mode  = {
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+index 12d22f3db1af..6a4b8c98a719 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+@@ -59,11 +59,8 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
+ 
+ 	rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+ 
+-	/*
+-	 * Select the input, hardcode mode 0, enable LVDS operation and turn
+-	 * bias circuitry on.
+-	 */
+-	lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
++	/* Select the input and set the LVDS mode. */
++	lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
+ 	if (rcrtc->index == 2)
+ 		lvdcr0 |= LVDCR0_DUSEL;
+ 	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+@@ -74,6 +71,10 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
+ 			LVDCR1_CHSTBY_GEN2(1) | LVDCR1_CHSTBY_GEN2(0) |
+ 			LVDCR1_CLKSTBY_GEN2);
+ 
++	/* Enable LVDS operation and turn bias circuitry on. */
++	lvdcr0 |= LVDCR0_BEN | LVDCR0_LVEN;
++	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
++
+ 	/*
+ 	 * Turn the PLL on, wait for the startup delay, and turn the output
+ 	 * on.
+@@ -95,7 +96,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
+ 	u32 lvdcr0;
+ 	u32 pllcr;
+ 
+-	/* PLL clock configuration */
++	/* Set the PLL clock configuration and LVDS mode. */
+ 	if (freq < 42000)
+ 		pllcr = LVDPLLCR_PLLDIVCNT_42M;
+ 	else if (freq < 85000)
+@@ -107,6 +108,9 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
+ 
+ 	rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+ 
++	lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
++	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
++
+ 	/* Turn all the channels on. */
+ 	rcar_lvds_write(lvds, LVDCR1,
+ 			LVDCR1_CHSTBY_GEN3(3) | LVDCR1_CHSTBY_GEN3(2) |
+@@ -117,7 +121,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
+ 	 * Turn the PLL on, set it to LVDS normal mode, wait for the startup
+ 	 * delay and turn the output on.
+ 	 */
+-	lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
++	lvdcr0 |= LVDCR0_PLLON;
+ 	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ 
+ 	lvdcr0 |= LVDCR0_PWD;
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index 1d9655576b6e..6bf2f8289847 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -262,7 +262,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+ 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+ 	 */
+ 	vma->vm_flags &= ~VM_PFNMAP;
+-	vma->vm_pgoff = 0;
+ 
+ 	if (rk_obj->pages)
+ 		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
+@@ -297,6 +296,12 @@ int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
++	 * whole buffer from the start.
++	 */
++	vma->vm_pgoff = 0;
++
+ 	obj = vma->vm_private_data;
+ 
+ 	return rockchip_drm_gem_object_mmap(obj, vma);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+index 557a033fb610..8545488aa0cf 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+@@ -135,17 +135,24 @@
+ 
+ #else
+ 
+-/* In the 32-bit version of this macro, we use "m" because there is no
+- * more register left for bp
++/*
++ * In the 32-bit version of this macro, we store bp in a memory location
++ * because we've ran out of registers.
++ * Now we can't reference that memory location while we've modified
++ * %esp or %ebp, so we first push it on the stack, just before we push
++ * %ebp, and then when we need it we read it from the stack where we
++ * just pushed it.
+  */
+ #define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di,	\
+ 			port_num, magic, bp,		\
+ 			eax, ebx, ecx, edx, si, di)	\
+ ({							\
+-	asm volatile ("push %%ebp;"			\
+-		"mov %12, %%ebp;"			\
++	asm volatile ("push %12;"			\
++		"push %%ebp;"				\
++		"mov 0x04(%%esp), %%ebp;"		\
+ 		"rep outsb;"				\
+-		"pop %%ebp;" :				\
++		"pop %%ebp;"				\
++		"add $0x04, %%esp;" :			\
+ 		"=a"(eax),				\
+ 		"=b"(ebx),				\
+ 		"=c"(ecx),				\
+@@ -167,10 +174,12 @@
+ 		       port_num, magic, bp,		\
+ 		       eax, ebx, ecx, edx, si, di)	\
+ ({							\
+-	asm volatile ("push %%ebp;"			\
+-		"mov %12, %%ebp;"			\
++	asm volatile ("push %12;"			\
++		"push %%ebp;"				\
++		"mov 0x04(%%esp), %%ebp;"		\
+ 		"rep insb;"				\
+-		"pop %%ebp" :				\
++		"pop %%ebp;"				\
++		"add $0x04, %%esp;" :			\
+ 		"=a"(eax),				\
+ 		"=b"(ebx),				\
+ 		"=c"(ecx),				\
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index 3ec9eae831b8..f9413c0199f0 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -453,7 +453,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
+ 				 struct drm_plane_state *old_state)
+ {
+ 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
++	struct drm_crtc *crtc = plane->state->crtc ?
++		plane->state->crtc : old_state->crtc;
+ 
++	if (vps->dmabuf)
++		vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
+ 	vmw_dmabuf_unreference(&vps->dmabuf);
+ 	vps->dmabuf_size = 0;
+ 
+@@ -491,10 +495,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
+ 	}
+ 
+ 	size = new_state->crtc_w * new_state->crtc_h * 4;
++	dev_priv = vmw_priv(crtc->dev);
+ 
+ 	if (vps->dmabuf) {
+-		if (vps->dmabuf_size == size)
+-			return 0;
++		if (vps->dmabuf_size == size) {
++			/*
++			 * Note that this might temporarily up the pin-count
++			 * to 2, until cleanup_fb() is called.
++			 */
++			return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
++						      true);
++		}
+ 
+ 		vmw_dmabuf_unreference(&vps->dmabuf);
+ 		vps->dmabuf_size = 0;
+@@ -504,7 +515,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
+ 	if (!vps->dmabuf)
+ 		return -ENOMEM;
+ 
+-	dev_priv = vmw_priv(crtc->dev);
+ 	vmw_svga_enable(dev_priv);
+ 
+ 	/* After we have alloced the backing store might not be able to
+@@ -515,13 +525,18 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
+ 			      &vmw_vram_ne_placement,
+ 			      false, &vmw_dmabuf_bo_free);
+ 	vmw_overlay_resume_all(dev_priv);
+-
+-	if (ret != 0)
++	if (ret) {
+ 		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+-	else
+-		vps->dmabuf_size = size;
++		return ret;
++	}
+ 
+-	return ret;
++	vps->dmabuf_size = size;
++
++	/*
++	 * TTM already thinks the buffer is pinned, but make sure the
++	 * pin_count is upped.
++	 */
++	return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+ }
+ 
+ 
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index c219e43b8f02..f5f3f8cf57ea 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -1469,7 +1469,7 @@ static void nct6775_update_pwm(struct device *dev)
+ 		duty_is_dc = data->REG_PWM_MODE[i] &&
+ 		  (nct6775_read_value(data, data->REG_PWM_MODE[i])
+ 		   & data->PWM_MODE_MASK[i]);
+-		data->pwm_mode[i] = duty_is_dc;
++		data->pwm_mode[i] = !duty_is_dc;
+ 
+ 		fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
+ 		for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
+@@ -2350,7 +2350,7 @@ show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
+ 	struct nct6775_data *data = nct6775_update_device(dev);
+ 	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ 
+-	return sprintf(buf, "%d\n", !data->pwm_mode[sattr->index]);
++	return sprintf(buf, "%d\n", data->pwm_mode[sattr->index]);
+ }
+ 
+ static ssize_t
+@@ -2371,9 +2371,9 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+ 	if (val > 1)
+ 		return -EINVAL;
+ 
+-	/* Setting DC mode is not supported for all chips/channels */
++	/* Setting DC mode (0) is not supported for all chips/channels */
+ 	if (data->REG_PWM_MODE[nr] == 0) {
+-		if (val)
++		if (!val)
+ 			return -EINVAL;
+ 		return count;
+ 	}
+@@ -2382,7 +2382,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
+ 	data->pwm_mode[nr] = val;
+ 	reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
+ 	reg &= ~data->PWM_MODE_MASK[nr];
+-	if (val)
++	if (!val)
+ 		reg |= data->PWM_MODE_MASK[nr];
+ 	nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
+ 	mutex_unlock(&data->update_lock);
+diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
+index 00d6995af4c2..8a44e94d5679 100644
+--- a/drivers/hwmon/pmbus/adm1275.c
++++ b/drivers/hwmon/pmbus/adm1275.c
+@@ -154,7 +154,7 @@ static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
+ 	const struct adm1275_data *data = to_adm1275_data(info);
+ 	int ret = 0;
+ 
+-	if (page)
++	if (page > 0)
+ 		return -ENXIO;
+ 
+ 	switch (reg) {
+@@ -240,7 +240,7 @@ static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
+ 	const struct adm1275_data *data = to_adm1275_data(info);
+ 	int ret;
+ 
+-	if (page)
++	if (page > 0)
+ 		return -ENXIO;
+ 
+ 	switch (reg) {
+diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
+index dd4883a19045..e951f9b87abb 100644
+--- a/drivers/hwmon/pmbus/max8688.c
++++ b/drivers/hwmon/pmbus/max8688.c
+@@ -45,7 +45,7 @@ static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
+ {
+ 	int ret;
+ 
+-	if (page)
++	if (page > 0)
+ 		return -ENXIO;
+ 
+ 	switch (reg) {
+diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+index 6ea62c62ff27..9cdb3fbc8c1f 100644
+--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
++++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
+@@ -315,7 +315,7 @@ static void debug_dump_regs(struct debug_drvdata *drvdata)
+ 	}
+ 
+ 	pc = debug_adjust_pc(drvdata);
+-	dev_emerg(dev, " EDPCSR:  [<%p>] %pS\n", (void *)pc, (void *)pc);
++	dev_emerg(dev, " EDPCSR:  [<%px>] %pS\n", (void *)pc, (void *)pc);
+ 
+ 	if (drvdata->edcidsr_present)
+ 		dev_emerg(dev, " EDCIDSR: %08x\n", drvdata->edcidsr);
+diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
+index 1a023e30488c..c1793313bb08 100644
+--- a/drivers/hwtracing/intel_th/core.c
++++ b/drivers/hwtracing/intel_th/core.c
+@@ -935,7 +935,7 @@ EXPORT_SYMBOL_GPL(intel_th_trace_disable);
+ int intel_th_set_output(struct intel_th_device *thdev,
+ 			unsigned int master)
+ {
+-	struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
++	struct intel_th_device *hub = to_intel_th_hub(thdev);
+ 	struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+ 
+ 	if (!hubdrv->set_output)
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index 440fe4a96e68..a5a95ea5b81a 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -845,12 +845,16 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
+ 	 */
+ 	if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
+ 		drv_data->offload_enabled = true;
+-		drv_data->errata_delay = true;
++		/* The delay is only needed in standard mode (100kHz) */
++		if (bus_freq <= 100000)
++			drv_data->errata_delay = true;
+ 	}
+ 
+ 	if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
+ 		drv_data->offload_enabled = false;
+-		drv_data->errata_delay = true;
++		/* The delay is only needed in standard mode (100kHz) */
++		if (bus_freq <= 100000)
++			drv_data->errata_delay = true;
+ 	}
+ 
+ 	if (of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
+diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
+index 7c3ed7c9af77..5613cc2d51fc 100644
+--- a/drivers/ide/ide-cd.c
++++ b/drivers/ide/ide-cd.c
+@@ -1613,6 +1613,8 @@ static int idecd_open(struct block_device *bdev, fmode_t mode)
+ 	struct cdrom_info *info;
+ 	int rc = -ENXIO;
+ 
++	check_disk_change(bdev);
++
+ 	mutex_lock(&ide_cd_mutex);
+ 	info = ide_cd_get(bdev->bd_disk);
+ 	if (!info)
+diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
+index 45f2f095f793..4eb72ff539fc 100644
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -724,21 +724,19 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
+ {
+ 	int ret;
+ 	u16 gid_index;
+-	u8 p;
+-
+-	if (rdma_protocol_roce(device, port_num)) {
+-		ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
+-						 gid_type, port_num,
+-						 ndev,
+-						 &gid_index);
+-	} else if (rdma_protocol_ib(device, port_num)) {
+-		ret = ib_find_cached_gid(device, &rec->port_gid,
+-					 IB_GID_TYPE_IB, NULL, &p,
+-					 &gid_index);
+-	} else {
+-		ret = -EINVAL;
+-	}
+ 
++	/* GID table is not based on the netdevice for IB link layer,
++	 * so ignore ndev during search.
++	 */
++	if (rdma_protocol_ib(device, port_num))
++		ndev = NULL;
++	else if (!rdma_protocol_roce(device, port_num))
++		return -EINVAL;
++
++	ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
++					 gid_type, port_num,
++					 ndev,
++					 &gid_index);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 9a4e899d94b3..2b6c9b516070 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -119,7 +119,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	umem->length     = size;
+ 	umem->address    = addr;
+ 	umem->page_shift = PAGE_SHIFT;
+-	umem->pid	 = get_task_pid(current, PIDTYPE_PID);
+ 	/*
+ 	 * We ask for writable memory if any of the following
+ 	 * access flags are set.  "Local write" and "remote write"
+@@ -132,7 +131,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+ 
+ 	if (access & IB_ACCESS_ON_DEMAND) {
+-		put_pid(umem->pid);
+ 		ret = ib_umem_odp_get(context, umem, access);
+ 		if (ret) {
+ 			kfree(umem);
+@@ -148,7 +146,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 
+ 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ 	if (!page_list) {
+-		put_pid(umem->pid);
+ 		kfree(umem);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+@@ -231,7 +228,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ 	if (ret < 0) {
+ 		if (need_release)
+ 			__ib_umem_release(context->device, umem, 0);
+-		put_pid(umem->pid);
+ 		kfree(umem);
+ 	} else
+ 		current->mm->pinned_vm = locked;
+@@ -274,8 +270,7 @@ void ib_umem_release(struct ib_umem *umem)
+ 
+ 	__ib_umem_release(umem->context->device, umem, 1);
+ 
+-	task = get_pid_task(umem->pid, PIDTYPE_PID);
+-	put_pid(umem->pid);
++	task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
+ 	if (!task)
+ 		goto out;
+ 	mm = get_task_mm(task);
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index e6a60fa59f2b..e6bdd0c1e80a 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -5944,6 +5944,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
+ 	u64 status;
+ 	u32 sw_index;
+ 	int i = 0;
++	unsigned long irq_flags;
+ 
+ 	sw_index = dd->hw_to_sw[hw_context];
+ 	if (sw_index >= dd->num_send_contexts) {
+@@ -5953,10 +5954,12 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
+ 		return;
+ 	}
+ 	sci = &dd->send_contexts[sw_index];
++	spin_lock_irqsave(&dd->sc_lock, irq_flags);
+ 	sc = sci->sc;
+ 	if (!sc) {
+ 		dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
+ 			   sw_index, hw_context);
++		spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
+ 		return;
+ 	}
+ 
+@@ -5978,6 +5981,7 @@ static void is_sendctxt_err_int(struct hfi1_devdata *dd,
+ 	 */
+ 	if (sc->type != SC_USER)
+ 		queue_work(dd->pport->hfi1_wq, &sc->halt_work);
++	spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
+ 
+ 	/*
+ 	 * Update the counters for the corresponding status bits.
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 0881f7907848..c14ed9cc9c9e 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -388,6 +388,9 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+ 	if (err)
+ 		goto out;
+ 
++	props->active_width     = IB_WIDTH_4X;
++	props->active_speed     = IB_SPEED_QDR;
++
+ 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
+ 				 &props->active_width);
+ 
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 45594091353c..7ef21fa2c3f0 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -1206,7 +1206,7 @@ int rxe_register_device(struct rxe_dev *rxe)
+ 			    rxe->ndev->dev_addr);
+ 	dev->dev.dma_ops = &dma_virt_ops;
+ 	dma_coerce_mask_and_coherent(&dev->dev,
+-				     dma_get_required_mask(dev->dev.parent));
++				     dma_get_required_mask(&dev->dev));
+ 
+ 	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
+ 	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 74788fdeb773..8b591c192daf 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -310,6 +310,8 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
+ 
+ 	if (dev_data == NULL) {
+ 		dev_data = alloc_dev_data(devid);
++		if (!dev_data)
++			return NULL;
+ 
+ 		if (translation_pre_enabled(iommu))
+ 			dev_data->defer_attach = true;
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index f227d73e7bf6..f2832a10fcea 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -60,7 +60,7 @@
+ 	(((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
+ 
+ #define REG_MMU_IVRP_PADDR			0x114
+-#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))
++
+ #define REG_MMU_VLD_PA_RNG			0x118
+ #define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
+ 
+@@ -539,8 +539,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
+ 		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
+ 	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
+ 
+-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
+-		       data->base + REG_MMU_IVRP_PADDR);
++	if (data->m4u_plat == M4U_MT8173)
++		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
++	else
++		regval = lower_32_bits(data->protect_base) |
++			 upper_32_bits(data->protect_base);
++	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
++
+ 	if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
+ 		/*
+ 		 * If 4GB mode is enabled, the validate PA range is from
+@@ -695,6 +700,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
+ 	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
+ 	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
+ 	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
++	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
+ 	clk_disable_unprepare(data->bclk);
+ 	return 0;
+ }
+@@ -717,8 +723,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
+ 	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
+ 	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
+ 	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
+-	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
+-		       base + REG_MMU_IVRP_PADDR);
++	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
+ 	if (data->m4u_dom)
+ 		writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
+ 		       base + REG_MMU_PT_BASE_ADDR);
+diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
+index b4451a1c7c2f..778498b8633f 100644
+--- a/drivers/iommu/mtk_iommu.h
++++ b/drivers/iommu/mtk_iommu.h
+@@ -32,6 +32,7 @@ struct mtk_iommu_suspend_reg {
+ 	u32				ctrl_reg;
+ 	u32				int_control0;
+ 	u32				int_main_control;
++	u32				ivrp_paddr;
+ };
+ 
+ enum mtk_iommu_plat {
+diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
+index 910b5b6f96b1..eb65b6e78d57 100644
+--- a/drivers/macintosh/rack-meter.c
++++ b/drivers/macintosh/rack-meter.c
+@@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause)
+ 		DBDMA_DO_STOP(rm->dma_regs);
+ 		return;
+ 	}
+-	memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
+-	memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
++	memset(rdma->buf1, 0, sizeof(rdma->buf1));
++	memset(rdma->buf2, 0, sizeof(rdma->buf2));
+ 
+ 	rm->dma_buf_v->mark = 0;
+ 
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 12e5197f186c..b5ddb848cd31 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -258,10 +258,11 @@ struct bcache_device {
+ 	struct gendisk		*disk;
+ 
+ 	unsigned long		flags;
+-#define BCACHE_DEV_CLOSING	0
+-#define BCACHE_DEV_DETACHING	1
+-#define BCACHE_DEV_UNLINK_DONE	2
+-
++#define BCACHE_DEV_CLOSING		0
++#define BCACHE_DEV_DETACHING		1
++#define BCACHE_DEV_UNLINK_DONE		2
++#define BCACHE_DEV_WB_RUNNING		3
++#define BCACHE_DEV_RATE_DW_RUNNING	4
+ 	unsigned		nr_stripes;
+ 	unsigned		stripe_size;
+ 	atomic_t		*stripe_sectors_dirty;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index f2273143b3cb..432088adc497 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -899,6 +899,31 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ 		pr_debug("error creating sysfs link");
+ }
+ 
++/*
++ * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
++ * work dc->writeback_rate_update is running. Wait until the routine
++ * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
++ * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
++ * seconds, give up waiting here and continue to cancel it too.
++ */
++static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
++{
++	int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
++
++	do {
++		if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
++			      &dc->disk.flags))
++			break;
++		time_out--;
++		schedule_timeout_interruptible(1);
++	} while (time_out > 0);
++
++	if (time_out == 0)
++		pr_warn("give up waiting for dc->writeback_write_update to quit");
++
++	cancel_delayed_work_sync(&dc->writeback_rate_update);
++}
++
+ static void cached_dev_detach_finish(struct work_struct *w)
+ {
+ 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+@@ -911,7 +936,9 @@ static void cached_dev_detach_finish(struct work_struct *w)
+ 
+ 	mutex_lock(&bch_register_lock);
+ 
+-	cancel_delayed_work_sync(&dc->writeback_rate_update);
++	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
++		cancel_writeback_rate_update_dwork(dc);
++
+ 	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ 		kthread_stop(dc->writeback_thread);
+ 		dc->writeback_thread = NULL;
+@@ -954,6 +981,7 @@ void bch_cached_dev_detach(struct cached_dev *dc)
+ 	closure_get(&dc->disk.cl);
+ 
+ 	bch_writeback_queue(dc);
++
+ 	cached_dev_put(dc);
+ }
+ 
+@@ -1065,7 +1093,6 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ 		bch_sectors_dirty_init(&dc->disk);
+ 		atomic_set(&dc->has_dirty, 1);
+-		refcount_inc(&dc->count);
+ 		bch_writeback_queue(dc);
+ 	}
+ 
+@@ -1093,14 +1120,16 @@ static void cached_dev_free(struct closure *cl)
+ {
+ 	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ 
+-	cancel_delayed_work_sync(&dc->writeback_rate_update);
++	mutex_lock(&bch_register_lock);
++
++	if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
++		cancel_writeback_rate_update_dwork(dc);
++
+ 	if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ 		kthread_stop(dc->writeback_thread);
+ 	if (dc->writeback_write_wq)
+ 		destroy_workqueue(dc->writeback_write_wq);
+ 
+-	mutex_lock(&bch_register_lock);
+-
+ 	if (atomic_read(&dc->running))
+ 		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ 	bcache_device_free(&dc->disk);
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index 78cd7bd50fdd..55673508628f 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -309,7 +309,8 @@ STORE(bch_cached_dev)
+ 		bch_writeback_queue(dc);
+ 
+ 	if (attr == &sysfs_writeback_percent)
+-		schedule_delayed_work(&dc->writeback_rate_update,
++		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
++			schedule_delayed_work(&dc->writeback_rate_update,
+ 				      dc->writeback_rate_update_seconds * HZ);
+ 
+ 	mutex_unlock(&bch_register_lock);
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index f1d2fc15abcc..8f98ef1038d3 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -115,6 +115,21 @@ static void update_writeback_rate(struct work_struct *work)
+ 					     struct cached_dev,
+ 					     writeback_rate_update);
+ 
++	/*
++	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
++	 * cancel_delayed_work_sync().
++	 */
++	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
++	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
++	smp_mb();
++
++	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
++		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
++		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
++		smp_mb();
++		return;
++	}
++
+ 	down_read(&dc->writeback_lock);
+ 
+ 	if (atomic_read(&dc->has_dirty) &&
+@@ -123,8 +138,18 @@ static void update_writeback_rate(struct work_struct *work)
+ 
+ 	up_read(&dc->writeback_lock);
+ 
+-	schedule_delayed_work(&dc->writeback_rate_update,
++	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) {
++		schedule_delayed_work(&dc->writeback_rate_update,
+ 			      dc->writeback_rate_update_seconds * HZ);
++	}
++
++	/*
++	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
++	 * cancel_delayed_work_sync().
++	 */
++	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
++	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
++	smp_mb();
+ }
+ 
+ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
+@@ -565,14 +590,20 @@ static int bch_writeback_thread(void *arg)
+ 	while (!kthread_should_stop()) {
+ 		down_write(&dc->writeback_lock);
+ 		set_current_state(TASK_INTERRUPTIBLE);
+-		if (!atomic_read(&dc->has_dirty) ||
+-		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+-		     !dc->writeback_running)) {
++		/*
++		 * If the bache device is detaching, skip here and continue
++		 * to perform writeback. Otherwise, if no dirty data on cache,
++		 * or there is dirty data on cache but writeback is disabled,
++		 * the writeback thread should sleep here and wait for others
++		 * to wake up it.
++		 */
++		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
++		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
+ 			up_write(&dc->writeback_lock);
+ 
+ 			if (kthread_should_stop()) {
+ 				set_current_state(TASK_RUNNING);
+-				return 0;
++				break;
+ 			}
+ 
+ 			schedule();
+@@ -585,9 +616,16 @@ static int bch_writeback_thread(void *arg)
+ 		if (searched_full_index &&
+ 		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
+ 			atomic_set(&dc->has_dirty, 0);
+-			cached_dev_put(dc);
+ 			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
+ 			bch_write_bdev_super(dc, NULL);
++			/*
++			 * If bcache device is detaching via sysfs interface,
++			 * writeback thread should stop after there is no dirty
++			 * data on cache. BCACHE_DEV_DETACHING flag is set in
++			 * bch_cached_dev_detach().
++			 */
++			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
++				break;
+ 		}
+ 
+ 		up_write(&dc->writeback_lock);
+@@ -606,6 +644,9 @@ static int bch_writeback_thread(void *arg)
+ 		}
+ 	}
+ 
++	dc->writeback_thread = NULL;
++	cached_dev_put(dc);
++
+ 	return 0;
+ }
+ 
+@@ -659,6 +700,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+ 	dc->writeback_rate_p_term_inverse = 40;
+ 	dc->writeback_rate_i_term_inverse = 10000;
+ 
++	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
+ 	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
+ }
+ 
+@@ -669,11 +711,15 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
+ 	if (!dc->writeback_write_wq)
+ 		return -ENOMEM;
+ 
++	cached_dev_get(dc);
+ 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ 					      "bcache_writeback");
+-	if (IS_ERR(dc->writeback_thread))
++	if (IS_ERR(dc->writeback_thread)) {
++		cached_dev_put(dc);
+ 		return PTR_ERR(dc->writeback_thread);
++	}
+ 
++	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
+ 	schedule_delayed_work(&dc->writeback_rate_update,
+ 			      dc->writeback_rate_update_seconds * HZ);
+ 
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 587b25599856..0bba8f1c6cdf 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -105,8 +105,6 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+ {
+ 	if (!atomic_read(&dc->has_dirty) &&
+ 	    !atomic_xchg(&dc->has_dirty, 1)) {
+-		refcount_inc(&dc->count);
+-
+ 		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
+ 			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
+ 			/* XXX: should do this synchronously */
+diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
+index 4f015da78f28..4949b8d5a748 100644
+--- a/drivers/misc/cxl/cxl.h
++++ b/drivers/misc/cxl/cxl.h
+@@ -369,6 +369,9 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An     = {0x0A0};
+ #define CXL_PSL_TFC_An_AE (1ull << (63-30)) /* Restart PSL with address error */
+ #define CXL_PSL_TFC_An_R  (1ull << (63-31)) /* Restart PSL transaction */
+ 
++/****** CXL_PSL_DEBUG *****************************************************/
++#define CXL_PSL_DEBUG_CDC  (1ull << (63-27)) /* Coherent Data cache support */
++
+ /****** CXL_XSL9_IERAT_ERAT - CAIA 2 **********************************/
+ #define CXL_XSL9_IERAT_MLPID    (1ull << (63-0))  /* Match LPID */
+ #define CXL_XSL9_IERAT_MPID     (1ull << (63-1))  /* Match PID */
+@@ -669,6 +672,7 @@ struct cxl_native {
+ 	irq_hw_number_t err_hwirq;
+ 	unsigned int err_virq;
+ 	u64 ps_off;
++	bool no_data_cache; /* set if no data cache on the card */
+ 	const struct cxl_service_layer_ops *sl_ops;
+ };
+ 
+diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
+index 1b3d7c65ea3f..98f867fcef24 100644
+--- a/drivers/misc/cxl/native.c
++++ b/drivers/misc/cxl/native.c
+@@ -353,8 +353,17 @@ int cxl_data_cache_flush(struct cxl *adapter)
+ 	u64 reg;
+ 	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
+ 
+-	pr_devel("Flushing data cache\n");
++	/*
++	 * Do a datacache flush only if datacache is available.
++	 * In case of PSL9D datacache absent hence flush operation.
++	 * would timeout.
++	 */
++	if (adapter->native->no_data_cache) {
++		pr_devel("No PSL data cache. Ignoring cache flush req.\n");
++		return 0;
++	}
+ 
++	pr_devel("Flushing data cache\n");
+ 	reg = cxl_p1_read(adapter, CXL_PSL_Control);
+ 	reg |= CXL_PSL_Control_Fr;
+ 	cxl_p1_write(adapter, CXL_PSL_Control, reg);
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 758842f65a1b..61de57292e40 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -456,6 +456,7 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
+ 	u64 chipid;
+ 	u32 phb_index;
+ 	u64 capp_unit_id;
++	u64 psl_debug;
+ 	int rc;
+ 
+ 	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+@@ -506,6 +507,16 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
+ 	} else
+ 		cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x4000000000000000ULL);
+ 
++	/*
++	 * Check if PSL has data-cache. We need to flush adapter datacache
++	 * when as its about to be removed.
++	 */
++	psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
++	if (psl_debug & CXL_PSL_DEBUG_CDC) {
++		dev_dbg(&dev->dev, "No data-cache present\n");
++		adapter->native->no_data_cache = true;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1449,10 +1460,8 @@ int cxl_pci_reset(struct cxl *adapter)
+ 
+ 	/*
+ 	 * The adapter is about to be reset, so ignore errors.
+-	 * Not supported on P9 DD1
+ 	 */
+-	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
+-		cxl_data_cache_flush(adapter);
++	cxl_data_cache_flush(adapter);
+ 
+ 	/* pcie_warm_reset requests a fundamental pci reset which includes a
+ 	 * PERST assert/deassert.  PERST triggers a loading of the image
+@@ -1936,10 +1945,8 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
+ 
+ 	/*
+ 	 * Flush adapter datacache as its about to be removed.
+-	 * Not supported on P9 DD1.
+ 	 */
+-	if ((cxl_is_power8()) || (!(cxl_is_power9_dd1())))
+-		cxl_data_cache_flush(adapter);
++	cxl_data_cache_flush(adapter);
+ 
+ 	cxl_deconfigure_adapter(adapter);
+ 
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 9c6f639d8a57..81501ebd5b26 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2492,7 +2492,7 @@ static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd,
+ 		break;
+ 	}
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
+index 61666d269771..0cfbdb3ab68a 100644
+--- a/drivers/mmc/host/sdhci-iproc.c
++++ b/drivers/mmc/host/sdhci-iproc.c
+@@ -33,6 +33,8 @@ struct sdhci_iproc_host {
+ 	const struct sdhci_iproc_data *data;
+ 	u32 shadow_cmd;
+ 	u32 shadow_blk;
++	bool is_cmd_shadowed;
++	bool is_blk_shadowed;
+ };
+ 
+ #define REG_OFFSET_IN_BITS(reg) ((reg) << 3 & 0x18)
+@@ -48,8 +50,22 @@ static inline u32 sdhci_iproc_readl(struct sdhci_host *host, int reg)
+ 
+ static u16 sdhci_iproc_readw(struct sdhci_host *host, int reg)
+ {
+-	u32 val = sdhci_iproc_readl(host, (reg & ~3));
+-	u16 word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
++	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++	struct sdhci_iproc_host *iproc_host = sdhci_pltfm_priv(pltfm_host);
++	u32 val;
++	u16 word;
++
++	if ((reg == SDHCI_TRANSFER_MODE) && iproc_host->is_cmd_shadowed) {
++		/* Get the saved transfer mode */
++		val = iproc_host->shadow_cmd;
++	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
++		   iproc_host->is_blk_shadowed) {
++		/* Get the saved block info */
++		val = iproc_host->shadow_blk;
++	} else {
++		val = sdhci_iproc_readl(host, (reg & ~3));
++	}
++	word = val >> REG_OFFSET_IN_BITS(reg) & 0xffff;
+ 	return word;
+ }
+ 
+@@ -105,13 +121,15 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
+ 
+ 	if (reg == SDHCI_COMMAND) {
+ 		/* Write the block now as we are issuing a command */
+-		if (iproc_host->shadow_blk != 0) {
++		if (iproc_host->is_blk_shadowed) {
+ 			sdhci_iproc_writel(host, iproc_host->shadow_blk,
+ 				SDHCI_BLOCK_SIZE);
+-			iproc_host->shadow_blk = 0;
++			iproc_host->is_blk_shadowed = false;
+ 		}
+ 		oldval = iproc_host->shadow_cmd;
+-	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
++		iproc_host->is_cmd_shadowed = false;
++	} else if ((reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) &&
++		   iproc_host->is_blk_shadowed) {
+ 		/* Block size and count are stored in shadow reg */
+ 		oldval = iproc_host->shadow_blk;
+ 	} else {
+@@ -123,9 +141,11 @@ static void sdhci_iproc_writew(struct sdhci_host *host, u16 val, int reg)
+ 	if (reg == SDHCI_TRANSFER_MODE) {
+ 		/* Save the transfer mode until the command is issued */
+ 		iproc_host->shadow_cmd = newval;
++		iproc_host->is_cmd_shadowed = true;
+ 	} else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) {
+ 		/* Save the block info until the command is issued */
+ 		iproc_host->shadow_blk = newval;
++		iproc_host->is_blk_shadowed = true;
+ 	} else {
+ 		/* Command or other regular 32-bit write */
+ 		sdhci_iproc_writel(host, newval, reg & ~3);
+@@ -166,7 +186,7 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
+ 
+ static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
+ 	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+-	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
++	.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ 	.ops = &sdhci_iproc_32only_ops,
+ };
+ 
+@@ -206,7 +226,6 @@ static const struct sdhci_iproc_data iproc_data = {
+ 	.caps1 = SDHCI_DRIVER_TYPE_C |
+ 		 SDHCI_DRIVER_TYPE_D |
+ 		 SDHCI_SUPPORT_DDR50,
+-	.mmc_caps = MMC_CAP_1_8V_DDR,
+ };
+ 
+ static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 8eef9fb6b1fe..ad8195b0d161 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -533,7 +533,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+ 	int i;
+ 
+ 	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
+-		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
++		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
++		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+ 
+ 		slot = &ring->slots[i];
+ 		dev_kfree_skb(slot->skb);
+diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
+index 4040d846da8e..40d02fec2747 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -479,9 +479,9 @@ struct bgmac_rx_header {
+ struct bgmac {
+ 	union {
+ 		struct {
+-			void *base;
+-			void *idm_base;
+-			void *nicpm_base;
++			void __iomem *base;
++			void __iomem *idm_base;
++			void __iomem *nicpm_base;
+ 		} plat;
+ 		struct {
+ 			struct bcma_device *core;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 9442605f4fd4..0b71d3b44933 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2552,16 +2552,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
+ 			return -EOPNOTSUPP;
+ 
+ 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
+-		if (!rc)
++		if (!rc) {
+ 			netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
++			*flags = 0;
++		}
+ 	} else if (*flags == ETH_RESET_AP) {
+ 		/* This feature is not supported in older firmware versions */
+ 		if (bp->hwrm_spec_code < 0x10803)
+ 			return -EOPNOTSUPP;
+ 
+ 		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
+-		if (!rc)
++		if (!rc) {
+ 			netdev_info(dev, "Reset Application Processor request successful.\n");
++			*flags = 0;
++		}
+ 	} else {
+ 		rc = -EINVAL;
+ 	}
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 65c2cee35766..9d8aa96044d3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -992,8 +992,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ 
+ 	/* Check if there's another flow using the same tunnel decap.
+ 	 * If not, add this tunnel to the table and resolve the other
+-	 * tunnel header fileds
++	 * tunnel header fileds. Ignore src_port in the tunnel_key,
++	 * since it is not required for decap filters.
+ 	 */
++	decap_key->tp_src = 0;
+ 	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
+ 					     &tc_info->decap_ht_params,
+ 					     decap_key);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 61022b5f6743..57dcb957f27c 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -833,8 +833,6 @@ static int setup_fw_sge_queues(struct adapter *adap)
+ 
+ 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+ 			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
+-	if (err)
+-		t4_free_sge_resources(adap);
+ 	return err;
+ }
+ 
+@@ -5474,6 +5472,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if (err)
+ 		goto out_free_dev;
+ 
++	err = setup_fw_sge_queues(adapter);
++	if (err) {
++		dev_err(adapter->pdev_dev,
++			"FW sge queue allocation failed, err %d", err);
++		goto out_free_dev;
++	}
++
+ 	/*
+ 	 * The card is now ready to go.  If any errors occur during device
+ 	 * registration we do not fail the whole card but rather proceed only
+@@ -5522,10 +5527,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		cxgb4_ptp_init(adapter);
+ 
+ 	print_adapter_info(adapter);
+-	setup_fw_sge_queues(adapter);
+ 	return 0;
+ 
+  out_free_dev:
++	t4_free_sge_resources(adapter);
+ 	free_some_resources(adapter);
+ 	if (adapter->flags & USING_MSIX)
+ 		free_msix_info(adapter);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+index 6b5fea4532f3..2d827140a475 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -342,6 +342,7 @@ static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+ {
+ 	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ 
++	adap->sge.uld_rxq_info[uld_type] = NULL;
+ 	kfree(rxq_info->rspq_id);
+ 	kfree(rxq_info->uldrxq);
+ 	kfree(rxq_info);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index f202ba72a811..b91109d967fa 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1898,6 +1898,8 @@ static int enic_open(struct net_device *netdev)
+ 	}
+ 
+ 	for (i = 0; i < enic->rq_count; i++) {
++		/* enable rq before updating rq desc */
++		vnic_rq_enable(&enic->rq[i]);
+ 		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ 		/* Need at least one buffer on ring to get going */
+ 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+@@ -1909,8 +1911,6 @@ static int enic_open(struct net_device *netdev)
+ 
+ 	for (i = 0; i < enic->wq_count; i++)
+ 		vnic_wq_enable(&enic->wq[i]);
+-	for (i = 0; i < enic->rq_count; i++)
+-		vnic_rq_enable(&enic->rq[i]);
+ 
+ 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
+ 		enic_dev_add_station_addr(enic);
+@@ -1936,8 +1936,12 @@ static int enic_open(struct net_device *netdev)
+ 	return 0;
+ 
+ err_out_free_rq:
+-	for (i = 0; i < enic->rq_count; i++)
++	for (i = 0; i < enic->rq_count; i++) {
++		err = vnic_rq_disable(&enic->rq[i]);
++		if (err)
++			return err;
+ 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
++	}
+ 	enic_dev_notify_unset(enic);
+ err_out_free_intr:
+ 	enic_unset_affinity_hint(enic);
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index e4ec32a9ca15..3615e5f148dd 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -1916,8 +1916,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
+ 		goto csum_failed;
+ 	}
+ 
++	/* SGT[0] is used by the linear part */
+ 	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+-	qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
++	frag_len = skb_headlen(skb);
++	qm_sg_entry_set_len(&sgt[0], frag_len);
+ 	sgt[0].bpid = FSL_DPAA_BPID_INV;
+ 	sgt[0].offset = 0;
+ 	addr = dma_map_single(dev, skb->data,
+@@ -1930,9 +1932,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
+ 	qm_sg_entry_set64(&sgt[0], addr);
+ 
+ 	/* populate the rest of SGT entries */
+-	frag = &skb_shinfo(skb)->frags[0];
+-	frag_len = frag->size;
+-	for (i = 1; i <= nr_frags; i++, frag++) {
++	for (i = 0; i < nr_frags; i++) {
++		frag = &skb_shinfo(skb)->frags[i];
++		frag_len = frag->size;
+ 		WARN_ON(!skb_frag_page(frag));
+ 		addr = skb_frag_dma_map(dev, frag, 0,
+ 					frag_len, dma_dir);
+@@ -1942,15 +1944,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
+ 			goto sg_map_failed;
+ 		}
+ 
+-		qm_sg_entry_set_len(&sgt[i], frag_len);
+-		sgt[i].bpid = FSL_DPAA_BPID_INV;
+-		sgt[i].offset = 0;
++		qm_sg_entry_set_len(&sgt[i + 1], frag_len);
++		sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
++		sgt[i + 1].offset = 0;
+ 
+ 		/* keep the offset in the address */
+-		qm_sg_entry_set64(&sgt[i], addr);
+-		frag_len = frag->size;
++		qm_sg_entry_set64(&sgt[i + 1], addr);
+ 	}
+-	qm_sg_entry_set_f(&sgt[i - 1], frag_len);
++
++	/* Set the final bit in the last used entry of the SGT */
++	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
+ 
+ 	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
+ 
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+index faea674094b9..85306d1b2acf 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+@@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
+ 	if (epause->rx_pause)
+ 		newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ 	if (epause->tx_pause)
+-		newadv |= ADVERTISED_Asym_Pause;
++		newadv ^= ADVERTISED_Asym_Pause;
+ 
+ 	oldadv = phydev->advertising &
+ 			(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 601b6295d3f8..9f6a6a1640d6 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -747,7 +747,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
+ {
+ 	/* Config bd buffer end */
+ 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
+-		       HNS3_TXD_BDTYPE_M, 0);
++		       HNS3_TXD_BDTYPE_S, 0);
+ 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
+ 	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
+ 	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index b034c7f24eda..a1e53c671944 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -698,7 +698,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
+ 
+ 	if (!h->ae_algo || !h->ae_algo->ops ||
+ 	    !h->ae_algo->ops->get_rss_key_size)
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	return h->ae_algo->ops->get_rss_key_size(h);
+ }
+@@ -709,7 +709,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
+ 
+ 	if (!h->ae_algo || !h->ae_algo->ops ||
+ 	    !h->ae_algo->ops->get_rss_indir_size)
+-		return -EOPNOTSUPP;
++		return 0;
+ 
+ 	return h->ae_algo->ops->get_rss_indir_size(h);
+ }
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 1b3cc8bb0705..fd8e6937ee00 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -812,8 +812,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
+ 	release_tx_pools(adapter);
+ 	release_rx_pools(adapter);
+ 
+-	release_stats_token(adapter);
+-	release_stats_buffers(adapter);
+ 	release_error_buffers(adapter);
+ 
+ 	if (adapter->napi) {
+@@ -953,14 +951,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
+ 	if (rc)
+ 		return rc;
+ 
+-	rc = init_stats_buffers(adapter);
+-	if (rc)
+-		return rc;
+-
+-	rc = init_stats_token(adapter);
+-	if (rc)
+-		return rc;
+-
+ 	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
+ 	if (!adapter->vpd)
+ 		return -ENOMEM;
+@@ -1699,12 +1689,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+ 			rc = reset_rx_pools(adapter);
+ 			if (rc)
+ 				return rc;
+-
+-			if (reset_state == VNIC_CLOSED)
+-				return 0;
+ 		}
+ 	}
+ 
++	adapter->state = VNIC_CLOSED;
++
++	if (reset_state == VNIC_CLOSED)
++		return 0;
++
+ 	rc = __ibmvnic_open(netdev);
+ 	if (rc) {
+ 		if (list_empty(&adapter->rwi_list))
+@@ -2266,6 +2258,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
+ 	}
+ 
+ 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
++	atomic_set(&scrq->used, 0);
+ 	scrq->cur = 0;
+ 
+ 	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
+@@ -4387,6 +4380,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
+ 		release_crq_queue(adapter);
+ 	}
+ 
++	rc = init_stats_buffers(adapter);
++	if (rc)
++		return rc;
++
++	rc = init_stats_token(adapter);
++	if (rc)
++		return rc;
++
+ 	return rc;
+ }
+ 
+@@ -4494,6 +4495,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
+ 	release_sub_crqs(adapter);
+ 	release_crq_queue(adapter);
+ 
++	release_stats_token(adapter);
++	release_stats_buffers(adapter);
++
+ 	adapter->state = VNIC_REMOVED;
+ 
+ 	mutex_unlock(&adapter->reset_lock);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index e31adbc75f9c..e50d703d7353 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -9215,6 +9215,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ 	}
+ 	i40e_get_oem_version(&pf->hw);
+ 
++	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
++	    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
++	     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
++		/* The following delay is necessary for 4.33 firmware and older
++		 * to recover after EMP reset. 200 ms should suffice but we
++		 * put here 300 ms to be sure that FW is ready to operate
++		 * after reset.
++		 */
++		mdelay(300);
++	}
++
+ 	/* re-verify the eeprom if we just had an EMP reset */
+ 	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
+ 		i40e_verify_eeprom(pf);
+@@ -14216,7 +14227,13 @@ static int __maybe_unused i40e_suspend(struct device *dev)
+ 	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
+ 		i40e_enable_mc_magic_wake(pf);
+ 
+-	i40e_prep_for_reset(pf, false);
++	/* Since we're going to destroy queues during the
++	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
++	 * whole section
++	 */
++	rtnl_lock();
++
++	i40e_prep_for_reset(pf, true);
+ 
+ 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+@@ -14228,6 +14245,8 @@ static int __maybe_unused i40e_suspend(struct device *dev)
+ 	 */
+ 	i40e_clear_interrupt_scheme(pf);
+ 
++	rtnl_unlock();
++
+ 	return 0;
+ }
+ 
+@@ -14245,6 +14264,11 @@ static int __maybe_unused i40e_resume(struct device *dev)
+ 	if (!test_bit(__I40E_SUSPENDED, pf->state))
+ 		return 0;
+ 
++	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
++	 * since we're going to be restoring queues
++	 */
++	rtnl_lock();
++
+ 	/* We cleared the interrupt scheme when we suspended, so we need to
+ 	 * restore it now to resume device functionality.
+ 	 */
+@@ -14255,7 +14279,9 @@ static int __maybe_unused i40e_resume(struct device *dev)
+ 	}
+ 
+ 	clear_bit(__I40E_DOWN, pf->state);
+-	i40e_reset_and_rebuild(pf, false, false);
++	i40e_reset_and_rebuild(pf, false, true);
++
++	rtnl_unlock();
+ 
+ 	/* Clear suspended state last after everything is recovered */
+ 	clear_bit(__I40E_SUSPENDED, pf->state);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 9fc063af233c..85369423452d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -7711,7 +7711,8 @@ static void ixgbe_service_task(struct work_struct *work)
+ 
+ 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
+ 		ixgbe_ptp_overflow_check(adapter);
+-		ixgbe_ptp_rx_hang(adapter);
++		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
++			ixgbe_ptp_rx_hang(adapter);
+ 		ixgbe_ptp_tx_hang(adapter);
+ 	}
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index e9a1fbcc4adf..3efe45bc2471 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1802,7 +1802,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
+ 
+ 	cmd->checksum_disabled = 1;
+ 	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+-	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
++	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
+ 
+ 	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+ 	if (cmd->cmdif_rev > CMD_IF_REV) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 9b4827d36e3e..1ae61514b6a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -153,26 +153,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
+ 	mutex_unlock(&priv->state_lock);
+ }
+ 
+-static void mlx5e_tx_timeout_work(struct work_struct *work)
+-{
+-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+-					       tx_timeout_work);
+-	int err;
+-
+-	rtnl_lock();
+-	mutex_lock(&priv->state_lock);
+-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+-		goto unlock;
+-	mlx5e_close_locked(priv->netdev);
+-	err = mlx5e_open_locked(priv->netdev);
+-	if (err)
+-		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+-			   err);
+-unlock:
+-	mutex_unlock(&priv->state_lock);
+-	rtnl_unlock();
+-}
+-
+ void mlx5e_update_stats(struct mlx5e_priv *priv)
+ {
+ 	int i;
+@@ -3632,13 +3612,19 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+ 	return true;
+ }
+ 
+-static void mlx5e_tx_timeout(struct net_device *dev)
++static void mlx5e_tx_timeout_work(struct work_struct *work)
+ {
+-	struct mlx5e_priv *priv = netdev_priv(dev);
++	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
++					       tx_timeout_work);
++	struct net_device *dev = priv->netdev;
+ 	bool reopen_channels = false;
+-	int i;
++	int i, err;
+ 
+-	netdev_err(dev, "TX timeout detected\n");
++	rtnl_lock();
++	mutex_lock(&priv->state_lock);
++
++	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
++		goto unlock;
+ 
+ 	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+ 		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
+@@ -3646,7 +3632,9 @@ static void mlx5e_tx_timeout(struct net_device *dev)
+ 
+ 		if (!netif_xmit_stopped(dev_queue))
+ 			continue;
+-		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
++
++		netdev_err(dev,
++			   "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+ 			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+ 			   jiffies_to_usecs(jiffies - dev_queue->trans_start));
+ 
+@@ -3659,8 +3647,27 @@ static void mlx5e_tx_timeout(struct net_device *dev)
+ 		}
+ 	}
+ 
+-	if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
+-		schedule_work(&priv->tx_timeout_work);
++	if (!reopen_channels)
++		goto unlock;
++
++	mlx5e_close_locked(dev);
++	err = mlx5e_open_locked(dev);
++	if (err)
++		netdev_err(priv->netdev,
++			   "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
++			   err);
++
++unlock:
++	mutex_unlock(&priv->state_lock);
++	rtnl_unlock();
++}
++
++static void mlx5e_tx_timeout(struct net_device *dev)
++{
++	struct mlx5e_priv *priv = netdev_priv(dev);
++
++	netdev_err(dev, "TX timeout detected\n");
++	queue_work(priv->wq, &priv->tx_timeout_work);
+ }
+ 
+ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+index c4949183eef3..3881de91015e 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+@@ -307,6 +307,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	if (data[IFLA_VLAN_ID]) {
+ 		mux_id = nla_get_u16(data[IFLA_VLAN_ID]);
+ 		ep = rmnet_get_endpoint(port, priv->mux_id);
++		if (!ep)
++			return -ENODEV;
+ 
+ 		hlist_del_init_rcu(&ep->hlnode);
+ 		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 14c839bb09e7..7c9235c9d081 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -763,6 +763,7 @@ static struct sh_eth_cpu_data sh7757_data = {
+ 	.rpadir		= 1,
+ 	.rpadir_value   = 2 << 16,
+ 	.rtrate		= 1,
++	.dual_port	= 1,
+ };
+ 
+ #define SH_GIGA_ETH_BASE	0xfee00000UL
+@@ -841,6 +842,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
+ 	.no_trimd	= 1,
+ 	.no_ade		= 1,
+ 	.tsu		= 1,
++	.dual_port	= 1,
+ };
+ 
+ /* SH7734 */
+@@ -911,6 +913,7 @@ static struct sh_eth_cpu_data sh7763_data = {
+ 	.tsu		= 1,
+ 	.irq_flags	= IRQF_SHARED,
+ 	.magic		= 1,
++	.dual_port	= 1,
+ };
+ 
+ static struct sh_eth_cpu_data sh7619_data = {
+@@ -943,6 +946,7 @@ static struct sh_eth_cpu_data sh771x_data = {
+ 			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ 			  EESIPR_PREIP | EESIPR_CERFIP,
+ 	.tsu		= 1,
++	.dual_port	= 1,
+ };
+ 
+ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
+@@ -2932,7 +2936,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
+ /* SuperH's TSU register init function */
+ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
+ {
+-	if (sh_eth_is_rz_fast_ether(mdp)) {
++	if (!mdp->cd->dual_port) {
+ 		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ 		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+ 				 TSU_FWSLC);	/* Enable POST registers */
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index e5fe70134690..fdd6d71c03d1 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -509,6 +509,7 @@ struct sh_eth_cpu_data {
+ 	unsigned rmiimode:1;	/* EtherC has RMIIMODE register */
+ 	unsigned rtrate:1;	/* EtherC has RTRATE register */
+ 	unsigned magic:1;	/* EtherC has ECMR.MPDE and ECSR.MPD */
++	unsigned dual_port:1;	/* Dual EtherC/E-DMAC */
+ };
+ 
+ struct sh_eth_private {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 3ea343b45d93..8044563453f9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -1843,6 +1843,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+ 		if (unlikely(status & tx_dma_own))
+ 			break;
+ 
++		/* Make sure descriptor fields are read after reading
++		 * the own bit.
++		 */
++		dma_rmb();
++
+ 		/* Just consider the last segment and ...*/
+ 		if (likely(!(status & tx_not_ls))) {
+ 			/* ... verify the status error condition */
+@@ -2430,7 +2435,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+ 			continue;
+ 
+ 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+-		priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
++		priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
+ 	}
+ }
+ 
+@@ -2980,8 +2985,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+ 
+ 	/* If context desc is used to change MSS */
+-	if (mss_desc)
++	if (mss_desc) {
++		/* Make sure that first descriptor has been completely
++		 * written, including its own bit. This is because MSS is
++		 * actually before first descriptor, so we need to make
++		 * sure that MSS's own bit is the last thing written.
++		 */
++		dma_wmb();
+ 		priv->hw->desc->set_tx_owner(mss_desc);
++	}
+ 
+ 	/* The own bit must be the latest setting done when prepare the
+ 	 * descriptor and then barrier is needed to make sure that
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 7472172823f3..11209e494502 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -1078,10 +1078,14 @@ static int netvsc_receive(struct net_device *ndev,
+ 		void *data = recv_buf
+ 			+ vmxferpage_packet->ranges[i].byte_offset;
+ 		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
++		int ret;
+ 
+ 		/* Pass it to the upper layer */
+-		status = rndis_filter_receive(ndev, net_device,
+-					      channel, data, buflen);
++		ret = rndis_filter_receive(ndev, net_device,
++					   channel, data, buflen);
++
++		if (unlikely(ret != NVSP_STAT_SUCCESS))
++			status = NVSP_STAT_FAIL;
+ 	}
+ 
+ 	enq_receive_complete(ndev, net_device, q_idx,
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 4774766fe20d..2a7752c113df 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -831,7 +831,7 @@ int netvsc_recv_callback(struct net_device *net,
+ 	u64_stats_update_end(&rx_stats->syncp);
+ 
+ 	napi_gro_receive(&nvchan->napi, skb);
+-	return 0;
++	return NVSP_STAT_SUCCESS;
+ }
+ 
+ static void netvsc_get_drvinfo(struct net_device *net,
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index 95846f0321f3..33138e4f0b5a 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -434,10 +434,10 @@ int rndis_filter_receive(struct net_device *ndev,
+ 			"unhandled rndis message (type %u len %u)\n",
+ 			   rndis_msg->ndis_msg_type,
+ 			   rndis_msg->msg_len);
+-		break;
++		return NVSP_STAT_FAIL;
+ 	}
+ 
+-	return 0;
++	return NVSP_STAT_SUCCESS;
+ }
+ 
+ static int rndis_filter_query_device(struct rndis_device *dev,
+diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
+index 377af43b81b3..58299fb666ed 100644
+--- a/drivers/net/ieee802154/ca8210.c
++++ b/drivers/net/ieee802154/ca8210.c
+@@ -2493,13 +2493,14 @@ static ssize_t ca8210_test_int_user_write(
+ 	struct ca8210_priv *priv = filp->private_data;
+ 	u8 command[CA8210_SPI_BUF_SIZE];
+ 
+-	if (len > CA8210_SPI_BUF_SIZE) {
++	memset(command, SPI_IDLE, 6);
++	if (len > CA8210_SPI_BUF_SIZE || len < 2) {
+ 		dev_warn(
+ 			&priv->spi->dev,
+-			"userspace requested erroneously long write (%zu)\n",
++			"userspace requested erroneous write length (%zu)\n",
+ 			len
+ 		);
+-		return -EMSGSIZE;
++		return -EBADE;
+ 	}
+ 
+ 	ret = copy_from_user(command, in_buf, len);
+@@ -2511,6 +2512,13 @@ static ssize_t ca8210_test_int_user_write(
+ 		);
+ 		return -EIO;
+ 	}
++	if (len != command[1] + 2) {
++		dev_err(
++			&priv->spi->dev,
++			"write len does not match packet length field\n"
++		);
++		return -EBADE;
++	}
+ 
+ 	ret = ca8210_test_check_upstream(command, priv->spi);
+ 	if (ret == 0) {
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 654f42d00092..a6c87793d899 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev)
+ 	kfree(dp83640);
+ }
+ 
++static int dp83640_soft_reset(struct phy_device *phydev)
++{
++	int ret;
++
++	ret = genphy_soft_reset(phydev);
++	if (ret < 0)
++		return ret;
++
++	/* From DP83640 datasheet: "Software driver code must wait 3 us
++	 * following a software reset before allowing further serial MII
++	 * operations with the DP83640."
++	 */
++	udelay(10);		/* Taking udelay inaccuracy into account */
++
++	return 0;
++}
++
+ static int dp83640_config_init(struct phy_device *phydev)
+ {
+ 	struct dp83640_private *dp83640 = phydev->priv;
+@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = {
+ 	.flags		= PHY_HAS_INTERRUPT,
+ 	.probe		= dp83640_probe,
+ 	.remove		= dp83640_remove,
++	.soft_reset	= dp83640_soft_reset,
+ 	.config_init	= dp83640_config_init,
+ 	.ack_interrupt  = dp83640_ack_interrupt,
+ 	.config_intr    = dp83640_config_intr,
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 32cf21716f19..145bb7cbf5b2 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -2083,10 +2083,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
+ 
+ 	dev->fc_autoneg = phydev->autoneg;
+ 
+-	phy_start(phydev);
+-
+-	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+-
+ 	return 0;
+ 
+ error:
+@@ -2523,9 +2519,9 @@ static int lan78xx_open(struct net_device *net)
+ 	if (ret < 0)
+ 		goto done;
+ 
+-	ret = lan78xx_phy_init(dev);
+-	if (ret < 0)
+-		goto done;
++	phy_start(net->phydev);
++
++	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+ 
+ 	/* for Link Check */
+ 	if (dev->urb_intr) {
+@@ -2586,13 +2582,8 @@ static int lan78xx_stop(struct net_device *net)
+ 	if (timer_pending(&dev->stat_monitor))
+ 		del_timer_sync(&dev->stat_monitor);
+ 
+-	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
+-	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
+-
+-	phy_stop(net->phydev);
+-	phy_disconnect(net->phydev);
+-
+-	net->phydev = NULL;
++	if (net->phydev)
++		phy_stop(net->phydev);
+ 
+ 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ 	netif_stop_queue(net);
+@@ -3507,8 +3498,13 @@ static void lan78xx_disconnect(struct usb_interface *intf)
+ 		return;
+ 
+ 	udev = interface_to_usbdev(intf);
+-
+ 	net = dev->net;
++
++	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
++	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
++
++	phy_disconnect(net->phydev);
++
+ 	unregister_netdev(net);
+ 
+ 	cancel_delayed_work_sync(&dev->wq);
+@@ -3664,8 +3660,14 @@ static int lan78xx_probe(struct usb_interface *intf,
+ 	pm_runtime_set_autosuspend_delay(&udev->dev,
+ 					 DEFAULT_AUTOSUSPEND_DELAY);
+ 
++	ret = lan78xx_phy_init(dev);
++	if (ret < 0)
++		goto out4;
++
+ 	return 0;
+ 
++out4:
++	unregister_netdev(netdev);
+ out3:
+ 	lan78xx_unbind(dev, intf);
+ out2:
+@@ -4013,7 +4015,7 @@ static int lan78xx_reset_resume(struct usb_interface *intf)
+ 
+ 	lan78xx_reset(dev);
+ 
+-	lan78xx_phy_init(dev);
++	phy_start(dev->net->phydev);
+ 
+ 	return lan78xx_resume(intf);
+ }
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index aa21b2225679..16b0c7db431b 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2874,8 +2874,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 
+ 	/* Assume link up if device can't report link status,
+ 	   otherwise get link status from config. */
++	netif_carrier_off(dev);
+ 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+-		netif_carrier_off(dev);
+ 		schedule_work(&vi->config_work);
+ 	} else {
+ 		vi->status = VIRTIO_NET_S_LINK_UP;
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 800a86e2d671..2d7ef7460780 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -7084,10 +7084,20 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ {
+ 	struct ath10k *ar = hw->priv;
+ 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k_vif *arvif = (void *)vif->drv_priv;
++	struct ath10k_peer *peer;
+ 	u32 bw, smps;
+ 
+ 	spin_lock_bh(&ar->data_lock);
+ 
++	peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
++	if (!peer) {
++		spin_unlock_bh(&ar->data_lock);
++		ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
++			    sta->addr, arvif->vdev_id);
++		return;
++	}
++
+ 	ath10k_dbg(ar, ATH10K_DBG_MAC,
+ 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
+@@ -7873,6 +7883,7 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ 		.max_interfaces = 8,
+ 		.num_different_channels = 1,
+ 		.beacon_int_infra_match = true,
++		.beacon_int_min_gcd = 1,
+ #ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ 					BIT(NL80211_CHAN_WIDTH_20) |
+@@ -7996,6 +8007,7 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+ 		.max_interfaces = 16,
+ 		.num_different_channels = 1,
+ 		.beacon_int_infra_match = true,
++		.beacon_int_min_gcd = 1,
+ #ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ 					BIT(NL80211_CHAN_WIDTH_20) |
+diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
+index 5e77fe1f5b0d..a41bcbda1d9e 100644
+--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
+@@ -479,14 +479,16 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
+ {
+ 	int i = 0;
+ 	int ret = 0;
++	struct rchan_buf *buf;
+ 	struct rchan *rc = spec_priv->rfs_chan_spec_scan;
+ 
+-	for_each_online_cpu(i)
+-		ret += relay_buf_full(*per_cpu_ptr(rc->buf, i));
+-
+-	i = num_online_cpus();
++	for_each_possible_cpu(i) {
++		if ((buf = *per_cpu_ptr(rc->buf, i))) {
++			ret += relay_buf_full(buf);
++		}
++	}
+ 
+-	if (ret == i)
++	if (ret)
+ 		return 1;
+ 	else
+ 		return 0;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+index 55d1274c6092..fb5745660509 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+@@ -234,13 +234,15 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+ 	struct iwl_mvm_sta *mvmsta;
+ 	struct iwl_lq_sta_rs_fw *lq_sta;
+ 
++	rcu_read_lock();
++
+ 	notif = (void *)pkt->data;
+ 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id);
+ 
+ 	if (!mvmsta) {
+ 		IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+ 			notif->sta_id);
+-		return;
++		goto out;
+ 	}
+ 
+ 	lq_sta = &mvmsta->lq_sta.rs_fw;
+@@ -251,6 +253,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+ 		IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+ 			       lq_sta->last_rate_n_flags);
+ 	}
++out:
++	rcu_read_unlock();
+ }
+ 
+ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+index d65e1db7c097..70f8b8eb6117 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+@@ -800,12 +800,19 @@ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+ 		.scd_queue = queue,
+ 		.action = SCD_CFG_DISABLE_QUEUE,
+ 	};
+-	bool remove_mac_queue = true;
++	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+ 	int ret;
+ 
++	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
++		return -EINVAL;
++
+ 	if (iwl_mvm_has_new_tx_api(mvm)) {
+ 		spin_lock_bh(&mvm->queue_info_lock);
+-		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue);
++
++		if (remove_mac_queue)
++			mvm->hw_queue_to_mac80211[queue] &=
++				~BIT(mac80211_queue);
++
+ 		spin_unlock_bh(&mvm->queue_info_lock);
+ 
+ 		iwl_trans_txq_free(mvm->trans, queue);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+index 205043b470b2..7d4e308ee6a7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+@@ -336,6 +336,17 @@ mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ 	int idx = key->keyidx;
+ 	int ret;
+ 
++	/* fall back to sw encryption for unsupported ciphers */
++	switch (key->cipher) {
++	case WLAN_CIPHER_SUITE_WEP40:
++	case WLAN_CIPHER_SUITE_WEP104:
++	case WLAN_CIPHER_SUITE_TKIP:
++	case WLAN_CIPHER_SUITE_CCMP:
++		break;
++	default:
++		return -EOPNOTSUPP;
++	}
++
+ 	/*
+ 	 * The hardware does not support per-STA RX GTK, fall back
+ 	 * to software mode for these.
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+index 534e4bf9a34c..e46eafc4c436 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_tx.c
+@@ -36,9 +36,12 @@ void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+ 
+ 		msta = (struct mt76x2_sta *) control->sta->drv_priv;
+ 		wcid = &msta->wcid;
++		/* sw encrypted frames */
++		if (!info->control.hw_key && wcid->hw_key_idx != -1)
++			control->sta = NULL;
+ 	}
+ 
+-	if (vif || (!info->control.hw_key && wcid->hw_key_idx != -1)) {
++	if (vif && !control->sta) {
+ 		struct mt76x2_vif *mvif;
+ 
+ 		mvif = (struct mt76x2_vif *) vif->drv_priv;
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+index b0cf41195051..96fc3c84d7d2 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
+@@ -636,11 +636,14 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
+ 				    u32 *read_buf, u16 size)
+ {
+ 	u32 addr_on_bus, *data;
+-	u32 align[2] = {};
+ 	u16 ms_addr;
+ 	int status;
+ 
+-	data = PTR_ALIGN(&align[0], 8);
++	data = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	data = PTR_ALIGN(data, 8);
+ 
+ 	ms_addr = (addr >> 16);
+ 	status = rsi_sdio_master_access_msword(adapter, ms_addr);
+@@ -648,7 +651,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
+ 		rsi_dbg(ERR_ZONE,
+ 			"%s: Unable to set ms word to common reg\n",
+ 			__func__);
+-		return status;
++		goto err;
+ 	}
+ 	addr &= 0xFFFF;
+ 
+@@ -666,7 +669,7 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
+ 					 (u8 *)data, 4);
+ 	if (status < 0) {
+ 		rsi_dbg(ERR_ZONE, "%s: AHB register read failed\n", __func__);
+-		return status;
++		goto err;
+ 	}
+ 	if (size == 2) {
+ 		if ((addr & 0x3) == 0)
+@@ -688,17 +691,23 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr,
+ 		*read_buf = *data;
+ 	}
+ 
+-	return 0;
++err:
++	kfree(data);
++	return status;
+ }
+ 
+ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
+ 				     unsigned long addr,
+ 				     unsigned long data, u16 size)
+ {
+-	unsigned long data1[2], *data_aligned;
++	unsigned long *data_aligned;
+ 	int status;
+ 
+-	data_aligned = PTR_ALIGN(&data1[0], 8);
++	data_aligned = kzalloc(RSI_MASTER_REG_BUF_SIZE, GFP_KERNEL);
++	if (!data_aligned)
++		return -ENOMEM;
++
++	data_aligned = PTR_ALIGN(data_aligned, 8);
+ 
+ 	if (size == 2) {
+ 		*data_aligned = ((data << 16) | (data & 0xFFFF));
+@@ -717,6 +726,7 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
+ 		rsi_dbg(ERR_ZONE,
+ 			"%s: Unable to set ms word to common reg\n",
+ 			__func__);
++		kfree(data_aligned);
+ 		return -EIO;
+ 	}
+ 	addr = addr & 0xFFFF;
+@@ -726,12 +736,12 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter,
+ 					(adapter,
+ 					 (addr | RSI_SD_REQUEST_MASTER),
+ 					 (u8 *)data_aligned, size);
+-	if (status < 0) {
++	if (status < 0)
+ 		rsi_dbg(ERR_ZONE,
+ 			"%s: Unable to do AHB reg write\n", __func__);
+-		return status;
+-	}
+-	return 0;
++
++	kfree(data_aligned);
++	return status;
+ }
+ 
+ /**
+diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
+index 49c549ba6682..34242d84bd7b 100644
+--- a/drivers/net/wireless/rsi/rsi_sdio.h
++++ b/drivers/net/wireless/rsi/rsi_sdio.h
+@@ -46,6 +46,8 @@ enum sdio_interrupt_type {
+ #define PKT_BUFF_AVAILABLE                      1
+ #define FW_ASSERT_IND                           2
+ 
++#define RSI_MASTER_REG_BUF_SIZE			12
++
+ #define RSI_DEVICE_BUFFER_STATUS_REGISTER       0xf3
+ #define RSI_FN1_INT_REGISTER                    0xf9
+ #define RSI_INT_ENABLE_REGISTER			0x04
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f81773570dfd..f5259912f049 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -379,6 +379,15 @@ static void nvme_put_ns(struct nvme_ns *ns)
+ 	kref_put(&ns->kref, nvme_free_ns);
+ }
+ 
++static inline void nvme_clear_nvme_request(struct request *req)
++{
++	if (!(req->rq_flags & RQF_DONTPREP)) {
++		nvme_req(req)->retries = 0;
++		nvme_req(req)->flags = 0;
++		req->rq_flags |= RQF_DONTPREP;
++	}
++}
++
+ struct request *nvme_alloc_request(struct request_queue *q,
+ 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
+ {
+@@ -395,6 +404,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
+ 		return req;
+ 
+ 	req->cmd_flags |= REQ_FAILFAST_DRIVER;
++	nvme_clear_nvme_request(req);
+ 	nvme_req(req)->cmd = cmd;
+ 
+ 	return req;
+@@ -611,11 +621,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ {
+ 	blk_status_t ret = BLK_STS_OK;
+ 
+-	if (!(req->rq_flags & RQF_DONTPREP)) {
+-		nvme_req(req)->retries = 0;
+-		nvme_req(req)->flags = 0;
+-		req->rq_flags |= RQF_DONTPREP;
+-	}
++	nvme_clear_nvme_request(req);
+ 
+ 	switch (req_op(req)) {
+ 	case REQ_OP_DRV_IN:
+@@ -745,6 +751,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
+ 		return PTR_ERR(req);
+ 
+ 	req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
++	nvme_req(req)->flags |= NVME_REQ_USERCMD;
+ 
+ 	if (ubuffer && bufflen) {
+ 		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
+index 8f0f34d06d46..124c458806df 100644
+--- a/drivers/nvme/host/fabrics.c
++++ b/drivers/nvme/host/fabrics.c
+@@ -536,6 +536,85 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
+ 	return NULL;
+ }
+ 
++blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl, struct request *rq,
++		bool queue_live, bool is_connected)
++{
++	struct nvme_command *cmd = nvme_req(rq)->cmd;
++
++	if (likely(ctrl->state == NVME_CTRL_LIVE && is_connected))
++		return BLK_STS_OK;
++
++	switch (ctrl->state) {
++	case NVME_CTRL_DELETING:
++		goto reject_io;
++
++	case NVME_CTRL_NEW:
++	case NVME_CTRL_CONNECTING:
++		if (!is_connected)
++			/*
++			 * This is the case of starting a new
++			 * association but connectivity was lost
++			 * before it was fully created. We need to
++			 * error the commands used to initialize the
++			 * controller so the reconnect can go into a
++			 * retry attempt. The commands should all be
++			 * marked REQ_FAILFAST_DRIVER, which will hit
++			 * the reject path below. Anything else will
++			 * be queued while the state settles.
++			 */
++			goto reject_or_queue_io;
++
++		if ((queue_live &&
++		     !(nvme_req(rq)->flags & NVME_REQ_USERCMD)) ||
++		    (!queue_live && blk_rq_is_passthrough(rq) &&
++		     cmd->common.opcode == nvme_fabrics_command &&
++		     cmd->fabrics.fctype == nvme_fabrics_type_connect))
++			/*
++			 * If queue is live, allow only commands that
++			 * are internally generated pass through. These
++			 * are commands on the admin queue to initialize
++			 * the controller. This will reject any ioctl
++			 * admin cmds received while initializing.
++			 *
++			 * If the queue is not live, allow only a
++			 * connect command. This will reject any ioctl
++			 * admin cmd as well as initialization commands
++			 * if the controller reverted the queue to non-live.
++			 */
++			return BLK_STS_OK;
++
++		/*
++		 * fall-thru to the reject_or_queue_io clause
++		 */
++		break;
++
++	/* these cases fall-thru
++	 * case NVME_CTRL_LIVE:
++	 * case NVME_CTRL_RESETTING:
++	 */
++	default:
++		break;
++	}
++
++reject_or_queue_io:
++	/*
++	 * Any other new io is something we're not in a state to send
++	 * to the device. Default action is to busy it and retry it
++	 * after the controller state is recovered. However, anything
++	 * marked for failfast or nvme multipath is immediately failed.
++	 * Note: commands used to initialize the controller will be
++	 *  marked for failfast.
++	 * Note: nvme cli/ioctl commands are marked for failfast.
++	 */
++	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
++		return BLK_STS_RESOURCE;
++
++reject_io:
++	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
++	return BLK_STS_IOERR;
++}
++EXPORT_SYMBOL_GPL(nvmf_check_if_ready);
++
+ static const match_table_t opt_tokens = {
+ 	{ NVMF_OPT_TRANSPORT,		"transport=%s"		},
+ 	{ NVMF_OPT_TRADDR,		"traddr=%s"		},
+@@ -608,8 +687,10 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
+ 			opts->discovery_nqn =
+ 				!(strcmp(opts->subsysnqn,
+ 					 NVME_DISC_SUBSYS_NAME));
+-			if (opts->discovery_nqn)
++			if (opts->discovery_nqn) {
++				opts->kato = 0;
+ 				opts->nr_io_queues = 0;
++			}
+ 			break;
+ 		case NVMF_OPT_TRADDR:
+ 			p = match_strdup(args);
+diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
+index a3145d90c1d2..ef46c915b7b5 100644
+--- a/drivers/nvme/host/fabrics.h
++++ b/drivers/nvme/host/fabrics.h
+@@ -157,36 +157,7 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
+ void nvmf_free_options(struct nvmf_ctrl_options *opts);
+ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
+ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+-
+-static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+-		struct request *rq)
+-{
+-	struct nvme_command *cmd = nvme_req(rq)->cmd;
+-
+-	/*
+-	 * We cannot accept any other command until the connect command has
+-	 * completed, so only allow connect to pass.
+-	 */
+-	if (!blk_rq_is_passthrough(rq) ||
+-	    cmd->common.opcode != nvme_fabrics_command ||
+-	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+-		/*
+-		 * Connecting state means transport disruption or initial
+-		 * establishment, which can take a long time and even might
+-		 * fail permanently, fail fast to give upper layers a chance
+-		 * to failover.
+-		 * Deleting state means that the ctrl will never accept commands
+-		 * again, fail it permanently.
+-		 */
+-		if (ctrl->state == NVME_CTRL_CONNECTING ||
+-		    ctrl->state == NVME_CTRL_DELETING) {
+-			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+-			return BLK_STS_IOERR;
+-		}
+-		return BLK_STS_RESOURCE; /* try again later */
+-	}
+-
+-	return BLK_STS_OK;
+-}
++blk_status_t nvmf_check_if_ready(struct nvme_ctrl *ctrl,
++	struct request *rq, bool queue_live, bool is_connected);
+ 
+ #endif /* _NVME_FABRICS_H */
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 1dc1387b7134..6044f891c3ce 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2191,7 +2191,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ 	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
+ 	struct nvme_command *sqe = &cmdiu->sqe;
+ 	u32 csn;
+-	int ret;
++	int ret, opstate;
+ 
+ 	/*
+ 	 * before attempting to send the io, check to see if we believe
+@@ -2269,6 +2269,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ 					queue->lldd_handle, &op->fcp_req);
+ 
+ 	if (ret) {
++		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
++		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
++
+ 		if (!(op->flags & FCOP_FLAGS_AEN))
+ 			nvme_fc_unmap_data(ctrl, op->rq, op);
+ 
+@@ -2284,14 +2287,6 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
+ 	return BLK_STS_OK;
+ }
+ 
+-static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+-		struct request *rq)
+-{
+-	if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+-		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+-	return BLK_STS_OK;
+-}
+-
+ static blk_status_t
+ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 			const struct blk_mq_queue_data *bd)
+@@ -2307,7 +2302,9 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	u32 data_len;
+ 	blk_status_t ret;
+ 
+-	ret = nvme_fc_is_ready(queue, rq);
++	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
++		test_bit(NVME_FC_Q_LIVE, &queue->flags),
++		ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 013380641ddf..0133f3d2ce94 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -109,6 +109,7 @@ struct nvme_request {
+ 
+ enum {
+ 	NVME_REQ_CANCELLED		= (1 << 0),
++	NVME_REQ_USERCMD		= (1 << 1),
+ };
+ 
+ static inline struct nvme_request *nvme_req(struct request *req)
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f6648610d153..dba797b57d73 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2470,10 +2470,13 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
+ 	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+ 		/*
+ 		 * Samsung SSD 960 EVO drops off the PCIe bus after system
+-		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
++		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
++		 * within few minutes after bootup on a Coffee Lake board -
++		 * ASUS PRIME Z370-A
+ 		 */
+ 		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+-		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
++		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
++		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
+ 			return NVME_QUIRK_NO_APST;
+ 	}
+ 
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 4d84a73ee12d..02dd232951b9 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -1594,17 +1594,6 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
+ 	return BLK_EH_HANDLED;
+ }
+ 
+-/*
+- * We cannot accept any other command until the Connect command has completed.
+- */
+-static inline blk_status_t
+-nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+-{
+-	if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+-		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+-	return BLK_STS_OK;
+-}
+-
+ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 		const struct blk_mq_queue_data *bd)
+ {
+@@ -1620,7 +1609,8 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 
+ 	WARN_ON_ONCE(rq->tag < 0);
+ 
+-	ret = nvme_rdma_is_ready(queue, rq);
++	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, rq,
++		test_bit(NVME_RDMA_Q_LIVE, &queue->flags), true);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 861d1509b22b..e10987f87603 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -149,14 +149,6 @@ nvme_loop_timeout(struct request *rq, bool reserved)
+ 	return BLK_EH_HANDLED;
+ }
+ 
+-static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+-		struct request *rq)
+-{
+-	if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+-		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+-	return BLK_STS_OK;
+-}
+-
+ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 		const struct blk_mq_queue_data *bd)
+ {
+@@ -166,7 +158,8 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
+ 	blk_status_t ret;
+ 
+-	ret = nvme_loop_is_ready(queue, req);
++	ret = nvmf_check_if_ready(&queue->ctrl->ctrl, req,
++		test_bit(NVME_LOOP_Q_LIVE, &queue->flags), true);
+ 	if (unlikely(ret))
+ 		return ret;
+ 
+diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
+index 41b740aed3a3..69bd98421eb1 100644
+--- a/drivers/parisc/lba_pci.c
++++ b/drivers/parisc/lba_pci.c
+@@ -1403,9 +1403,27 @@ lba_hw_init(struct lba_device *d)
+ 		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
+ 	}
+ 
+-	/* Set HF mode as the default (vs. -1 mode). */
++
++	/*
++	 * Hard Fail vs. Soft Fail on PCI "Master Abort".
++	 *
++	 * "Master Abort" means the MMIO transaction timed out - usually due to
++	 * the device not responding to an MMIO read. We would like HF to be
++	 * enabled to find driver problems, though it means the system will
++	 * crash with a HPMC.
++	 *
++	 * In SoftFail mode "~0L" is returned as a result of a timeout on the
++	 * pci bus. This is like how PCI busses on x86 and most other
++	 * architectures behave.  In order to increase compatibility with
++	 * existing (x86) PCI hardware and existing Linux drivers we enable
++	 * Soft Faul mode on PA-RISC now too.
++	 */
+         stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
++#if defined(ENABLE_HARDFAIL)
+ 	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
++#else
++	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
++#endif
+ 
+ 	/*
+ 	** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index eede34e5ada2..98da1e137071 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -1225,11 +1225,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
+ 	int error;
+ 
+ 	/*
+-	 * If pci_dev->driver is not set (unbound), the device should
+-	 * always remain in D0 regardless of the runtime PM status
++	 * If pci_dev->driver is not set (unbound), we leave the device in D0,
++	 * but it may go to D3cold when the bridge above it runtime suspends.
++	 * Save its config space in case that happens.
+ 	 */
+-	if (!pci_dev->driver)
++	if (!pci_dev->driver) {
++		pci_save_state(pci_dev);
+ 		return 0;
++	}
+ 
+ 	if (!pm || !pm->runtime_suspend)
+ 		return -ENOSYS;
+@@ -1277,16 +1280,18 @@ static int pci_pm_runtime_resume(struct device *dev)
+ 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ 
+ 	/*
+-	 * If pci_dev->driver is not set (unbound), the device should
+-	 * always remain in D0 regardless of the runtime PM status
++	 * Restoring config space is necessary even if the device is not bound
++	 * to a driver because although we left it in D0, it may have gone to
++	 * D3cold when the bridge above it runtime suspended.
+ 	 */
++	pci_restore_standard_config(pci_dev);
++
+ 	if (!pci_dev->driver)
+ 		return 0;
+ 
+ 	if (!pm || !pm->runtime_resume)
+ 		return -ENOSYS;
+ 
+-	pci_restore_standard_config(pci_dev);
+ 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ 	pci_enable_wake(pci_dev, PCI_D0, false);
+ 	pci_fixup_device(pci_fixup_resume, pci_dev);
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 81241f981ad7..88598dbdc1b0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3903,6 +3903,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
+ 			 quirk_dma_func1_alias);
++/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
++			 quirk_dma_func1_alias);
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ 			 quirk_dma_func1_alias);
+diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
+index c3b615c94b4b..8c8caec3a72c 100644
+--- a/drivers/pcmcia/cs.c
++++ b/drivers/pcmcia/cs.c
+@@ -452,17 +452,20 @@ static int socket_insert(struct pcmcia_socket *skt)
+ 
+ static int socket_suspend(struct pcmcia_socket *skt)
+ {
+-	if (skt->state & SOCKET_SUSPEND)
++	if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME))
+ 		return -EBUSY;
+ 
+ 	mutex_lock(&skt->ops_mutex);
+-	skt->suspended_state = skt->state;
++	/* store state on first suspend, but not after spurious wakeups */
++	if (!(skt->state & SOCKET_IN_RESUME))
++		skt->suspended_state = skt->state;
+ 
+ 	skt->socket = dead_socket;
+ 	skt->ops->set_socket(skt, &skt->socket);
+ 	if (skt->ops->suspend)
+ 		skt->ops->suspend(skt);
+ 	skt->state |= SOCKET_SUSPEND;
++	skt->state &= ~SOCKET_IN_RESUME;
+ 	mutex_unlock(&skt->ops_mutex);
+ 	return 0;
+ }
+@@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
+ 	skt->ops->set_socket(skt, &skt->socket);
+ 	if (skt->state & SOCKET_PRESENT)
+ 		skt->resume_status = socket_setup(skt, resume_delay);
++	skt->state |= SOCKET_IN_RESUME;
+ 	mutex_unlock(&skt->ops_mutex);
+ 	return 0;
+ }
+@@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
+ 	int ret = 0;
+ 
+ 	mutex_lock(&skt->ops_mutex);
+-	skt->state &= ~SOCKET_SUSPEND;
++	skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME);
+ 	mutex_unlock(&skt->ops_mutex);
+ 
+ 	if (!(skt->state & SOCKET_PRESENT)) {
+diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
+index 6765beadea95..03ec43802909 100644
+--- a/drivers/pcmcia/cs_internal.h
++++ b/drivers/pcmcia/cs_internal.h
+@@ -70,6 +70,7 @@ struct pccard_resource_ops {
+ /* Flags in socket state */
+ #define SOCKET_PRESENT		0x0008
+ #define SOCKET_INUSE		0x0010
++#define SOCKET_IN_RESUME	0x0040
+ #define SOCKET_SUSPEND		0x0080
+ #define SOCKET_WIN_REQ(i)	(0x0100<<(i))
+ #define SOCKET_CARDBUS		0x8000
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
+index e17f0351ccc2..2526971f9929 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -751,8 +751,6 @@ static int qcom_qmp_phy_poweroff(struct phy *phy)
+ 	struct qmp_phy *qphy = phy_get_drvdata(phy);
+ 	struct qcom_qmp *qmp = qphy->qmp;
+ 
+-	clk_disable_unprepare(qphy->pipe_clk);
+-
+ 	regulator_bulk_disable(qmp->cfg->num_vregs, qmp->vregs);
+ 
+ 	return 0;
+@@ -936,6 +934,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
+ 	const struct qmp_phy_cfg *cfg = qmp->cfg;
+ 	int i = cfg->num_clks;
+ 
++	clk_disable_unprepare(qphy->pipe_clk);
++
+ 	/* PHY reset */
+ 	qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ 
+diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
+index f1b24f18e9b2..b0d10934413f 100644
+--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
+@@ -76,6 +76,10 @@
+ #define PHYCTRL_OTAPDLYSEL_MASK		0xf
+ #define PHYCTRL_OTAPDLYSEL_SHIFT	0x7
+ 
++#define PHYCTRL_IS_CALDONE(x) \
++	((((x) >> PHYCTRL_CALDONE_SHIFT) & \
++	  PHYCTRL_CALDONE_MASK) == PHYCTRL_CALDONE_DONE)
++
+ struct rockchip_emmc_phy {
+ 	unsigned int	reg_offset;
+ 	struct regmap	*reg_base;
+@@ -90,6 +94,7 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
+ 	unsigned int freqsel = PHYCTRL_FREQSEL_200M;
+ 	unsigned long rate;
+ 	unsigned long timeout;
++	int ret;
+ 
+ 	/*
+ 	 * Keep phyctrl_pdb and phyctrl_endll low to allow
+@@ -160,17 +165,19 @@ static int rockchip_emmc_phy_power(struct phy *phy, bool on_off)
+ 				   PHYCTRL_PDB_SHIFT));
+ 
+ 	/*
+-	 * According to the user manual, it asks driver to
+-	 * wait 5us for calpad busy trimming
++	 * According to the user manual, it asks driver to wait 5us for
++	 * calpad busy trimming. However it is documented that this value is
++	 * PVT(A.K.A process,voltage and temperature) relevant, so some
++	 * failure cases are found which indicates we should be more tolerant
++	 * to calpad busy trimming.
+ 	 */
+-	udelay(5);
+-	regmap_read(rk_phy->reg_base,
+-		    rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
+-		    &caldone);
+-	caldone = (caldone >> PHYCTRL_CALDONE_SHIFT) & PHYCTRL_CALDONE_MASK;
+-	if (caldone != PHYCTRL_CALDONE_DONE) {
+-		pr_err("rockchip_emmc_phy_power: caldone timeout.\n");
+-		return -ETIMEDOUT;
++	ret = regmap_read_poll_timeout(rk_phy->reg_base,
++				       rk_phy->reg_offset + GRF_EMMCPHY_STATUS,
++				       caldone, PHYCTRL_IS_CALDONE(caldone),
++				       0, 50);
++	if (ret) {
++		pr_err("%s: caldone failed, ret=%d\n", __func__, ret);
++		return ret;
+ 	}
+ 
+ 	/* Set the frequency of the DLL operation */
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index 1ff6c3573493..b601039d6c69 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -122,8 +122,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
+ 			/* OK let's just assume this will appear later then */
+ 			return -EPROBE_DEFER;
+ 		}
+-		if (!pctldev)
+-			pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
++		/* If we're creating a hog we can use the passed pctldev */
++		if (pctldev && (np_pctldev == p->dev->of_node))
++			break;
++		pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
+ 		if (pctldev)
+ 			break;
+ 		/* Do not defer probing of hogs (circular loop) */
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 644c5beb05cb..e86d23279ac1 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -771,6 +771,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ {
+ 	int status, ret;
+ 	bool mirror = false;
++	struct regmap_config *one_regmap_config = NULL;
+ 
+ 	mutex_init(&mcp->lock);
+ 
+@@ -791,22 +792,36 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ 	switch (type) {
+ #ifdef CONFIG_SPI_MASTER
+ 	case MCP_TYPE_S08:
+-		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
+-					       &mcp23x08_regmap);
+-		mcp->reg_shift = 0;
+-		mcp->chip.ngpio = 8;
+-		mcp->chip.label = "mcp23s08";
+-		break;
+-
+ 	case MCP_TYPE_S17:
++		switch (type) {
++		case MCP_TYPE_S08:
++			one_regmap_config =
++				devm_kmemdup(dev, &mcp23x08_regmap,
++					sizeof(struct regmap_config), GFP_KERNEL);
++			mcp->reg_shift = 0;
++			mcp->chip.ngpio = 8;
++			mcp->chip.label = "mcp23s08";
++			break;
++		case MCP_TYPE_S17:
++			one_regmap_config =
++				devm_kmemdup(dev, &mcp23x17_regmap,
++					sizeof(struct regmap_config), GFP_KERNEL);
++			mcp->reg_shift = 1;
++			mcp->chip.ngpio = 16;
++			mcp->chip.label = "mcp23s17";
++			break;
++		}
++		if (!one_regmap_config)
++			return -ENOMEM;
++
++		one_regmap_config->name = devm_kasprintf(dev, GFP_KERNEL, "%d", (addr & ~0x40) >> 1);
+ 		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
+-					       &mcp23x17_regmap);
+-		mcp->reg_shift = 1;
+-		mcp->chip.ngpio = 16;
+-		mcp->chip.label = "mcp23s17";
++					       one_regmap_config);
+ 		break;
+ 
+ 	case MCP_TYPE_S18:
++		if (!one_regmap_config)
++			return -ENOMEM;
+ 		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
+ 					       &mcp23x17_regmap);
+ 		mcp->reg_shift = 1;
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 495432f3341b..95e5c5ea40af 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -818,7 +818,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
+ 		return -EINVAL;
+ 
+ 	chip = &pctrl->chip;
+-	chip->base = 0;
++	chip->base = -1;
+ 	chip->ngpio = ngpio;
+ 	chip->label = dev_name(pctrl->dev);
+ 	chip->parent = pctrl->dev;
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+index e5807d1ce0dc..74ee48303156 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+@@ -1,7 +1,7 @@
+ /*
+  * R8A7796 processor support - PFC hardware block.
+  *
+- * Copyright (C) 2016 Renesas Electronics Corp.
++ * Copyright (C) 2016-2017 Renesas Electronics Corp.
+  *
+  * This file is based on the drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+  *
+@@ -477,7 +477,7 @@ FM(IP16_31_28)	IP16_31_28	FM(IP17_31_28)	IP17_31_28
+ #define MOD_SEL1_26		FM(SEL_TIMER_TMU_0)	FM(SEL_TIMER_TMU_1)
+ #define MOD_SEL1_25_24		FM(SEL_SSP1_1_0)	FM(SEL_SSP1_1_1)	FM(SEL_SSP1_1_2)	FM(SEL_SSP1_1_3)
+ #define MOD_SEL1_23_22_21	FM(SEL_SSP1_0_0)	FM(SEL_SSP1_0_1)	FM(SEL_SSP1_0_2)	FM(SEL_SSP1_0_3)	FM(SEL_SSP1_0_4)	F_(0, 0)		F_(0, 0)		F_(0, 0)
+-#define MOD_SEL1_20		FM(SEL_SSI_0)		FM(SEL_SSI_1)
++#define MOD_SEL1_20		FM(SEL_SSI1_0)		FM(SEL_SSI1_1)
+ #define MOD_SEL1_19		FM(SEL_SPEED_PULSE_0)	FM(SEL_SPEED_PULSE_1)
+ #define MOD_SEL1_18_17		FM(SEL_SIMCARD_0)	FM(SEL_SIMCARD_1)	FM(SEL_SIMCARD_2)	FM(SEL_SIMCARD_3)
+ #define MOD_SEL1_16		FM(SEL_SDHI2_0)		FM(SEL_SDHI2_1)
+@@ -1218,7 +1218,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP13_11_8,	HSCK0),
+ 	PINMUX_IPSR_MSEL(IP13_11_8,	MSIOF1_SCK_D,		SEL_MSIOF1_3),
+ 	PINMUX_IPSR_MSEL(IP13_11_8,	AUDIO_CLKB_A,		SEL_ADG_B_0),
+-	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP13_11_8,	SSI_SDATA1_B,		SEL_SSI1_1),
+ 	PINMUX_IPSR_MSEL(IP13_11_8,	TS_SCK0_D,		SEL_TSIF0_3),
+ 	PINMUX_IPSR_MSEL(IP13_11_8,	STP_ISCLK_0_D,		SEL_SSP1_0_3),
+ 	PINMUX_IPSR_MSEL(IP13_11_8,	RIF0_CLK_C,		SEL_DRIF0_2),
+@@ -1226,14 +1226,14 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP13_15_12,	HRX0),
+ 	PINMUX_IPSR_MSEL(IP13_15_12,	MSIOF1_RXD_D,		SEL_MSIOF1_3),
+-	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP13_15_12,	SSI_SDATA2_B,		SEL_SSI2_1),
+ 	PINMUX_IPSR_MSEL(IP13_15_12,	TS_SDEN0_D,		SEL_TSIF0_3),
+ 	PINMUX_IPSR_MSEL(IP13_15_12,	STP_ISEN_0_D,		SEL_SSP1_0_3),
+ 	PINMUX_IPSR_MSEL(IP13_15_12,	RIF0_D0_C,		SEL_DRIF0_2),
+ 
+ 	PINMUX_IPSR_GPSR(IP13_19_16,	HTX0),
+ 	PINMUX_IPSR_MSEL(IP13_19_16,	MSIOF1_TXD_D,		SEL_MSIOF1_3),
+-	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP13_19_16,	SSI_SDATA9_B,		SEL_SSI9_1),
+ 	PINMUX_IPSR_MSEL(IP13_19_16,	TS_SDAT0_D,		SEL_TSIF0_3),
+ 	PINMUX_IPSR_MSEL(IP13_19_16,	STP_ISD_0_D,		SEL_SSP1_0_3),
+ 	PINMUX_IPSR_MSEL(IP13_19_16,	RIF0_D1_C,		SEL_DRIF0_2),
+@@ -1241,7 +1241,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP13_23_20,	HCTS0_N),
+ 	PINMUX_IPSR_MSEL(IP13_23_20,	RX2_B,			SEL_SCIF2_1),
+ 	PINMUX_IPSR_MSEL(IP13_23_20,	MSIOF1_SYNC_D,		SEL_MSIOF1_3),
+-	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP13_23_20,	SSI_SCK9_A,		SEL_SSI9_0),
+ 	PINMUX_IPSR_MSEL(IP13_23_20,	TS_SPSYNC0_D,		SEL_TSIF0_3),
+ 	PINMUX_IPSR_MSEL(IP13_23_20,	STP_ISSYNC_0_D,		SEL_SSP1_0_3),
+ 	PINMUX_IPSR_MSEL(IP13_23_20,	RIF0_SYNC_C,		SEL_DRIF0_2),
+@@ -1250,7 +1250,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_GPSR(IP13_27_24,	HRTS0_N),
+ 	PINMUX_IPSR_MSEL(IP13_27_24,	TX2_B,			SEL_SCIF2_1),
+ 	PINMUX_IPSR_MSEL(IP13_27_24,	MSIOF1_SS1_D,		SEL_MSIOF1_3),
+-	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP13_27_24,	SSI_WS9_A,		SEL_SSI9_0),
+ 	PINMUX_IPSR_MSEL(IP13_27_24,	STP_IVCXO27_0_D,	SEL_SSP1_0_3),
+ 	PINMUX_IPSR_MSEL(IP13_27_24,	BPFCLK_A,		SEL_FM_0),
+ 	PINMUX_IPSR_GPSR(IP13_27_24,	AUDIO_CLKOUT2_A),
+@@ -1265,7 +1265,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_MSEL(IP14_3_0,	RX5_A,			SEL_SCIF5_0),
+ 	PINMUX_IPSR_MSEL(IP14_3_0,	NFWP_N_A,		SEL_NDF_0),
+ 	PINMUX_IPSR_MSEL(IP14_3_0,	AUDIO_CLKA_C,		SEL_ADG_A_2),
+-	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP14_3_0,	SSI_SCK2_A,		SEL_SSI2_0),
+ 	PINMUX_IPSR_MSEL(IP14_3_0,	STP_IVCXO27_0_C,	SEL_SSP1_0_2),
+ 	PINMUX_IPSR_GPSR(IP14_3_0,	AUDIO_CLKOUT3_A),
+ 	PINMUX_IPSR_MSEL(IP14_3_0,	TCLK1_B,		SEL_TIMER_TMU_1),
+@@ -1274,7 +1274,7 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_MSEL(IP14_7_4,	TX5_A,			SEL_SCIF5_0),
+ 	PINMUX_IPSR_MSEL(IP14_7_4,	MSIOF1_SS2_D,		SEL_MSIOF1_3),
+ 	PINMUX_IPSR_MSEL(IP14_7_4,	AUDIO_CLKC_A,		SEL_ADG_C_0),
+-	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP14_7_4,	SSI_WS2_A,		SEL_SSI2_0),
+ 	PINMUX_IPSR_MSEL(IP14_7_4,	STP_OPWM_0_D,		SEL_SSP1_0_3),
+ 	PINMUX_IPSR_GPSR(IP14_7_4,	AUDIO_CLKOUT_D),
+ 	PINMUX_IPSR_MSEL(IP14_7_4,	SPEEDIN_B,		SEL_SPEED_PULSE_1),
+@@ -1302,10 +1302,10 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_MSEL(IP14_31_28,	MSIOF1_SS2_F,		SEL_MSIOF1_5),
+ 
+ 	/* IPSR15 */
+-	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP15_3_0,	SSI_SDATA1_A,		SEL_SSI1_0),
+ 
+-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI_0),
+-	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SDATA2_A,		SEL_SSI2_0),
++	PINMUX_IPSR_MSEL(IP15_7_4,	SSI_SCK1_B,		SEL_SSI1_1),
+ 
+ 	PINMUX_IPSR_GPSR(IP15_11_8,	SSI_SCK349),
+ 	PINMUX_IPSR_MSEL(IP15_11_8,	MSIOF1_SS1_A,		SEL_MSIOF1_0),
+@@ -1391,11 +1391,11 @@ static const u16 pinmux_data[] = {
+ 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF1_D1_A,		SEL_DRIF1_0),
+ 	PINMUX_IPSR_MSEL(IP16_27_24,	RIF3_D1_A,		SEL_DRIF3_0),
+ 
+-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_SDATA9_A,		SEL_SSI9_0),
+ 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK2_B,		SEL_HSCIF2_1),
+ 	PINMUX_IPSR_MSEL(IP16_31_28,	MSIOF1_SS1_C,		SEL_MSIOF1_2),
+ 	PINMUX_IPSR_MSEL(IP16_31_28,	HSCK1_A,		SEL_HSCIF1_0),
+-	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP16_31_28,	SSI_WS1_B,		SEL_SSI1_1),
+ 	PINMUX_IPSR_GPSR(IP16_31_28,	SCK1),
+ 	PINMUX_IPSR_MSEL(IP16_31_28,	STP_IVCXO27_1_A,	SEL_SSP1_1_0),
+ 	PINMUX_IPSR_MSEL(IP16_31_28,	SCK5_A,			SEL_SCIF5_0),
+@@ -1427,7 +1427,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP17_19_16,	USB1_PWEN),
+ 	PINMUX_IPSR_MSEL(IP17_19_16,	SIM0_CLK_C,		SEL_SIMCARD_2),
+-	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP17_19_16,	SSI_SCK1_A,		SEL_SSI1_0),
+ 	PINMUX_IPSR_MSEL(IP17_19_16,	TS_SCK0_E,		SEL_TSIF0_4),
+ 	PINMUX_IPSR_MSEL(IP17_19_16,	STP_ISCLK_0_E,		SEL_SSP1_0_4),
+ 	PINMUX_IPSR_MSEL(IP17_19_16,	FMCLK_B,		SEL_FM_1),
+@@ -1437,7 +1437,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP17_23_20,	USB1_OVC),
+ 	PINMUX_IPSR_MSEL(IP17_23_20,	MSIOF1_SS2_C,		SEL_MSIOF1_2),
+-	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI_0),
++	PINMUX_IPSR_MSEL(IP17_23_20,	SSI_WS1_A,		SEL_SSI1_0),
+ 	PINMUX_IPSR_MSEL(IP17_23_20,	TS_SDAT0_E,		SEL_TSIF0_4),
+ 	PINMUX_IPSR_MSEL(IP17_23_20,	STP_ISD_0_E,		SEL_SSP1_0_4),
+ 	PINMUX_IPSR_MSEL(IP17_23_20,	FMIN_B,			SEL_FM_1),
+@@ -1447,7 +1447,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP17_27_24,	USB30_PWEN),
+ 	PINMUX_IPSR_GPSR(IP17_27_24,	AUDIO_CLKOUT_B),
+-	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP17_27_24,	SSI_SCK2_B,		SEL_SSI2_1),
+ 	PINMUX_IPSR_MSEL(IP17_27_24,	TS_SDEN1_D,		SEL_TSIF1_3),
+ 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_ISEN_1_D,		SEL_SSP1_1_3),
+ 	PINMUX_IPSR_MSEL(IP17_27_24,	STP_OPWM_0_E,		SEL_SSP1_0_4),
+@@ -1459,7 +1459,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP17_31_28,	USB30_OVC),
+ 	PINMUX_IPSR_GPSR(IP17_31_28,	AUDIO_CLKOUT1_B),
+-	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP17_31_28,	SSI_WS2_B,		SEL_SSI2_1),
+ 	PINMUX_IPSR_MSEL(IP17_31_28,	TS_SPSYNC1_D,		SEL_TSIF1_3),
+ 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_ISSYNC_1_D,		SEL_SSP1_1_3),
+ 	PINMUX_IPSR_MSEL(IP17_31_28,	STP_IVCXO27_0_E,	SEL_SSP1_0_4),
+@@ -1470,7 +1470,7 @@ static const u16 pinmux_data[] = {
+ 	/* IPSR18 */
+ 	PINMUX_IPSR_GPSR(IP18_3_0,	GP6_30),
+ 	PINMUX_IPSR_GPSR(IP18_3_0,	AUDIO_CLKOUT2_B),
+-	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP18_3_0,	SSI_SCK9_B,		SEL_SSI9_1),
+ 	PINMUX_IPSR_MSEL(IP18_3_0,	TS_SDEN0_E,		SEL_TSIF0_4),
+ 	PINMUX_IPSR_MSEL(IP18_3_0,	STP_ISEN_0_E,		SEL_SSP1_0_4),
+ 	PINMUX_IPSR_MSEL(IP18_3_0,	RIF2_D0_B,		SEL_DRIF2_1),
+@@ -1480,7 +1480,7 @@ static const u16 pinmux_data[] = {
+ 
+ 	PINMUX_IPSR_GPSR(IP18_7_4,	GP6_31),
+ 	PINMUX_IPSR_GPSR(IP18_7_4,	AUDIO_CLKOUT3_B),
+-	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI_1),
++	PINMUX_IPSR_MSEL(IP18_7_4,	SSI_WS9_B,		SEL_SSI9_1),
+ 	PINMUX_IPSR_MSEL(IP18_7_4,	TS_SPSYNC0_E,		SEL_TSIF0_4),
+ 	PINMUX_IPSR_MSEL(IP18_7_4,	STP_ISSYNC_0_E,		SEL_SSP1_0_4),
+ 	PINMUX_IPSR_MSEL(IP18_7_4,	RIF2_D1_B,		SEL_DRIF2_1),
+diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
+index 2485c80a9fdd..33fb2a20458a 100644
+--- a/drivers/platform/x86/dell-smbios-base.c
++++ b/drivers/platform/x86/dell-smbios-base.c
+@@ -514,7 +514,7 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 		continue;
+ 
+ loop_fail_create_value:
+-		kfree(value_name);
++		kfree(location_name);
+ 		goto out_unwind_strings;
+ 	}
+ 	smbios_attribute_group.attrs = token_attrs;
+@@ -525,7 +525,7 @@ static int build_tokens_sysfs(struct platform_device *dev)
+ 	return 0;
+ 
+ out_unwind_strings:
+-	for (i = i-1; i > 0; i--) {
++	while (i--) {
+ 		kfree(token_location_attrs[i].attr.name);
+ 		kfree(token_value_attrs[i].attr.name);
+ 	}
+diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
+index 4cfa3f0cd689..cc7c516bb417 100644
+--- a/drivers/power/supply/ltc2941-battery-gauge.c
++++ b/drivers/power/supply/ltc2941-battery-gauge.c
+@@ -317,15 +317,15 @@ static int ltc294x_get_temperature(const struct ltc294x_info *info, int *val)
+ 
+ 	if (info->id == LTC2942_ID) {
+ 		reg = LTC2942_REG_TEMPERATURE_MSB;
+-		value = 60000;	/* Full-scale is 600 Kelvin */
++		value = 6000;	/* Full-scale is 600 Kelvin */
+ 	} else {
+ 		reg = LTC2943_REG_TEMPERATURE_MSB;
+-		value = 51000;	/* Full-scale is 510 Kelvin */
++		value = 5100;	/* Full-scale is 510 Kelvin */
+ 	}
+ 	ret = ltc294x_read_regs(info->client, reg, &datar[0], 2);
+ 	value *= (datar[0] << 8) | datar[1];
+-	/* Convert to centidegrees  */
+-	*val = value / 0xFFFF - 27215;
++	/* Convert to tenths of degree Celsius */
++	*val = value / 0xFFFF - 2722;
+ 	return ret;
+ }
+ 
+diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
+index 35dde81b1c9b..1a568df383db 100644
+--- a/drivers/power/supply/max17042_battery.c
++++ b/drivers/power/supply/max17042_battery.c
+@@ -1053,6 +1053,7 @@ static int max17042_probe(struct i2c_client *client,
+ 
+ 	i2c_set_clientdata(client, chip);
+ 	psy_cfg.drv_data = chip;
++	psy_cfg.of_node = dev->of_node;
+ 
+ 	/* When current is not measured,
+ 	 * CURRENT_NOW and CURRENT_AVG properties should be invisible. */
+diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
+index 0fce06acfaec..a2eb50719c7b 100644
+--- a/drivers/regulator/gpio-regulator.c
++++ b/drivers/regulator/gpio-regulator.c
+@@ -271,8 +271,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 	drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+ 	if (drvdata->desc.name == NULL) {
+ 		dev_err(&pdev->dev, "Failed to allocate supply name\n");
+-		ret = -ENOMEM;
+-		goto err;
++		return -ENOMEM;
+ 	}
+ 
+ 	if (config->nr_gpios != 0) {
+@@ -292,7 +291,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 				dev_err(&pdev->dev,
+ 					"Could not obtain regulator setting GPIOs: %d\n",
+ 					ret);
+-			goto err_memstate;
++			goto err_memgpio;
+ 		}
+ 	}
+ 
+@@ -303,7 +302,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 	if (drvdata->states == NULL) {
+ 		dev_err(&pdev->dev, "Failed to allocate state data\n");
+ 		ret = -ENOMEM;
+-		goto err_memgpio;
++		goto err_stategpio;
+ 	}
+ 	drvdata->nr_states = config->nr_states;
+ 
+@@ -324,7 +323,7 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 	default:
+ 		dev_err(&pdev->dev, "No regulator type set\n");
+ 		ret = -EINVAL;
+-		goto err_memgpio;
++		goto err_memstate;
+ 	}
+ 
+ 	/* build initial state from gpio init data. */
+@@ -361,22 +360,21 @@ static int gpio_regulator_probe(struct platform_device *pdev)
+ 	if (IS_ERR(drvdata->dev)) {
+ 		ret = PTR_ERR(drvdata->dev);
+ 		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
+-		goto err_stategpio;
++		goto err_memstate;
+ 	}
+ 
+ 	platform_set_drvdata(pdev, drvdata);
+ 
+ 	return 0;
+ 
+-err_stategpio:
+-	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
+ err_memstate:
+ 	kfree(drvdata->states);
++err_stategpio:
++	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
+ err_memgpio:
+ 	kfree(drvdata->gpios);
+ err_name:
+ 	kfree(drvdata->desc.name);
+-err:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
+index 092ed6efb3ec..f47264fa1940 100644
+--- a/drivers/regulator/of_regulator.c
++++ b/drivers/regulator/of_regulator.c
+@@ -321,6 +321,7 @@ int of_regulator_match(struct device *dev, struct device_node *node,
+ 				dev_err(dev,
+ 					"failed to parse DT for regulator %s\n",
+ 					child->name);
++				of_node_put(child);
+ 				return -EINVAL;
+ 			}
+ 			match->of_node = of_node_get(child);
+diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
+index 633268e9d550..05bcbce2013a 100644
+--- a/drivers/remoteproc/imx_rproc.c
++++ b/drivers/remoteproc/imx_rproc.c
+@@ -339,8 +339,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	dcfg = of_device_get_match_data(dev);
+-	if (!dcfg)
+-		return -EINVAL;
++	if (!dcfg) {
++		ret = -EINVAL;
++		goto err_put_rproc;
++	}
+ 
+ 	priv = rproc->priv;
+ 	priv->rproc = rproc;
+diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
+index e96b85579f21..3c800642134e 100644
+--- a/drivers/s390/cio/vfio_ccw_fsm.c
++++ b/drivers/s390/cio/vfio_ccw_fsm.c
+@@ -129,6 +129,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
+ 	if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
+ 		orb = (union orb *)io_region->orb_area;
+ 
++		/* Don't try to build a cp if transport mode is specified. */
++		if (orb->tm.b) {
++			io_region->ret_code = -EOPNOTSUPP;
++			goto err_out;
++		}
+ 		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
+ 					      orb);
+ 		if (io_region->ret_code)
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 9be34d37c356..3f3cb72e0c0c 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -525,6 +525,8 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode)
+ 	struct scsi_cd *cd;
+ 	int ret = -ENXIO;
+ 
++	check_disk_change(bdev);
++
+ 	mutex_lock(&sr_mutex);
+ 	cd = scsi_cd_get(bdev->bd_disk);
+ 	if (cd) {
+@@ -585,18 +587,28 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ static unsigned int sr_block_check_events(struct gendisk *disk,
+ 					  unsigned int clearing)
+ {
+-	struct scsi_cd *cd = scsi_cd(disk);
++	unsigned int ret = 0;
++	struct scsi_cd *cd;
+ 
+-	if (atomic_read(&cd->device->disk_events_disable_depth))
++	cd = scsi_cd_get(disk);
++	if (!cd)
+ 		return 0;
+ 
+-	return cdrom_check_events(&cd->cdi, clearing);
++	if (!atomic_read(&cd->device->disk_events_disable_depth))
++		ret = cdrom_check_events(&cd->cdi, clearing);
++
++	scsi_cd_put(cd);
++	return ret;
+ }
+ 
+ static int sr_block_revalidate_disk(struct gendisk *disk)
+ {
+-	struct scsi_cd *cd = scsi_cd(disk);
+ 	struct scsi_sense_hdr sshdr;
++	struct scsi_cd *cd;
++
++	cd = scsi_cd_get(disk);
++	if (!cd)
++		return -ENXIO;
+ 
+ 	/* if the unit is not ready, nothing more to do */
+ 	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))
+@@ -605,6 +617,7 @@ static int sr_block_revalidate_disk(struct gendisk *disk)
+ 	sr_cd_check(&cd->cdi);
+ 	get_sectorsize(cd);
+ out:
++	scsi_cd_put(cd);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
+index 2a21f2d48592..35fab1e18adc 100644
+--- a/drivers/scsi/sr_ioctl.c
++++ b/drivers/scsi/sr_ioctl.c
+@@ -188,9 +188,13 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
+ 	struct scsi_device *SDev;
+ 	struct scsi_sense_hdr sshdr;
+ 	int result, err = 0, retries = 0;
++	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL;
+ 
+ 	SDev = cd->device;
+ 
++	if (cgc->sense)
++		senseptr = sense_buffer;
++
+       retry:
+ 	if (!scsi_block_when_processing_errors(SDev)) {
+ 		err = -ENODEV;
+@@ -198,10 +202,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
+ 	}
+ 
+ 	result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
+-			      cgc->buffer, cgc->buflen,
+-			      (unsigned char *)cgc->sense, &sshdr,
++			      cgc->buffer, cgc->buflen, senseptr, &sshdr,
+ 			      cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
+ 
++	if (cgc->sense)
++		memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
++
+ 	/* Minimal error checking.  Ignore cases we know about, and report the rest. */
+ 	if (driver_byte(result) != 0) {
+ 		switch (sshdr.sense_key) {
+diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+index 2bdeebc48901..2625ef06c10e 100644
+--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
++++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+@@ -224,7 +224,11 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
+ 
+ static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
+ {
+-	meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
++	bool powered_off;
++
++	powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
++	if (!powered_off)
++		meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+ }
+ 
+ static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
+diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
+index d008e5b82db4..df3ccb30bc2d 100644
+--- a/drivers/soc/qcom/wcnss_ctrl.c
++++ b/drivers/soc/qcom/wcnss_ctrl.c
+@@ -249,7 +249,7 @@ static int wcnss_download_nv(struct wcnss_ctrl *wcnss, bool *expect_cbc)
+ 		/* Increment for next fragment */
+ 		req->seq++;
+ 
+-		data += req->hdr.len;
++		data += NV_FRAGMENT_SIZE;
+ 		left -= NV_FRAGMENT_SIZE;
+ 	} while (left > 0);
+ 
+diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
+index 8c614164718e..caf894f193ed 100644
+--- a/drivers/soc/renesas/r8a77970-sysc.c
++++ b/drivers/soc/renesas/r8a77970-sysc.c
+@@ -25,12 +25,12 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
+ 	  PD_CPU_NOCR },
+ 	{ "cr7",	0x240, 0, R8A77970_PD_CR7,	R8A77970_PD_ALWAYS_ON },
+ 	{ "a3ir",	0x180, 0, R8A77970_PD_A3IR,	R8A77970_PD_ALWAYS_ON },
+-	{ "a2ir0",	0x400, 0, R8A77970_PD_A2IR0,	R8A77970_PD_ALWAYS_ON },
+-	{ "a2ir1",	0x400, 1, R8A77970_PD_A2IR1,	R8A77970_PD_A2IR0 },
+-	{ "a2ir2",	0x400, 2, R8A77970_PD_A2IR2,	R8A77970_PD_A2IR0 },
+-	{ "a2ir3",	0x400, 3, R8A77970_PD_A2IR3,	R8A77970_PD_A2IR0 },
+-	{ "a2sc0",	0x400, 4, R8A77970_PD_A2SC0,	R8A77970_PD_ALWAYS_ON },
+-	{ "a2sc1",	0x400, 5, R8A77970_PD_A2SC1,	R8A77970_PD_A2SC0 },
++	{ "a2ir0",	0x400, 0, R8A77970_PD_A2IR0,	R8A77970_PD_A3IR },
++	{ "a2ir1",	0x400, 1, R8A77970_PD_A2IR1,	R8A77970_PD_A3IR },
++	{ "a2ir2",	0x400, 2, R8A77970_PD_A2IR2,	R8A77970_PD_A3IR },
++	{ "a2ir3",	0x400, 3, R8A77970_PD_A2IR3,	R8A77970_PD_A3IR },
++	{ "a2sc0",	0x400, 4, R8A77970_PD_A2SC0,	R8A77970_PD_A3IR },
++	{ "a2sc1",	0x400, 5, R8A77970_PD_A2SC1,	R8A77970_PD_A3IR },
+ };
+ 
+ const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index ff01f865a173..6573152ce893 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -1255,7 +1255,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 		qspi->base[MSPI]  = devm_ioremap_resource(dev, res);
+ 		if (IS_ERR(qspi->base[MSPI])) {
+ 			ret = PTR_ERR(qspi->base[MSPI]);
+-			goto qspi_probe_err;
++			goto qspi_resource_err;
+ 		}
+ 	} else {
+ 		goto qspi_resource_err;
+@@ -1266,7 +1266,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
+ 		qspi->base[BSPI]  = devm_ioremap_resource(dev, res);
+ 		if (IS_ERR(qspi->base[BSPI])) {
+ 			ret = PTR_ERR(qspi->base[BSPI]);
+-			goto qspi_probe_err;
++			goto qspi_resource_err;
+ 		}
+ 		qspi->bspi_mode = true;
+ 	} else {
+diff --git a/drivers/watchdog/asm9260_wdt.c b/drivers/watchdog/asm9260_wdt.c
+index 7dd0da644a7f..2cf56b459d84 100644
+--- a/drivers/watchdog/asm9260_wdt.c
++++ b/drivers/watchdog/asm9260_wdt.c
+@@ -292,14 +292,14 @@ static int asm9260_wdt_probe(struct platform_device *pdev)
+ 	if (IS_ERR(priv->iobase))
+ 		return PTR_ERR(priv->iobase);
+ 
+-	ret = asm9260_wdt_get_dt_clks(priv);
+-	if (ret)
+-		return ret;
+-
+ 	priv->rst = devm_reset_control_get_exclusive(&pdev->dev, "wdt_rst");
+ 	if (IS_ERR(priv->rst))
+ 		return PTR_ERR(priv->rst);
+ 
++	ret = asm9260_wdt_get_dt_clks(priv);
++	if (ret)
++		return ret;
++
+ 	wdd = &priv->wdd;
+ 	wdd->info = &asm9260_wdt_ident;
+ 	wdd->ops = &asm9260_wdt_ops;
+diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
+index ca5b91e2eb92..a5b8eb21201f 100644
+--- a/drivers/watchdog/aspeed_wdt.c
++++ b/drivers/watchdog/aspeed_wdt.c
+@@ -46,6 +46,7 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+ #define WDT_RELOAD_VALUE	0x04
+ #define WDT_RESTART		0x08
+ #define WDT_CTRL		0x0C
++#define   WDT_CTRL_BOOT_SECONDARY	BIT(7)
+ #define   WDT_CTRL_RESET_MODE_SOC	(0x00 << 5)
+ #define   WDT_CTRL_RESET_MODE_FULL_CHIP	(0x01 << 5)
+ #define   WDT_CTRL_RESET_MODE_ARM_CPU	(0x10 << 5)
+@@ -158,6 +159,7 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
+ {
+ 	struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ 
++	wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
+ 	aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
+ 
+ 	mdelay(1000);
+@@ -232,16 +234,21 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ 		wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
+ 	} else {
+ 		if (!strcmp(reset_type, "cpu"))
+-			wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU;
++			wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
++				     WDT_CTRL_RESET_SYSTEM;
+ 		else if (!strcmp(reset_type, "soc"))
+-			wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC;
++			wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
++				     WDT_CTRL_RESET_SYSTEM;
+ 		else if (!strcmp(reset_type, "system"))
+-			wdt->ctrl |= WDT_CTRL_RESET_SYSTEM;
++			wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
++				     WDT_CTRL_RESET_SYSTEM;
+ 		else if (strcmp(reset_type, "none"))
+ 			return -EINVAL;
+ 	}
+ 	if (of_property_read_bool(np, "aspeed,external-signal"))
+ 		wdt->ctrl |= WDT_CTRL_WDT_EXT;
++	if (of_property_read_bool(np, "aspeed,alt-boot"))
++		wdt->ctrl |= WDT_CTRL_BOOT_SECONDARY;
+ 
+ 	if (readl(wdt->base + WDT_CTRL) & WDT_CTRL_ENABLE)  {
+ 		/*
+diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
+index 3e4c592c239f..6c6594261cb7 100644
+--- a/drivers/watchdog/davinci_wdt.c
++++ b/drivers/watchdog/davinci_wdt.c
+@@ -236,15 +236,22 @@ static int davinci_wdt_probe(struct platform_device *pdev)
+ 
+ 	wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	davinci_wdt->base = devm_ioremap_resource(dev, wdt_mem);
+-	if (IS_ERR(davinci_wdt->base))
+-		return PTR_ERR(davinci_wdt->base);
++	if (IS_ERR(davinci_wdt->base)) {
++		ret = PTR_ERR(davinci_wdt->base);
++		goto err_clk_disable;
++	}
+ 
+ 	ret = watchdog_register_device(wdd);
+-	if (ret < 0) {
+-		clk_disable_unprepare(davinci_wdt->clk);
++	if (ret) {
+ 		dev_err(dev, "cannot register watchdog device\n");
++		goto err_clk_disable;
+ 	}
+ 
++	return 0;
++
++err_clk_disable:
++	clk_disable_unprepare(davinci_wdt->clk);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
+index c2f4ff516230..918357bccf5e 100644
+--- a/drivers/watchdog/dw_wdt.c
++++ b/drivers/watchdog/dw_wdt.c
+@@ -34,6 +34,7 @@
+ 
+ #define WDOG_CONTROL_REG_OFFSET		    0x00
+ #define WDOG_CONTROL_REG_WDT_EN_MASK	    0x01
++#define WDOG_CONTROL_REG_RESP_MODE_MASK	    0x02
+ #define WDOG_TIMEOUT_RANGE_REG_OFFSET	    0x04
+ #define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT    4
+ #define WDOG_CURRENT_COUNT_REG_OFFSET	    0x08
+@@ -121,14 +122,23 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s)
+ 	return 0;
+ }
+ 
++static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt)
++{
++	u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
++
++	/* Disable interrupt mode; always perform system reset. */
++	val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK;
++	/* Enable watchdog. */
++	val |= WDOG_CONTROL_REG_WDT_EN_MASK;
++	writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
++}
++
+ static int dw_wdt_start(struct watchdog_device *wdd)
+ {
+ 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+ 
+ 	dw_wdt_set_timeout(wdd, wdd->timeout);
+-
+-	writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+-	       dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
++	dw_wdt_arm_system_reset(dw_wdt);
+ 
+ 	return 0;
+ }
+@@ -152,16 +162,13 @@ static int dw_wdt_restart(struct watchdog_device *wdd,
+ 			  unsigned long action, void *data)
+ {
+ 	struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+-	u32 val;
+ 
+ 	writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
+-	val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
+-	if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
++	if (dw_wdt_is_enabled(dw_wdt))
+ 		writel(WDOG_COUNTER_RESTART_KICK_VALUE,
+ 		       dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET);
+ 	else
+-		writel(WDOG_CONTROL_REG_WDT_EN_MASK,
+-		       dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
++		dw_wdt_arm_system_reset(dw_wdt);
+ 
+ 	/* wait for reset to assert... */
+ 	mdelay(500);
+diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
+index a8b280ff33e0..b4d484a42b70 100644
+--- a/drivers/watchdog/sprd_wdt.c
++++ b/drivers/watchdog/sprd_wdt.c
+@@ -154,8 +154,10 @@ static int sprd_wdt_enable(struct sprd_wdt *wdt)
+ 	if (ret)
+ 		return ret;
+ 	ret = clk_prepare_enable(wdt->rtc_enable);
+-	if (ret)
++	if (ret) {
++		clk_disable_unprepare(wdt->enable);
+ 		return ret;
++	}
+ 
+ 	sprd_wdt_unlock(wdt->base);
+ 	val = readl_relaxed(wdt->base + SPRD_WDT_CTRL);
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 5bb72d3f8337..3530a196d959 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -365,7 +365,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ 	 * physical address */
+ 	phys = xen_bus_to_phys(dev_addr);
+ 
+-	if (((dev_addr + size - 1 > dma_mask)) ||
++	if (((dev_addr + size - 1 <= dma_mask)) ||
+ 	    range_straddles_page_boundary(phys, size))
+ 		xen_destroy_contiguous_region(phys, order);
+ 
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 23e391d3ec01..22863f5f2474 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -362,9 +362,9 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ 	}
+ 	/* There are more ACPI Processor objects than in x2APIC or MADT.
+ 	 * This can happen with incorrect ACPI SSDT declerations. */
+-	if (acpi_id > nr_acpi_bits) {
+-		pr_debug("We only have %u, trying to set %u\n",
+-			 nr_acpi_bits, acpi_id);
++	if (acpi_id >= nr_acpi_bits) {
++		pr_debug("max acpi id %u, trying to set %u\n",
++			 nr_acpi_bits - 1, acpi_id);
+ 		return AE_OK;
+ 	}
+ 	/* OK, There is a ACPI Processor object */
+diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
+index cc1b1ac57d61..47728477297e 100644
+--- a/drivers/zorro/zorro.c
++++ b/drivers/zorro/zorro.c
+@@ -16,6 +16,7 @@
+ #include <linux/bitops.h>
+ #include <linux/string.h>
+ #include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
+ #include <linux/slab.h>
+ 
+ #include <asm/byteorder.h>
+@@ -185,6 +186,17 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
+ 		z->dev.parent = &bus->dev;
+ 		z->dev.bus = &zorro_bus_type;
+ 		z->dev.id = i;
++		switch (z->rom.er_Type & ERT_TYPEMASK) {
++		case ERT_ZORROIII:
++			z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
++			break;
++
++		case ERT_ZORROII:
++		default:
++			z->dev.coherent_dma_mask = DMA_BIT_MASK(24);
++			break;
++		}
++		z->dev.dma_mask = &z->dev.coherent_dma_mask;
+ 	}
+ 
+ 	/* ... then register them */
+diff --git a/fs/affs/namei.c b/fs/affs/namei.c
+index d8aa0ae3d037..1ed0fa4c4d48 100644
+--- a/fs/affs/namei.c
++++ b/fs/affs/namei.c
+@@ -206,9 +206,10 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+ 
+ 	affs_lock_dir(dir);
+ 	bh = affs_find_entry(dir, dentry);
+-	affs_unlock_dir(dir);
+-	if (IS_ERR(bh))
++	if (IS_ERR(bh)) {
++		affs_unlock_dir(dir);
+ 		return ERR_CAST(bh);
++	}
+ 	if (bh) {
+ 		u32 ino = bh->b_blocknr;
+ 
+@@ -222,10 +223,13 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+ 		}
+ 		affs_brelse(bh);
+ 		inode = affs_iget(sb, ino);
+-		if (IS_ERR(inode))
++		if (IS_ERR(inode)) {
++			affs_unlock_dir(dir);
+ 			return ERR_CAST(inode);
++		}
+ 	}
+ 	d_add(dentry, inode);
++	affs_unlock_dir(dir);
+ 	return NULL;
+ }
+ 
+diff --git a/fs/aio.c b/fs/aio.c
+index 6bcd3fb5265a..63c0437ab135 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -1087,8 +1087,8 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+ 
+ 	ctx = rcu_dereference(table->table[id]);
+ 	if (ctx && ctx->user_id == ctx_id) {
+-		percpu_ref_get(&ctx->users);
+-		ret = ctx;
++		if (percpu_ref_tryget_live(&ctx->users))
++			ret = ctx;
+ 	}
+ out:
+ 	rcu_read_unlock();
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index 7efbc4d1128b..f5247ad86970 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -307,7 +307,7 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
+ 
+ static char* btrfs_dev_name(struct btrfs_device *device)
+ {
+-	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
++	if (!device || test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ 		return "<missing disk>";
+ 	else
+ 		return rcu_str_deref(device->name);
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index fea78d138073..02e39a7f22ec 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1108,7 +1108,7 @@ static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
+ 	if (!writers)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
++	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
+ 	if (ret < 0) {
+ 		kfree(writers);
+ 		return ERR_PTR(ret);
+@@ -3735,7 +3735,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
+ 			btrfs_err(fs_info, "commit super ret %d", ret);
+ 	}
+ 
+-	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
++	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state) ||
++	    test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state))
+ 		btrfs_error_commit_super(fs_info);
+ 
+ 	kthread_stop(fs_info->transaction_kthread);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 53ddfafa440b..b45b840c2217 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -4657,6 +4657,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
+ 	if (wait_for_alloc) {
+ 		mutex_unlock(&fs_info->chunk_mutex);
+ 		wait_for_alloc = 0;
++		cond_resched();
+ 		goto again;
+ 	}
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f370bdc126b8..8b031f40a2f5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6632,8 +6632,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
+ 		goto out_unlock_inode;
+ 	} else {
+ 		btrfs_update_inode(trans, root, inode);
+-		unlock_new_inode(inode);
+-		d_instantiate(dentry, inode);
++		d_instantiate_new(dentry, inode);
+ 	}
+ 
+ out_unlock:
+@@ -6709,8 +6708,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
+ 		goto out_unlock_inode;
+ 
+ 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 
+ out_unlock:
+ 	btrfs_end_transaction(trans);
+@@ -6855,12 +6853,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 	if (err)
+ 		goto out_fail_inode;
+ 
+-	d_instantiate(dentry, inode);
+-	/*
+-	 * mkdir is special.  We're unlocking after we call d_instantiate
+-	 * to avoid a race with nfsd calling d_instantiate.
+-	 */
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 	drop_on_err = 0;
+ 
+ out_fail:
+@@ -9238,7 +9231,8 @@ static int btrfs_truncate(struct inode *inode)
+ 						 BTRFS_EXTENT_DATA_KEY);
+ 		trans->block_rsv = &fs_info->trans_block_rsv;
+ 		if (ret != -ENOSPC && ret != -EAGAIN) {
+-			err = ret;
++			if (ret < 0)
++				err = ret;
+ 			break;
+ 		}
+ 
+@@ -10372,8 +10366,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ 		goto out_unlock_inode;
+ 	}
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 
+ out_unlock:
+ 	btrfs_end_transaction(trans);
+diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
+index 90204b166643..160eb2fba726 100644
+--- a/fs/btrfs/tests/qgroup-tests.c
++++ b/fs/btrfs/tests/qgroup-tests.c
+@@ -63,7 +63,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
+ 	btrfs_set_extent_generation(leaf, item, 1);
+ 	btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
+ 	block_info = (struct btrfs_tree_block_info *)(item + 1);
+-	btrfs_set_tree_block_level(leaf, block_info, 1);
++	btrfs_set_tree_block_level(leaf, block_info, 0);
+ 	iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
+ 	if (parent > 0) {
+ 		btrfs_set_extent_inline_ref_type(leaf, iref,
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 04f07144b45c..c070ce7fecc6 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -319,7 +319,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
+ 	if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ 	    root->last_trans < trans->transid) || force) {
+ 		WARN_ON(root == fs_info->extent_root);
+-		WARN_ON(root->commit_root != root->node);
++		WARN_ON(!force && root->commit_root != root->node);
+ 
+ 		/*
+ 		 * see below for IN_TRANS_SETUP usage rules
+@@ -1365,6 +1365,14 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
+ 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ 		return 0;
+ 
++	/*
++	 * Ensure dirty @src will be commited.  Or, after comming
++	 * commit_fs_roots() and switch_commit_roots(), any dirty but not
++	 * recorded root will never be updated again, causing an outdated root
++	 * item.
++	 */
++	record_root_in_trans(trans, src, 1);
++
+ 	/*
+ 	 * We are going to commit transaction, see btrfs_commit_transaction()
+ 	 * comment for reason locking tree_log_mutex
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index ac6ea1503cd6..eb53c21b223a 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2356,8 +2356,10 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+ 			nritems = btrfs_header_nritems(path->nodes[0]);
+ 			if (path->slots[0] >= nritems) {
+ 				ret = btrfs_next_leaf(root, path);
+-				if (ret)
++				if (ret == 1)
+ 					break;
++				else if (ret < 0)
++					goto out;
+ 			}
+ 			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ 					      path->slots[0]);
+@@ -2461,13 +2463,41 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
+ 			if (ret)
+ 				break;
+ 
+-			/* for regular files, make sure corresponding
+-			 * orphan item exist. extents past the new EOF
+-			 * will be truncated later by orphan cleanup.
++			/*
++			 * Before replaying extents, truncate the inode to its
++			 * size. We need to do it now and not after log replay
++			 * because before an fsync we can have prealloc extents
++			 * added beyond the inode's i_size. If we did it after,
++			 * through orphan cleanup for example, we would drop
++			 * those prealloc extents just after replaying them.
+ 			 */
+ 			if (S_ISREG(mode)) {
+-				ret = insert_orphan_item(wc->trans, root,
+-							 key.objectid);
++				struct inode *inode;
++				u64 from;
++
++				inode = read_one_inode(root, key.objectid);
++				if (!inode) {
++					ret = -EIO;
++					break;
++				}
++				from = ALIGN(i_size_read(inode),
++					     root->fs_info->sectorsize);
++				ret = btrfs_drop_extents(wc->trans, root, inode,
++							 from, (u64)-1, 1);
++				/*
++				 * If the nlink count is zero here, the iput
++				 * will free the inode.  We bump it to make
++				 * sure it doesn't get freed until the link
++				 * count fixup is done.
++				 */
++				if (!ret) {
++					if (inode->i_nlink == 0)
++						inc_nlink(inode);
++					/* Update link count and nbytes. */
++					ret = btrfs_update_inode(wc->trans,
++								 root, inode);
++				}
++				iput(inode);
+ 				if (ret)
+ 					break;
+ 			}
+@@ -3518,8 +3548,11 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
+ 		 * from this directory and from this transaction
+ 		 */
+ 		ret = btrfs_next_leaf(root, path);
+-		if (ret == 1) {
+-			last_offset = (u64)-1;
++		if (ret) {
++			if (ret == 1)
++				last_offset = (u64)-1;
++			else
++				err = ret;
+ 			goto done;
+ 		}
+ 		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
+@@ -3972,6 +4005,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
+ 			ASSERT(ret == 0);
+ 			src = src_path->nodes[0];
+ 			i = 0;
++			need_find_last_extent = true;
+ 		}
+ 
+ 		btrfs_item_key_to_cpu(src, &key, i);
+@@ -4321,6 +4355,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
+ 		num++;
+ 	}
+ 
++	/*
++	 * Add all prealloc extents beyond the inode's i_size to make sure we
++	 * don't lose them after doing a fast fsync and replaying the log.
++	 */
++	if (inode->flags & BTRFS_INODE_PREALLOC) {
++		struct rb_node *node;
++
++		for (node = rb_last(&tree->map); node; node = rb_prev(node)) {
++			em = rb_entry(node, struct extent_map, rb_node);
++			if (em->start < i_size_read(&inode->vfs_inode))
++				break;
++			if (!list_empty(&em->list))
++				continue;
++			/* Same as above loop. */
++			if (++num > 32768) {
++				list_del_init(&tree->modified_extents);
++				ret = -EFBIG;
++				goto process;
++			}
++			refcount_inc(&em->refs);
++			set_bit(EXTENT_FLAG_LOGGING, &em->flags);
++			list_add_tail(&em->list, &extents);
++		}
++	}
++
+ 	list_sort(NULL, &extents, extent_cmp);
+ 	btrfs_get_logged_extents(inode, logged_list, logged_start, logged_end);
+ 	/*
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 8945e6cabd93..06463b780e57 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1865,6 +1865,28 @@ void d_instantiate(struct dentry *entry, struct inode * inode)
+ }
+ EXPORT_SYMBOL(d_instantiate);
+ 
++/*
++ * This should be equivalent to d_instantiate() + unlock_new_inode(),
++ * with lockdep-related part of unlock_new_inode() done before
++ * anything else.  Use that instead of open-coding d_instantiate()/
++ * unlock_new_inode() combinations.
++ */
++void d_instantiate_new(struct dentry *entry, struct inode *inode)
++{
++	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
++	BUG_ON(!inode);
++	lockdep_annotate_inode_mutex_key(inode);
++	security_d_instantiate(entry, inode);
++	spin_lock(&inode->i_lock);
++	__d_instantiate(entry, inode);
++	WARN_ON(!(inode->i_state & I_NEW));
++	inode->i_state &= ~I_NEW;
++	smp_mb();
++	wake_up_bit(&inode->i_state, __I_NEW);
++	spin_unlock(&inode->i_lock);
++}
++EXPORT_SYMBOL(d_instantiate_new);
++
+ /**
+  * d_instantiate_no_diralias - instantiate a non-aliased dentry
+  * @entry: dentry to complete
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index 847904aa63a9..7bba8f2693b2 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -283,8 +283,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
+ 		iget_failed(ecryptfs_inode);
+ 		goto out;
+ 	}
+-	unlock_new_inode(ecryptfs_inode);
+-	d_instantiate(ecryptfs_dentry, ecryptfs_inode);
++	d_instantiate_new(ecryptfs_dentry, ecryptfs_inode);
+ out:
+ 	return rc;
+ }
+diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
+index e078075dc66f..aa6ec191cac0 100644
+--- a/fs/ext2/namei.c
++++ b/fs/ext2/namei.c
+@@ -41,8 +41,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
+ {
+ 	int err = ext2_add_link(dentry, inode);
+ 	if (!err) {
+-		unlock_new_inode(inode);
+-		d_instantiate(dentry, inode);
++		d_instantiate_new(dentry, inode);
+ 		return 0;
+ 	}
+ 	inode_dec_link_count(inode);
+@@ -269,8 +268,7 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 	if (err)
+ 		goto out_fail;
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ out:
+ 	return err;
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index b1f21e3a0763..4a09063ce1d2 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2411,8 +2411,7 @@ static int ext4_add_nondir(handle_t *handle,
+ 	int err = ext4_add_entry(handle, dentry, inode);
+ 	if (!err) {
+ 		ext4_mark_inode_dirty(handle, inode);
+-		unlock_new_inode(inode);
+-		d_instantiate(dentry, inode);
++		d_instantiate_new(dentry, inode);
+ 		return 0;
+ 	}
+ 	drop_nlink(inode);
+@@ -2651,8 +2650,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 	err = ext4_mark_inode_dirty(handle, dir);
+ 	if (err)
+ 		goto out_clear_inode;
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	if (IS_DIRSYNC(dir))
+ 		ext4_handle_sync(handle);
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b8dace7abe09..4c4ff4b3593c 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3663,6 +3663,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
+ 				 "using the ext4 subsystem");
+ 		else {
++			/*
++			 * If we're probing be silent, if this looks like
++			 * it's actually an ext[34] filesystem.
++			 */
++			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
++				goto failed_mount;
+ 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
+ 				 "to feature incompatibilities");
+ 			goto failed_mount;
+@@ -3674,6 +3680,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
+ 				 "using the ext4 subsystem");
+ 		else {
++			/*
++			 * If we're probing be silent, if this looks like
++			 * it's actually an ext4 filesystem.
++			 */
++			if (silent && ext4_feature_set_ok(sb, sb_rdonly(sb)))
++				goto failed_mount;
+ 			ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
+ 				 "to feature incompatibilities");
+ 			goto failed_mount;
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index 512dca8abc7d..e77271c2144d 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -1136,6 +1136,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 
+ 	if (cpc->reason & CP_TRIMMED)
+ 		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
++	else
++		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
+ 
+ 	if (cpc->reason & CP_UMOUNT)
+ 		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+@@ -1162,6 +1164,39 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
+ }
+ 
++static void commit_checkpoint(struct f2fs_sb_info *sbi,
++	void *src, block_t blk_addr)
++{
++	struct writeback_control wbc = {
++		.for_reclaim = 0,
++	};
++
++	/*
++	 * pagevec_lookup_tag and lock_page again will take
++	 * some extra time. Therefore, update_meta_pages and
++	 * sync_meta_pages are combined in this function.
++	 */
++	struct page *page = grab_meta_page(sbi, blk_addr);
++	int err;
++
++	memcpy(page_address(page), src, PAGE_SIZE);
++	set_page_dirty(page);
++
++	f2fs_wait_on_page_writeback(page, META, true);
++	f2fs_bug_on(sbi, PageWriteback(page));
++	if (unlikely(!clear_page_dirty_for_io(page)))
++		f2fs_bug_on(sbi, 1);
++
++	/* writeout cp pack 2 page */
++	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
++	f2fs_bug_on(sbi, err);
++
++	f2fs_put_page(page, 0);
++
++	/* submit checkpoint (with barrier if NOBARRIER is not set) */
++	f2fs_submit_merged_write(sbi, META_FLUSH);
++}
++
+ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ {
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+@@ -1264,16 +1299,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 		}
+ 	}
+ 
+-	/* need to wait for end_io results */
+-	wait_on_all_pages_writeback(sbi);
+-	if (unlikely(f2fs_cp_error(sbi)))
+-		return -EIO;
+-
+-	/* flush all device cache */
+-	err = f2fs_flush_device_cache(sbi);
+-	if (err)
+-		return err;
+-
+ 	/* write out checkpoint buffer at block 0 */
+ 	update_meta_page(sbi, ckpt, start_blk++);
+ 
+@@ -1301,26 +1326,26 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+ 		start_blk += NR_CURSEG_NODE_TYPE;
+ 	}
+ 
+-	/* writeout checkpoint block */
+-	update_meta_page(sbi, ckpt, start_blk);
++	/* update user_block_counts */
++	sbi->last_valid_block_count = sbi->total_valid_block_count;
++	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+ 
+-	/* wait for previous submitted node/meta pages writeback */
++	/* Here, we have one bio having CP pack except cp pack 2 page */
++	sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
++
++	/* wait for previous submitted meta pages writeback */
+ 	wait_on_all_pages_writeback(sbi);
+ 
+ 	if (unlikely(f2fs_cp_error(sbi)))
+ 		return -EIO;
+ 
+-	filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
+-	filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
+-
+-	/* update user_block_counts */
+-	sbi->last_valid_block_count = sbi->total_valid_block_count;
+-	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
+-
+-	/* Here, we only have one bio having CP pack */
+-	sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
++	/* flush all device cache */
++	err = f2fs_flush_device_cache(sbi);
++	if (err)
++		return err;
+ 
+-	/* wait for previous submitted meta pages writeback */
++	/* barrier and flush checkpoint cp pack 2 page if it can */
++	commit_checkpoint(sbi, ckpt, start_blk);
+ 	wait_on_all_pages_writeback(sbi);
+ 
+ 	release_ino_entry(sbi, false);
+diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
+index ff2352a0ed15..aff6c2ed1c02 100644
+--- a/fs/f2fs/extent_cache.c
++++ b/fs/f2fs/extent_cache.c
+@@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode)
+ 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ 	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+ 
++	if (!f2fs_may_extent_tree(inode))
++		return;
++
+ 	set_inode_flag(inode, FI_NO_EXTENT);
+ 
+ 	write_lock(&et->lock);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 672a542e5464..c59b7888d356 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1348,8 +1348,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 	}
+ 
+ out:
+-	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+-		f2fs_i_size_write(inode, new_size);
++	if (new_size > i_size_read(inode)) {
++		if (mode & FALLOC_FL_KEEP_SIZE)
++			file_set_keep_isize(inode);
++		else
++			f2fs_i_size_write(inode, new_size);
++	}
+ out_sem:
+ 	up_write(&F2FS_I(inode)->i_mmap_sem);
+ 
+diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
+index b68e7b03959f..860c9dd4bb42 100644
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -218,8 +218,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	alloc_nid_done(sbi, ino);
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 
+ 	if (IS_DIRSYNC(dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+@@ -526,8 +525,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
+ 	err = page_symlink(inode, disk_link.name, disk_link.len);
+ 
+ err_out:
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 
+ 	/*
+ 	 * Let's flush symlink data in order to avoid broken symlink as much as
+@@ -590,8 +588,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 
+ 	alloc_nid_done(sbi, inode->i_ino);
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 
+ 	if (IS_DIRSYNC(dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+@@ -642,8 +639,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
+ 
+ 	alloc_nid_done(sbi, inode->i_ino);
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 
+ 	if (IS_DIRSYNC(dir))
+ 		f2fs_sync_fs(sbi->sb, 1);
+diff --git a/fs/fscache/page.c b/fs/fscache/page.c
+index 961029e04027..da2fb58f2ecb 100644
+--- a/fs/fscache/page.c
++++ b/fs/fscache/page.c
+@@ -776,6 +776,7 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 
+ 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
+ 
++again:
+ 	spin_lock(&object->lock);
+ 	cookie = object->cookie;
+ 
+@@ -816,10 +817,6 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 		goto superseded;
+ 	page = results[0];
+ 	_debug("gang %d [%lx]", n, page->index);
+-	if (page->index >= op->store_limit) {
+-		fscache_stat(&fscache_n_store_pages_over_limit);
+-		goto superseded;
+-	}
+ 
+ 	radix_tree_tag_set(&cookie->stores, page->index,
+ 			   FSCACHE_COOKIE_STORING_TAG);
+@@ -829,6 +826,9 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 	spin_unlock(&cookie->stores_lock);
+ 	spin_unlock(&object->lock);
+ 
++	if (page->index >= op->store_limit)
++		goto discard_page;
++
+ 	fscache_stat(&fscache_n_store_pages);
+ 	fscache_stat(&fscache_n_cop_write_page);
+ 	ret = object->cache->ops->write_page(op, page);
+@@ -844,6 +844,11 @@ static void fscache_write_op(struct fscache_operation *_op)
+ 	_leave("");
+ 	return;
+ 
++discard_page:
++	fscache_stat(&fscache_n_store_pages_over_limit);
++	fscache_end_page_write(object, page);
++	goto again;
++
+ superseded:
+ 	/* this writer is going away and there aren't any more things to
+ 	 * write */
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 51f940e76c5e..de28800691c6 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1344,6 +1344,7 @@ static inline bool walk_done(struct gfs2_sbd *sdp,
+ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ {
+ 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
++	u64 maxsize = sdp->sd_heightsize[ip->i_height];
+ 	struct metapath mp = {};
+ 	struct buffer_head *dibh, *bh;
+ 	struct gfs2_holder rd_gh;
+@@ -1359,6 +1360,14 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ 	u64 prev_bnr = 0;
+ 	__be64 *start, *end;
+ 
++	if (offset >= maxsize) {
++		/*
++		 * The starting point lies beyond the allocated meta-data;
++		 * there are no blocks do deallocate.
++		 */
++		return 0;
++	}
++
+ 	/*
+ 	 * The start position of the hole is defined by lblock, start_list, and
+ 	 * start_aligned.  The end position of the hole is defined by lend,
+@@ -1372,7 +1381,6 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
+ 	 */
+ 
+ 	if (length) {
+-		u64 maxsize = sdp->sd_heightsize[ip->i_height];
+ 		u64 end_offset = offset + length;
+ 		u64 lend;
+ 
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 4f88e201b3f0..2edd3a9a7b79 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -809,7 +809,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
+ 	struct gfs2_inode *ip = GFS2_I(inode);
+ 	struct gfs2_alloc_parms ap = { .aflags = 0, };
+ 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
+-	loff_t bytes, max_bytes, max_blks = UINT_MAX;
++	loff_t bytes, max_bytes, max_blks;
+ 	int error;
+ 	const loff_t pos = offset;
+ 	const loff_t count = len;
+@@ -861,7 +861,8 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
+ 			return error;
+ 		/* ap.allowed tells us how many blocks quota will allow
+ 		 * us to write. Check if this reduces max_blks */
+-		if (ap.allowed && ap.allowed < max_blks)
++		max_blks = UINT_MAX;
++		if (ap.allowed)
+ 			max_blks = ap.allowed;
+ 
+ 		error = gfs2_inplace_reserve(ip, &ap);
+diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
+index 5e47c935a515..836f29480be6 100644
+--- a/fs/gfs2/quota.h
++++ b/fs/gfs2/quota.h
+@@ -45,6 +45,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
+ {
+ 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ 	int ret;
++
++	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
+ 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+ 		return 0;
+ 	ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index 0a754f38462e..e5a6deb38e1e 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -209,8 +209,7 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
+ 		  __func__, inode->i_ino, inode->i_mode, inode->i_nlink,
+ 		  f->inocache->pino_nlink, inode->i_mapping->nrpages);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	return 0;
+ 
+  fail:
+@@ -430,8 +429,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
+ 	mutex_unlock(&dir_f->sem);
+ 	jffs2_complete_reservation(c);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	return 0;
+ 
+  fail:
+@@ -575,8 +573,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
+ 	mutex_unlock(&dir_f->sem);
+ 	jffs2_complete_reservation(c);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	return 0;
+ 
+  fail:
+@@ -747,8 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
+ 	mutex_unlock(&dir_f->sem);
+ 	jffs2_complete_reservation(c);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	return 0;
+ 
+  fail:
+diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
+index b41596d71858..56c3fcbfe80e 100644
+--- a/fs/jfs/namei.c
++++ b/fs/jfs/namei.c
+@@ -178,8 +178,7 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
+ 		unlock_new_inode(ip);
+ 		iput(ip);
+ 	} else {
+-		unlock_new_inode(ip);
+-		d_instantiate(dentry, ip);
++		d_instantiate_new(dentry, ip);
+ 	}
+ 
+       out2:
+@@ -313,8 +312,7 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
+ 		unlock_new_inode(ip);
+ 		iput(ip);
+ 	} else {
+-		unlock_new_inode(ip);
+-		d_instantiate(dentry, ip);
++		d_instantiate_new(dentry, ip);
+ 	}
+ 
+       out2:
+@@ -1059,8 +1057,7 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
+ 		unlock_new_inode(ip);
+ 		iput(ip);
+ 	} else {
+-		unlock_new_inode(ip);
+-		d_instantiate(dentry, ip);
++		d_instantiate_new(dentry, ip);
+ 	}
+ 
+       out2:
+@@ -1447,8 +1444,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
+ 		unlock_new_inode(ip);
+ 		iput(ip);
+ 	} else {
+-		unlock_new_inode(ip);
+-		d_instantiate(dentry, ip);
++		d_instantiate_new(dentry, ip);
+ 	}
+ 
+       out1:
+diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
+index 1a2894aa0194..dd52d3f82e8d 100644
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -46,8 +46,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
+ 	int err = nilfs_add_link(dentry, inode);
+ 
+ 	if (!err) {
+-		d_instantiate(dentry, inode);
+-		unlock_new_inode(inode);
++		d_instantiate_new(dentry, inode);
+ 		return 0;
+ 	}
+ 	inode_dec_link_count(inode);
+@@ -243,8 +242,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 		goto out_fail;
+ 
+ 	nilfs_mark_inode_dirty(inode);
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ out:
+ 	if (!err)
+ 		err = nilfs_transaction_commit(dir->i_sb);
+diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
+index e1fea149f50b..25b76f0d082b 100644
+--- a/fs/ocfs2/dlm/dlmdomain.c
++++ b/fs/ocfs2/dlm/dlmdomain.c
+@@ -675,20 +675,6 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
+ 	spin_unlock(&dlm->spinlock);
+ }
+ 
+-int dlm_shutting_down(struct dlm_ctxt *dlm)
+-{
+-	int ret = 0;
+-
+-	spin_lock(&dlm_domain_lock);
+-
+-	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
+-		ret = 1;
+-
+-	spin_unlock(&dlm_domain_lock);
+-
+-	return ret;
+-}
+-
+ void dlm_unregister_domain(struct dlm_ctxt *dlm)
+ {
+ 	int leave = 0;
+diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h
+index fd6122a38dbd..8a9281411c18 100644
+--- a/fs/ocfs2/dlm/dlmdomain.h
++++ b/fs/ocfs2/dlm/dlmdomain.h
+@@ -28,7 +28,30 @@
+ extern spinlock_t dlm_domain_lock;
+ extern struct list_head dlm_domains;
+ 
+-int dlm_shutting_down(struct dlm_ctxt *dlm);
++static inline int dlm_joined(struct dlm_ctxt *dlm)
++{
++	int ret = 0;
++
++	spin_lock(&dlm_domain_lock);
++	if (dlm->dlm_state == DLM_CTXT_JOINED)
++		ret = 1;
++	spin_unlock(&dlm_domain_lock);
++
++	return ret;
++}
++
++static inline int dlm_shutting_down(struct dlm_ctxt *dlm)
++{
++	int ret = 0;
++
++	spin_lock(&dlm_domain_lock);
++	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
++		ret = 1;
++	spin_unlock(&dlm_domain_lock);
++
++	return ret;
++}
++
+ void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
+ 					int node_num);
+ 
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index ec8f75813beb..505ab4281f36 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1378,6 +1378,15 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
+ 	if (!dlm_grab(dlm))
+ 		return -EINVAL;
+ 
++	if (!dlm_joined(dlm)) {
++		mlog(ML_ERROR, "Domain %s not joined! "
++			  "lockres %.*s, master %u\n",
++			  dlm->name, mres->lockname_len,
++			  mres->lockname, mres->master);
++		dlm_put(dlm);
++		return -EINVAL;
++	}
++
+ 	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
+ 
+ 	real_master = mres->master;
+diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
+index 6e3134e6d98a..1b5707c44c3f 100644
+--- a/fs/orangefs/namei.c
++++ b/fs/orangefs/namei.c
+@@ -75,8 +75,7 @@ static int orangefs_create(struct inode *dir,
+ 		     get_khandle_from_ino(inode),
+ 		     dentry);
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 	orangefs_set_timeout(dentry);
+ 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ 	ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
+@@ -332,8 +331,7 @@ static int orangefs_symlink(struct inode *dir,
+ 		     "Assigned symlink inode new number of %pU\n",
+ 		     get_khandle_from_ino(inode));
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 	orangefs_set_timeout(dentry);
+ 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ 	ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
+@@ -402,8 +400,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
+ 		     "Assigned dir inode new number of %pU\n",
+ 		     get_khandle_from_ino(inode));
+ 
+-	d_instantiate(dentry, inode);
+-	unlock_new_inode(inode);
++	d_instantiate_new(dentry, inode);
+ 	orangefs_set_timeout(dentry);
+ 	ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+ 	ORANGEFS_I(inode)->getattr_mask = STATX_BASIC_STATS;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index c41ab261397d..7da10e595297 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -707,7 +707,10 @@ static bool proc_sys_link_fill_cache(struct file *file,
+ 				    struct ctl_table *table)
+ {
+ 	bool ret = true;
++
+ 	head = sysctl_head_grab(head);
++	if (IS_ERR(head))
++		return false;
+ 
+ 	if (S_ISLNK(table->mode)) {
+ 		/* It is not an error if we can not follow the link ignore it */
+diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
+index bd39a998843d..5089dac02660 100644
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -687,8 +687,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
+ 	reiserfs_update_inode_transaction(inode);
+ 	reiserfs_update_inode_transaction(dir);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	retval = journal_end(&th);
+ 
+ out_failed:
+@@ -771,8 +770,7 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
+ 		goto out_failed;
+ 	}
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	retval = journal_end(&th);
+ 
+ out_failed:
+@@ -871,8 +869,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
+ 	/* the above add_entry did not update dir's stat data */
+ 	reiserfs_update_sd(&th, dir);
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	retval = journal_end(&th);
+ out_failed:
+ 	reiserfs_write_unlock(dir->i_sb);
+@@ -1187,8 +1184,7 @@ static int reiserfs_symlink(struct inode *parent_dir,
+ 		goto out_failed;
+ 	}
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	retval = journal_end(&th);
+ out_failed:
+ 	reiserfs_write_unlock(parent_dir->i_sb);
+diff --git a/fs/super.c b/fs/super.c
+index afbf4d220c27..f25717f9b691 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -120,13 +120,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
+ 	sb = container_of(shrink, struct super_block, s_shrink);
+ 
+ 	/*
+-	 * Don't call trylock_super as it is a potential
+-	 * scalability bottleneck. The counts could get updated
+-	 * between super_cache_count and super_cache_scan anyway.
+-	 * Call to super_cache_count with shrinker_rwsem held
+-	 * ensures the safety of call to list_lru_shrink_count() and
+-	 * s_op->nr_cached_objects().
++	 * We don't call trylock_super() here as it is a scalability bottleneck,
++	 * so we're exposed to partial setup state. The shrinker rwsem does not
++	 * protect filesystem operations backing list_lru_shrink_count() or
++	 * s_op->nr_cached_objects(). Counts can change between
++	 * super_cache_count and super_cache_scan, so we really don't need locks
++	 * here.
++	 *
++	 * However, if we are currently mounting the superblock, the underlying
++	 * filesystem might be in a state of partial construction and hence it
++	 * is dangerous to access it.  trylock_super() uses a SB_BORN check to
++	 * avoid this situation, so do the same here. The memory barrier is
++	 * matched with the one in mount_fs() as we don't hold locks here.
+ 	 */
++	if (!(sb->s_flags & SB_BORN))
++		return 0;
++	smp_rmb();
++
+ 	if (sb->s_op && sb->s_op->nr_cached_objects)
+ 		total_objects = sb->s_op->nr_cached_objects(sb, sc);
+ 
+@@ -1226,6 +1236,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
+ 	sb = root->d_sb;
+ 	BUG_ON(!sb);
+ 	WARN_ON(!sb->s_bdi);
++
++	/*
++	 * Write barrier is for super_cache_count(). We place it before setting
++	 * SB_BORN as the data dependency between the two functions is the
++	 * superblock structure contents that we just set up, not the SB_BORN
++	 * flag.
++	 */
++	smp_wmb();
+ 	sb->s_flags |= SB_BORN;
+ 
+ 	error = security_sb_kern_mount(sb, flags, secdata);
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index 0458dd47e105..c586026508db 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -622,8 +622,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
+ 	if (fibh.sbh != fibh.ebh)
+ 		brelse(fibh.ebh);
+ 	brelse(fibh.sbh);
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 
+ 	return 0;
+ }
+@@ -733,8 +732,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 	inc_nlink(dir);
+ 	dir->i_ctime = dir->i_mtime = current_time(dir);
+ 	mark_inode_dirty(dir);
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	if (fibh.sbh != fibh.ebh)
+ 		brelse(fibh.ebh);
+ 	brelse(fibh.sbh);
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index f73239a9a97d..8e5d6d29b6cf 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -2091,8 +2091,9 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
+ 	bool lvid_open = false;
+ 
+ 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
+-	uopt.uid = INVALID_UID;
+-	uopt.gid = INVALID_GID;
++	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
++	uopt.uid = make_kuid(current_user_ns(), overflowuid);
++	uopt.gid = make_kgid(current_user_ns(), overflowgid);
+ 	uopt.umask = 0;
+ 	uopt.fmode = UDF_INVALID_MODE;
+ 	uopt.dmode = UDF_INVALID_MODE;
+diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
+index 32545cd00ceb..d5f43ba76c59 100644
+--- a/fs/ufs/namei.c
++++ b/fs/ufs/namei.c
+@@ -39,8 +39,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
+ {
+ 	int err = ufs_add_link(dentry, inode);
+ 	if (!err) {
+-		unlock_new_inode(inode);
+-		d_instantiate(dentry, inode);
++		d_instantiate_new(dentry, inode);
+ 		return 0;
+ 	}
+ 	inode_dec_link_count(inode);
+@@ -193,8 +192,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 	if (err)
+ 		goto out_fail;
+ 
+-	unlock_new_inode(inode);
+-	d_instantiate(dentry, inode);
++	d_instantiate_new(dentry, inode);
+ 	return 0;
+ 
+ out_fail:
+diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
+index b2cde5426182..7b68e6c9a474 100644
+--- a/fs/xfs/xfs_discard.c
++++ b/fs/xfs/xfs_discard.c
+@@ -50,19 +50,19 @@ xfs_trim_extents(
+ 
+ 	pag = xfs_perag_get(mp, agno);
+ 
+-	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
+-	if (error || !agbp)
+-		goto out_put_perag;
+-
+-	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
+-
+ 	/*
+ 	 * Force out the log.  This means any transactions that might have freed
+-	 * space before we took the AGF buffer lock are now on disk, and the
++	 * space before we take the AGF buffer lock are now on disk, and the
+ 	 * volatile disk cache is flushed.
+ 	 */
+ 	xfs_log_force(mp, XFS_LOG_SYNC);
+ 
++	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
++	if (error || !agbp)
++		goto out_put_perag;
++
++	cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);
++
+ 	/*
+ 	 * Look up the longest btree in the AGF and start with it.
+ 	 */
+diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
+index 848b463a0af5..a4c3b0a0a197 100644
+--- a/include/drm/drm_vblank.h
++++ b/include/drm/drm_vblank.h
+@@ -179,7 +179,7 @@ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+ void drm_crtc_vblank_off(struct drm_crtc *crtc);
+ void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+ void drm_crtc_vblank_on(struct drm_crtc *crtc);
+-u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
++u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
+ 
+ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ 					   unsigned int pipe, int *max_error,
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 82a99d366aec..9e9bc9f33c03 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -226,6 +226,7 @@ extern seqlock_t rename_lock;
+  * These are the low-level FS interfaces to the dcache..
+  */
+ extern void d_instantiate(struct dentry *, struct inode *);
++extern void d_instantiate_new(struct dentry *, struct inode *);
+ extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
+ extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
+ extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
+diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
+index 23159dd5be18..a1fd63871d17 100644
+--- a/include/rdma/ib_umem.h
++++ b/include/rdma/ib_umem.h
+@@ -48,7 +48,6 @@ struct ib_umem {
+ 	int                     writable;
+ 	int                     hugetlb;
+ 	struct work_struct	work;
+-	struct pid             *pid;
+ 	struct mm_struct       *mm;
+ 	unsigned long		diff;
+ 	struct ib_umem_odp     *odp_data;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index f68420b1ad93..61b477e48e9b 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -1320,14 +1320,17 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+ 
+ 	if (addr) {
+ 		if (addr & (shmlba - 1)) {
+-			/*
+-			 * Round down to the nearest multiple of shmlba.
+-			 * For sane do_mmap_pgoff() parameters, avoid
+-			 * round downs that trigger nil-page and MAP_FIXED.
+-			 */
+-			if ((shmflg & SHM_RND) && addr >= shmlba)
+-				addr &= ~(shmlba - 1);
+-			else
++			if (shmflg & SHM_RND) {
++				addr &= ~(shmlba - 1);  /* round down */
++
++				/*
++				 * Ensure that the round-down is non-nil
++				 * when remapping. This can happen for
++				 * cases when addr < shmlba.
++				 */
++				if (!addr && (shmflg & SHM_REMAP))
++					goto out;
++			} else
+ #ifndef __ARCH_FORCE_SHMLBA
+ 				if (addr & ~PAGE_MASK)
+ #endif
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 227db99b0f19..bc169f2a4766 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1058,6 +1058,8 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
+ 		return;
+ 
+ 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
++	if (!ab)
++		return;
+ 	audit_log_task_info(ab, current);
+ 	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
+ 			 audit_feature_names[which], !!old_feature, !!new_feature,
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index dbb0781a0533..90327d7cfe24 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -1566,6 +1566,7 @@ static int kdb_md(int argc, const char **argv)
+ 	int symbolic = 0;
+ 	int valid = 0;
+ 	int phys = 0;
++	int raw = 0;
+ 
+ 	kdbgetintenv("MDCOUNT", &mdcount);
+ 	kdbgetintenv("RADIX", &radix);
+@@ -1575,9 +1576,10 @@ static int kdb_md(int argc, const char **argv)
+ 	repeat = mdcount * 16 / bytesperword;
+ 
+ 	if (strcmp(argv[0], "mdr") == 0) {
+-		if (argc != 2)
++		if (argc == 2 || (argc == 0 && last_addr != 0))
++			valid = raw = 1;
++		else
+ 			return KDB_ARGCOUNT;
+-		valid = 1;
+ 	} else if (isdigit(argv[0][2])) {
+ 		bytesperword = (int)(argv[0][2] - '0');
+ 		if (bytesperword == 0) {
+@@ -1613,7 +1615,10 @@ static int kdb_md(int argc, const char **argv)
+ 		radix = last_radix;
+ 		bytesperword = last_bytesperword;
+ 		repeat = last_repeat;
+-		mdcount = ((repeat * bytesperword) + 15) / 16;
++		if (raw)
++			mdcount = repeat;
++		else
++			mdcount = ((repeat * bytesperword) + 15) / 16;
+ 	}
+ 
+ 	if (argc) {
+@@ -1630,7 +1635,10 @@ static int kdb_md(int argc, const char **argv)
+ 			diag = kdbgetularg(argv[nextarg], &val);
+ 			if (!diag) {
+ 				mdcount = (int) val;
+-				repeat = mdcount * 16 / bytesperword;
++				if (raw)
++					repeat = mdcount;
++				else
++					repeat = mdcount * 16 / bytesperword;
+ 			}
+ 		}
+ 		if (argc >= nextarg+1) {
+@@ -1640,8 +1648,15 @@ static int kdb_md(int argc, const char **argv)
+ 		}
+ 	}
+ 
+-	if (strcmp(argv[0], "mdr") == 0)
+-		return kdb_mdr(addr, mdcount);
++	if (strcmp(argv[0], "mdr") == 0) {
++		int ret;
++		last_addr = addr;
++		ret = kdb_mdr(addr, mdcount);
++		last_addr += mdcount;
++		last_repeat = mdcount;
++		last_bytesperword = bytesperword; // to make REPEAT happy
++		return ret;
++	}
+ 
+ 	switch (radix) {
+ 	case 10:
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index ca7298760c83..cc6a96303b7e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -948,27 +948,39 @@ list_update_cgroup_event(struct perf_event *event,
+ 	if (!is_cgroup_event(event))
+ 		return;
+ 
+-	if (add && ctx->nr_cgroups++)
+-		return;
+-	else if (!add && --ctx->nr_cgroups)
+-		return;
+ 	/*
+ 	 * Because cgroup events are always per-cpu events,
+ 	 * this will always be called from the right CPU.
+ 	 */
+ 	cpuctx = __get_cpu_context(ctx);
+-	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
+-	/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
+-	if (add) {
++
++	/*
++	 * Since setting cpuctx->cgrp is conditional on the current @cgrp
++	 * matching the event's cgroup, we must do this for every new event,
++	 * because if the first would mismatch, the second would not try again
++	 * and we would leave cpuctx->cgrp unset.
++	 */
++	if (add && !cpuctx->cgrp) {
+ 		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+ 
+-		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
+ 		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+ 			cpuctx->cgrp = cgrp;
+-	} else {
+-		list_del(cpuctx_entry);
+-		cpuctx->cgrp = NULL;
+ 	}
++
++	if (add && ctx->nr_cgroups++)
++		return;
++	else if (!add && --ctx->nr_cgroups)
++		return;
++
++	/* no cgroup running */
++	if (!add)
++		cpuctx->cgrp = NULL;
++
++	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
++	if (add)
++		list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
++	else
++		list_del(cpuctx_entry);
+ }
+ 
+ #else /* !CONFIG_CGROUP_PERF */
+@@ -2328,6 +2340,18 @@ static int  __perf_install_in_context(void *info)
+ 		raw_spin_lock(&task_ctx->lock);
+ 	}
+ 
++#ifdef CONFIG_CGROUP_PERF
++	if (is_cgroup_event(event)) {
++		/*
++		 * If the current cgroup doesn't match the event's
++		 * cgroup, we should not try to schedule it.
++		 */
++		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
++		reprogram = cgroup_is_descendant(cgrp->css.cgroup,
++					event->cgrp->css.cgroup);
++	}
++#endif
++
+ 	if (reprogram) {
+ 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ 		add_event_to_ctx(event, ctx);
+@@ -5746,7 +5770,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ 		values[n++] = running;
+ 
+-	if (leader != event)
++	if ((leader != event) &&
++	    (leader->state == PERF_EVENT_STATE_ACTIVE))
+ 		leader->pmu->read(leader);
+ 
+ 	values[n++] = perf_event_count(leader);
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index a37a3b4b6342..e0665549af59 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -108,7 +108,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+ 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+ 	int last_affv = affv + affd->pre_vectors;
+ 	nodemask_t nodemsk = NODE_MASK_NONE;
+-	struct cpumask *masks;
++	struct cpumask *masks = NULL;
+ 	cpumask_var_t nmsk, *node_to_possible_cpumask;
+ 
+ 	/*
+@@ -121,13 +121,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+ 	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ 		return NULL;
+ 
+-	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
+-	if (!masks)
+-		goto out;
+-
+ 	node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+ 	if (!node_to_possible_cpumask)
+-		goto out;
++		goto outcpumsk;
++
++	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
++	if (!masks)
++		goto outnodemsk;
+ 
+ 	/* Fill out vectors at the beginning that don't need affinity */
+ 	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+@@ -192,8 +192,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
+ 	/* Fill out vectors at the end that don't need affinity */
+ 	for (; curvec < nvecs; curvec++)
+ 		cpumask_copy(masks + curvec, irq_default_affinity);
++outnodemsk:
+ 	free_node_to_possible_cpumask(node_to_possible_cpumask);
+-out:
++outcpumsk:
+ 	free_cpumask_var(nmsk);
+ 	return masks;
+ }
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index fb88a028deec..1973e8d44250 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -560,8 +560,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
+ 	}
+ 	t = list_entry(rnp->gp_tasks->prev,
+ 		       struct task_struct, rcu_node_entry);
+-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
++	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
++		/*
++		 * We could be printing a lot while holding a spinlock.
++		 * Avoid triggering hard lockup.
++		 */
++		touch_nmi_watchdog();
+ 		sched_show_task(t);
++	}
+ 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ }
+ 
+@@ -1677,6 +1683,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+ 	char *ticks_title;
+ 	unsigned long ticks_value;
+ 
++	/*
++	 * We could be printing a lot while holding a spinlock.  Avoid
++	 * triggering hard lockup.
++	 */
++	touch_nmi_watchdog();
++
+ 	if (rsp->gpnum == rdp->gpnum) {
+ 		ticks_title = "ticks this GP";
+ 		ticks_value = rdp->ticks_this_gp;
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index aad49451584e..84bf1a24a55a 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -843,6 +843,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+ 			continue;
+ 
+ 		raw_spin_lock(&rq->lock);
++		update_rq_clock(rq);
++
+ 		if (rt_rq->rt_time) {
+ 			u64 runtime;
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 9afc4cb5acf5..de3143bbcd74 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1401,6 +1401,7 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+ 	if (resource >= RLIM_NLIMITS)
+ 		return -EINVAL;
+ 
++	resource = array_index_nospec(resource, RLIM_NLIMITS);
+ 	task_lock(current->group_leader);
+ 	x = current->signal->rlim[resource];
+ 	task_unlock(current->group_leader);
+@@ -1420,6 +1421,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+ 	if (resource >= RLIM_NLIMITS)
+ 		return -EINVAL;
+ 
++	resource = array_index_nospec(resource, RLIM_NLIMITS);
+ 	task_lock(current->group_leader);
+ 	r = current->signal->rlim[resource];
+ 	task_unlock(current->group_leader);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index a7705b0f139c..25f13dc22997 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -2034,10 +2034,12 @@ void *radix_tree_delete_item(struct radix_tree_root *root,
+ 			     unsigned long index, void *item)
+ {
+ 	struct radix_tree_node *node = NULL;
+-	void __rcu **slot;
++	void __rcu **slot = NULL;
+ 	void *entry;
+ 
+ 	entry = __radix_tree_lookup(root, index, &node, &slot);
++	if (!slot)
++		return NULL;
+ 	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
+ 						get_slot_offset(node, slot))))
+ 		return NULL;
+diff --git a/lib/test_kasan.c b/lib/test_kasan.c
+index 98854a64b014..ec657105edbf 100644
+--- a/lib/test_kasan.c
++++ b/lib/test_kasan.c
+@@ -567,7 +567,15 @@ static noinline void __init kmem_cache_invalid_free(void)
+ 		return;
+ 	}
+ 
++	/* Trigger invalid free, the object doesn't get freed */
+ 	kmem_cache_free(cache, p + 1);
++
++	/*
++	 * Properly free the object to prevent the "Objects remaining in
++	 * test_cache on __kmem_cache_shutdown" BUG failure.
++	 */
++	kmem_cache_free(cache, p);
++
+ 	kmem_cache_destroy(cache);
+ }
+ 
+diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
+index e13d911251e7..e9070890b28c 100644
+--- a/mm/kasan/kasan.c
++++ b/mm/kasan/kasan.c
+@@ -791,6 +791,40 @@ DEFINE_ASAN_SET_SHADOW(f5);
+ DEFINE_ASAN_SET_SHADOW(f8);
+ 
+ #ifdef CONFIG_MEMORY_HOTPLUG
++static bool shadow_mapped(unsigned long addr)
++{
++	pgd_t *pgd = pgd_offset_k(addr);
++	p4d_t *p4d;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *pte;
++
++	if (pgd_none(*pgd))
++		return false;
++	p4d = p4d_offset(pgd, addr);
++	if (p4d_none(*p4d))
++		return false;
++	pud = pud_offset(p4d, addr);
++	if (pud_none(*pud))
++		return false;
++
++	/*
++	 * We can't use pud_large() or pud_huge(), the first one is
++	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
++	 * pud_bad(), if pud is bad then it's bad because it's huge.
++	 */
++	if (pud_bad(*pud))
++		return true;
++	pmd = pmd_offset(pud, addr);
++	if (pmd_none(*pmd))
++		return false;
++
++	if (pmd_bad(*pmd))
++		return true;
++	pte = pte_offset_kernel(pmd, addr);
++	return !pte_none(*pte);
++}
++
+ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
+ 			unsigned long action, void *data)
+ {
+@@ -812,6 +846,14 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
+ 	case MEM_GOING_ONLINE: {
+ 		void *ret;
+ 
++		/*
++		 * If shadow is mapped already than it must have been mapped
++		 * during the boot. This could happen if we onlining previously
++		 * offlined memory.
++		 */
++		if (shadow_mapped(shadow_start))
++			return NOTIFY_OK;
++
+ 		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
+ 					shadow_end, GFP_KERNEL,
+ 					PAGE_KERNEL, VM_NO_GUARD,
+@@ -823,8 +865,26 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
+ 		kmemleak_ignore(ret);
+ 		return NOTIFY_OK;
+ 	}
+-	case MEM_OFFLINE:
+-		vfree((void *)shadow_start);
++	case MEM_CANCEL_ONLINE:
++	case MEM_OFFLINE: {
++		struct vm_struct *vm;
++
++		/*
++		 * shadow_start was either mapped during boot by kasan_init()
++		 * or during memory online by __vmalloc_node_range().
++		 * In the latter case we can use vfree() to free shadow.
++		 * Non-NULL result of the find_vm_area() will tell us if
++		 * that was the second case.
++		 *
++		 * Currently it's not possible to free shadow mapped
++		 * during boot by kasan_init(). It's because the code
++		 * to do that hasn't been written yet. So we'll just
++		 * leak the memory.
++		 */
++		vm = find_vm_area((void *)shadow_start);
++		if (vm)
++			vfree((void *)shadow_start);
++	}
+ 	}
+ 
+ 	return NOTIFY_OK;
+@@ -837,5 +897,5 @@ static int __init kasan_memhotplug_init(void)
+ 	return 0;
+ }
+ 
+-module_init(kasan_memhotplug_init);
++core_initcall(kasan_memhotplug_init);
+ #endif
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 2d6b35234926..d5f37b26e695 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -2089,8 +2089,22 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+ 	tree_rmap_item =
+ 		unstable_tree_search_insert(rmap_item, page, &tree_page);
+ 	if (tree_rmap_item) {
++		bool split;
++
+ 		kpage = try_to_merge_two_pages(rmap_item, page,
+ 						tree_rmap_item, tree_page);
++		/*
++		 * If both pages we tried to merge belong to the same compound
++		 * page, then we actually ended up increasing the reference
++		 * count of the same compound page twice, and split_huge_page
++		 * failed.
++		 * Here we set a flag if that happened, and we use it later to
++		 * try split_huge_page again. Since we call put_page right
++		 * afterwards, the reference count will be correct and
++		 * split_huge_page should succeed.
++		 */
++		split = PageTransCompound(page)
++			&& compound_head(page) == compound_head(tree_page);
+ 		put_page(tree_page);
+ 		if (kpage) {
+ 			/*
+@@ -2117,6 +2131,20 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+ 				break_cow(tree_rmap_item);
+ 				break_cow(rmap_item);
+ 			}
++		} else if (split) {
++			/*
++			 * We are here if we tried to merge two pages and
++			 * failed because they both belonged to the same
++			 * compound page. We will split the page now, but no
++			 * merging will take place.
++			 * We do not want to add the cost of a full lock; if
++			 * the page is locked, it is better to skip it and
++			 * perhaps try again later.
++			 */
++			if (!trylock_page(page))
++				return;
++			split_huge_page(page);
++			unlock_page(page);
+ 		}
+ 	}
+ }
+diff --git a/mm/page_idle.c b/mm/page_idle.c
+index 0a49374e6931..e412a63b2b74 100644
+--- a/mm/page_idle.c
++++ b/mm/page_idle.c
+@@ -65,11 +65,15 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
+ 	while (page_vma_mapped_walk(&pvmw)) {
+ 		addr = pvmw.address;
+ 		if (pvmw.pte) {
+-			referenced = ptep_clear_young_notify(vma, addr,
+-					pvmw.pte);
++			/*
++			 * For PTE-mapped THP, one sub page is referenced,
++			 * the whole THP is referenced.
++			 */
++			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
++				referenced = true;
+ 		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+-			referenced = pmdp_clear_young_notify(vma, addr,
+-					pvmw.pmd);
++			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
++				referenced = true;
+ 		} else {
+ 			/* unexpected pmd-mapped page? */
+ 			WARN_ON_ONCE(1);
+diff --git a/mm/slub.c b/mm/slub.c
+index e381728a3751..8442b3c54870 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1362,10 +1362,8 @@ static __always_inline void kfree_hook(void *x)
+ 	kasan_kfree_large(x, _RET_IP_);
+ }
+ 
+-static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
++static __always_inline bool slab_free_hook(struct kmem_cache *s, void *x)
+ {
+-	void *freeptr;
+-
+ 	kmemleak_free_recursive(x, s->flags);
+ 
+ 	/*
+@@ -1385,17 +1383,12 @@ static __always_inline void *slab_free_hook(struct kmem_cache *s, void *x)
+ 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+ 		debug_check_no_obj_freed(x, s->object_size);
+ 
+-	freeptr = get_freepointer(s, x);
+-	/*
+-	 * kasan_slab_free() may put x into memory quarantine, delaying its
+-	 * reuse. In this case the object's freelist pointer is changed.
+-	 */
+-	kasan_slab_free(s, x, _RET_IP_);
+-	return freeptr;
++	/* KASAN might put x into memory quarantine, delaying its reuse */
++	return kasan_slab_free(s, x, _RET_IP_);
+ }
+ 
+-static inline void slab_free_freelist_hook(struct kmem_cache *s,
+-					   void *head, void *tail)
++static inline bool slab_free_freelist_hook(struct kmem_cache *s,
++					   void **head, void **tail)
+ {
+ /*
+  * Compiler cannot detect this function can be removed if slab_free_hook()
+@@ -1406,13 +1399,33 @@ static inline void slab_free_freelist_hook(struct kmem_cache *s,
+ 	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
+ 	defined(CONFIG_KASAN)
+ 
+-	void *object = head;
+-	void *tail_obj = tail ? : head;
+-	void *freeptr;
++	void *object;
++	void *next = *head;
++	void *old_tail = *tail ? *tail : *head;
++
++	/* Head and tail of the reconstructed freelist */
++	*head = NULL;
++	*tail = NULL;
+ 
+ 	do {
+-		freeptr = slab_free_hook(s, object);
+-	} while ((object != tail_obj) && (object = freeptr));
++		object = next;
++		next = get_freepointer(s, object);
++		/* If object's reuse doesn't have to be delayed */
++		if (!slab_free_hook(s, object)) {
++			/* Move object to the new freelist */
++			set_freepointer(s, object, *head);
++			*head = object;
++			if (!*tail)
++				*tail = object;
++		}
++	} while (object != old_tail);
++
++	if (*head == *tail)
++		*tail = NULL;
++
++	return *head != NULL;
++#else
++	return true;
+ #endif
+ }
+ 
+@@ -2965,14 +2978,12 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
+ 				      void *head, void *tail, int cnt,
+ 				      unsigned long addr)
+ {
+-	slab_free_freelist_hook(s, head, tail);
+ 	/*
+-	 * slab_free_freelist_hook() could have put the items into quarantine.
+-	 * If so, no need to free them.
++	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
++	 * to remove objects, whose reuse must be delayed.
+ 	 */
+-	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+-		return;
+-	do_slab_free(s, page, head, tail, cnt, addr);
++	if (slab_free_freelist_hook(s, &head, &tail))
++		do_slab_free(s, page, head, tail, cnt, addr);
+ }
+ 
+ #ifdef CONFIG_KASAN
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index c7a33717d079..a134d1e86795 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2961,6 +2961,10 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
+ 	maxpages = swp_offset(pte_to_swp_entry(
+ 			swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ 	last_page = swap_header->info.last_page;
++	if (!last_page) {
++		pr_warn("Empty swap-file\n");
++		return 0;
++	}
+ 	if (last_page > maxpages) {
+ 		pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
+ 			maxpages << (PAGE_SHIFT - 10),
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index f6a1587f9f31..a47621fa8496 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3896,7 +3896,13 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
+  */
+ int page_evictable(struct page *page)
+ {
+-	return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
++	int ret;
++
++	/* Prevent address_space of inode and swap cache from being freed */
++	rcu_read_lock();
++	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ #ifdef CONFIG_SHMEM
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 36d31d3593e1..95c9e90f8fda 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -469,6 +469,8 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
+ 	spin_lock_init(&pool->lock);
+ 	spin_lock_init(&pool->stale_lock);
+ 	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
++	if (!pool->unbuddied)
++		goto out_pool;
+ 	for_each_possible_cpu(cpu) {
+ 		struct list_head *unbuddied =
+ 				per_cpu_ptr(pool->unbuddied, cpu);
+@@ -481,7 +483,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
+ 	pool->name = name;
+ 	pool->compact_wq = create_singlethread_workqueue(pool->name);
+ 	if (!pool->compact_wq)
+-		goto out;
++		goto out_unbuddied;
+ 	pool->release_wq = create_singlethread_workqueue(pool->name);
+ 	if (!pool->release_wq)
+ 		goto out_wq;
+@@ -491,8 +493,11 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
+ 
+ out_wq:
+ 	destroy_workqueue(pool->compact_wq);
+-out:
++out_unbuddied:
++	free_percpu(pool->unbuddied);
++out_pool:
+ 	kfree(pool);
++out:
+ 	return NULL;
+ }
+ 
+diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
+index 22dc1b9d6362..c070dfc0190a 100644
+--- a/net/netlabel/netlabel_unlabeled.c
++++ b/net/netlabel/netlabel_unlabeled.c
+@@ -1472,6 +1472,16 @@ int netlbl_unlabel_getattr(const struct sk_buff *skb,
+ 		iface = rcu_dereference(netlbl_unlhsh_def);
+ 	if (iface == NULL || !iface->valid)
+ 		goto unlabel_getattr_nolabel;
++
++#if IS_ENABLED(CONFIG_IPV6)
++	/* When resolving a fallback label, check the sk_buff version as
++	 * it is possible (e.g. SCTP) to have family = PF_INET6 while
++	 * receiving ip_hdr(skb)->version = 4.
++	 */
++	if (family == PF_INET6 && ip_hdr(skb)->version == 4)
++		family = PF_INET;
++#endif /* IPv6 */
++
+ 	switch (family) {
+ 	case PF_INET: {
+ 		struct iphdr *hdr4;
+diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
+index ad2ab1103189..67b6f2428d46 100644
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -225,7 +225,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
+ 				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
+ 	}
+ 
+-	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
++	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
+ 	resend_at += jiffies + rxrpc_resend_timeout;
+ 	WRITE_ONCE(call->resend_at, resend_at);
+ 
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 6fc61400337f..34db634594c4 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -1240,16 +1240,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
+ 			goto discard_unlock;
+ 
+ 		if (sp->hdr.callNumber == chan->last_call) {
+-			/* For the previous service call, if completed successfully, we
+-			 * discard all further packets.
++			if (chan->call ||
++			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
++				goto discard_unlock;
++
++			/* For the previous service call, if completed
++			 * successfully, we discard all further packets.
+ 			 */
+ 			if (rxrpc_conn_is_service(conn) &&
+-			    (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
+-			     sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
++			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
+ 				goto discard_unlock;
+ 
+-			/* But otherwise we need to retransmit the final packet from
+-			 * data cached in the connection record.
++			/* But otherwise we need to retransmit the final packet
++			 * from data cached in the connection record.
+ 			 */
+ 			rxrpc_post_packet_to_conn(conn, skb);
+ 			goto out_unlock;
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 09f2a3e05221..7a94ce92ffdc 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -130,7 +130,9 @@ static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
+ 	spin_lock_bh(&call->lock);
+ 
+ 	if (call->state < RXRPC_CALL_COMPLETE) {
+-		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
++		call->rxtx_annotations[ix] =
++			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
++			RXRPC_TX_ANNO_RETRANS;
+ 		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ 			rxrpc_queue_call(call);
+ 	}
+diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
+index 2a8957bd6d38..26df554f7588 100644
+--- a/net/smc/smc_ib.c
++++ b/net/smc/smc_ib.c
+@@ -23,6 +23,8 @@
+ #include "smc_wr.h"
+ #include "smc.h"
+ 
++#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */
++
+ #define SMC_QP_MIN_RNR_TIMER		5
+ #define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
+ #define SMC_QP_RETRY_CNT			7 /* 7: infinite */
+@@ -438,9 +440,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
+ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
+ {
+ 	struct ib_cq_init_attr cqattr =	{
+-		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
++		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
++	int cqe_size_order, smc_order;
+ 	long rc;
+ 
++	/* the calculated number of cq entries fits to mlx5 cq allocation */
++	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
++	smc_order = MAX_ORDER - cqe_size_order - 1;
++	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
++		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
+ 	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
+ 					      smc_wr_tx_cq_handler, NULL,
+ 					      smcibdev, &cqattr);
+diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
+index ef0c3494c9cb..210bec3c3ebe 100644
+--- a/net/smc/smc_wr.h
++++ b/net/smc/smc_wr.h
+@@ -19,7 +19,6 @@
+ #include "smc.h"
+ #include "smc_core.h"
+ 
+-#define SMC_WR_MAX_CQE 32768	/* max. # of completion queue elements */
+ #define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */
+ 
+ #define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)
+diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
+index 35ef69312811..6a8f67714c83 100644
+--- a/security/integrity/ima/Kconfig
++++ b/security/integrity/ima/Kconfig
+@@ -10,6 +10,7 @@ config IMA
+ 	select CRYPTO_HASH_INFO
+ 	select TCG_TPM if HAS_IOMEM && !UML
+ 	select TCG_TIS if TCG_TPM && X86
++	select TCG_CRB if TCG_TPM && ACPI
+ 	select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+ 	help
+ 	  The Trusted Computing Group(TCG) runtime Integrity
+diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
+index 205bc69361ea..4e085a17124f 100644
+--- a/security/integrity/ima/ima_crypto.c
++++ b/security/integrity/ima/ima_crypto.c
+@@ -73,6 +73,8 @@ int __init ima_init_crypto(void)
+ 		       hash_algo_name[ima_hash_algo], rc);
+ 		return rc;
+ 	}
++	pr_info("Allocated hash algorithm: %s\n",
++		hash_algo_name[ima_hash_algo]);
+ 	return 0;
+ }
+ 
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index 2cfb0c714967..c678d3801a61 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -16,6 +16,9 @@
+  *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
+  *	and ima_file_check.
+  */
++
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/module.h>
+ #include <linux/file.h>
+ #include <linux/binfmts.h>
+@@ -472,6 +475,16 @@ static int __init init_ima(void)
+ 	ima_init_template_list();
+ 	hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ 	error = ima_init();
++
++	if (error && strcmp(hash_algo_name[ima_hash_algo],
++			    CONFIG_IMA_DEFAULT_HASH) != 0) {
++		pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
++			hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
++		hash_setup_done = 0;
++		hash_setup(CONFIG_IMA_DEFAULT_HASH);
++		error = ima_init();
++	}
++
+ 	if (!error) {
+ 		ima_initialized = 1;
+ 		ima_update_policy_flag();
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index 915f5572c6ff..f3508e6db5f7 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -384,7 +384,7 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+ 		action |= entry->action & IMA_DO_MASK;
+ 		if (entry->action & IMA_APPRAISE) {
+ 			action |= get_subaction(entry, func);
+-			action ^= IMA_HASH;
++			action &= ~IMA_HASH;
+ 		}
+ 
+ 		if (entry->action & IMA_DO_MASK)
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 8644d864e3c1..3d40fd252780 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1532,8 +1532,15 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ 			/* Called from d_instantiate or d_splice_alias. */
+ 			dentry = dget(opt_dentry);
+ 		} else {
+-			/* Called from selinux_complete_init, try to find a dentry. */
++			/*
++			 * Called from selinux_complete_init, try to find a dentry.
++			 * Some filesystems really want a connected one, so try
++			 * that first.  We could split SECURITY_FS_USE_XATTR in
++			 * two, depending upon that...
++			 */
+ 			dentry = d_find_alias(inode);
++			if (!dentry)
++				dentry = d_find_any_alias(inode);
+ 		}
+ 		if (!dentry) {
+ 			/*
+@@ -1636,14 +1643,19 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
+ 		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
+ 			/* We must have a dentry to determine the label on
+ 			 * procfs inodes */
+-			if (opt_dentry)
++			if (opt_dentry) {
+ 				/* Called from d_instantiate or
+ 				 * d_splice_alias. */
+ 				dentry = dget(opt_dentry);
+-			else
++			} else {
+ 				/* Called from selinux_complete_init, try to
+-				 * find a dentry. */
++				 * find a dentry.  Some filesystems really want
++				 * a connected one, so try that first.
++				 */
+ 				dentry = d_find_alias(inode);
++				if (!dentry)
++					dentry = d_find_any_alias(inode);
++			}
+ 			/*
+ 			 * This can be hit on boot when a file is accessed
+ 			 * before the policy is loaded.  When we load policy we
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index dc87728c5b74..0ddcae495838 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -592,7 +592,7 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
+ 	else
+ 		timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
+ 	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+-			  SNDRV_TIMER_EVENT_CONTINUE);
++			  SNDRV_TIMER_EVENT_PAUSE);
+  unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+ 	return result;
+@@ -614,7 +614,7 @@ static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
+ 		list_del_init(&timeri->ack_list);
+ 		list_del_init(&timeri->active_list);
+ 		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
+-				  SNDRV_TIMER_EVENT_CONTINUE);
++				  SNDRV_TIMER_EVENT_PAUSE);
+ 		spin_unlock(&timeri->timer->lock);
+ 	}
+ 	spin_unlock_irqrestore(&slave_active_lock, flags);
+diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
+index 8632301489fa..b67de2bb06a2 100644
+--- a/sound/core/vmaster.c
++++ b/sound/core/vmaster.c
+@@ -68,10 +68,13 @@ static int slave_update(struct link_slave *slave)
+ 		return -ENOMEM;
+ 	uctl->id = slave->slave.id;
+ 	err = slave->slave.get(&slave->slave, uctl);
++	if (err < 0)
++		goto error;
+ 	for (ch = 0; ch < slave->info.count; ch++)
+ 		slave->vals[ch] = uctl->value.integer.value[ch];
++ error:
+ 	kfree(uctl);
+-	return 0;
++	return err < 0 ? err : 0;
+ }
+ 
+ /* get the slave ctl info and save the initial values */
+diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
+index 457a1521f32f..785f4e95148c 100644
+--- a/tools/hv/hv_fcopy_daemon.c
++++ b/tools/hv/hv_fcopy_daemon.c
+@@ -23,13 +23,14 @@
+ #include <unistd.h>
+ #include <errno.h>
+ #include <linux/hyperv.h>
++#include <linux/limits.h>
+ #include <syslog.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <getopt.h>
+ 
+ static int target_fd;
+-static char target_fname[W_MAX_PATH];
++static char target_fname[PATH_MAX];
+ static unsigned long long filesize;
+ 
+ static int hv_start_fcopy(struct hv_start_fcopy *smsg)
+diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
+index b2b4ebffab8c..34031a297f02 100644
+--- a/tools/hv/hv_vss_daemon.c
++++ b/tools/hv/hv_vss_daemon.c
+@@ -22,6 +22,7 @@
+ #include <sys/poll.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <fcntl.h>
+ #include <stdio.h>
+ #include <mntent.h>
+diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
+index 012328038594..b100e4d8f9fb 100644
+--- a/tools/perf/Makefile.perf
++++ b/tools/perf/Makefile.perf
+@@ -364,7 +364,8 @@ LIBS = -Wl,--whole-archive $(PERFLIBS) $(EXTRA_PERFLIBS) -Wl,--no-whole-archive
+ 
+ ifeq ($(USE_CLANG), 1)
+   CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
+-  LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
++  CLANGLIBS_NOEXT_LIST = $(foreach l,$(CLANGLIBS_LIST),$(shell $(LLVM_CONFIG) --libdir)/libclang$(l))
++  LIBCLANG = $(foreach l,$(CLANGLIBS_NOEXT_LIST),$(wildcard $(l).a $(l).so))
+   LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
+ endif
+ 
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 54a4c152edb3..9204cdfed73d 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -2274,11 +2274,16 @@ static int add_default_attributes(void)
+ 		return 0;
+ 
+ 	if (transaction_run) {
++		struct parse_events_error errinfo;
++
+ 		if (pmu_have_event("cpu", "cycles-ct") &&
+ 		    pmu_have_event("cpu", "el-start"))
+-			err = parse_events(evsel_list, transaction_attrs, NULL);
++			err = parse_events(evsel_list, transaction_attrs,
++					   &errinfo);
+ 		else
+-			err = parse_events(evsel_list, transaction_limited_attrs, NULL);
++			err = parse_events(evsel_list,
++					   transaction_limited_attrs,
++					   &errinfo);
+ 		if (err) {
+ 			fprintf(stderr, "Cannot set up transaction events\n");
+ 			return -1;
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 35ac016fcb98..fd6e238b5cc8 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -1224,8 +1224,10 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+ 
+ static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
+ {
+-	if (!strcmp(var, "top.call-graph"))
+-		var = "call-graph.record-mode"; /* fall-through */
++	if (!strcmp(var, "top.call-graph")) {
++		var = "call-graph.record-mode";
++		return perf_default_config(var, value, cb);
++	}
+ 	if (!strcmp(var, "top.children")) {
+ 		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
+ 		return 0;
+diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
+index 260418969120..2f008067d989 100644
+--- a/tools/perf/tests/dwarf-unwind.c
++++ b/tools/perf/tests/dwarf-unwind.c
+@@ -37,6 +37,19 @@ static int init_live_machine(struct machine *machine)
+ 						  mmap_handler, machine, true, 500);
+ }
+ 
++/*
++ * We need to keep these functions global, despite the
++ * fact that they are used only locally in this object,
++ * in order to keep them around even if the binary is
++ * stripped. If they are gone, the unwind check for
++ * symbol fails.
++ */
++int test_dwarf_unwind__thread(struct thread *thread);
++int test_dwarf_unwind__compare(void *p1, void *p2);
++int test_dwarf_unwind__krava_3(struct thread *thread);
++int test_dwarf_unwind__krava_2(struct thread *thread);
++int test_dwarf_unwind__krava_1(struct thread *thread);
++
+ #define MAX_STACK 8
+ 
+ static int unwind_entry(struct unwind_entry *entry, void *arg)
+@@ -45,12 +58,12 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
+ 	char *symbol = entry->sym ? entry->sym->name : NULL;
+ 	static const char *funcs[MAX_STACK] = {
+ 		"test__arch_unwind_sample",
+-		"unwind_thread",
+-		"compare",
++		"test_dwarf_unwind__thread",
++		"test_dwarf_unwind__compare",
+ 		"bsearch",
+-		"krava_3",
+-		"krava_2",
+-		"krava_1",
++		"test_dwarf_unwind__krava_3",
++		"test_dwarf_unwind__krava_2",
++		"test_dwarf_unwind__krava_1",
+ 		"test__dwarf_unwind"
+ 	};
+ 	/*
+@@ -77,7 +90,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
+ 	return strcmp((const char *) symbol, funcs[idx]);
+ }
+ 
+-static noinline int unwind_thread(struct thread *thread)
++noinline int test_dwarf_unwind__thread(struct thread *thread)
+ {
+ 	struct perf_sample sample;
+ 	unsigned long cnt = 0;
+@@ -108,7 +121,7 @@ static noinline int unwind_thread(struct thread *thread)
+ 
+ static int global_unwind_retval = -INT_MAX;
+ 
+-static noinline int compare(void *p1, void *p2)
++noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+ {
+ 	/* Any possible value should be 'thread' */
+ 	struct thread *thread = *(struct thread **)p1;
+@@ -117,17 +130,17 @@ static noinline int compare(void *p1, void *p2)
+ 		/* Call unwinder twice for both callchain orders. */
+ 		callchain_param.order = ORDER_CALLER;
+ 
+-		global_unwind_retval = unwind_thread(thread);
++		global_unwind_retval = test_dwarf_unwind__thread(thread);
+ 		if (!global_unwind_retval) {
+ 			callchain_param.order = ORDER_CALLEE;
+-			global_unwind_retval = unwind_thread(thread);
++			global_unwind_retval = test_dwarf_unwind__thread(thread);
+ 		}
+ 	}
+ 
+ 	return p1 - p2;
+ }
+ 
+-static noinline int krava_3(struct thread *thread)
++noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+ {
+ 	struct thread *array[2] = {thread, thread};
+ 	void *fp = &bsearch;
+@@ -141,18 +154,19 @@ static noinline int krava_3(struct thread *thread)
+ 			size_t, int (*)(void *, void *));
+ 
+ 	_bsearch = fp;
+-	_bsearch(array, &thread, 2, sizeof(struct thread **), compare);
++	_bsearch(array, &thread, 2, sizeof(struct thread **),
++		 test_dwarf_unwind__compare);
+ 	return global_unwind_retval;
+ }
+ 
+-static noinline int krava_2(struct thread *thread)
++noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+ {
+-	return krava_3(thread);
++	return test_dwarf_unwind__krava_3(thread);
+ }
+ 
+-static noinline int krava_1(struct thread *thread)
++noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+ {
+-	return krava_2(thread);
++	return test_dwarf_unwind__krava_2(thread);
+ }
+ 
+ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
+@@ -189,7 +203,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
+ 		goto out;
+ 	}
+ 
+-	err = krava_1(thread);
++	err = test_dwarf_unwind__krava_1(thread);
+ 	thread__put(thread);
+ 
+  out:
+diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+index c446c894b297..8c4ab0b390c0 100755
+--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
++++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+@@ -21,12 +21,12 @@ trace_libc_inet_pton_backtrace() {
+ 	expected[3]=".*packets transmitted.*"
+ 	expected[4]="rtt min.*"
+ 	expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
+-	expected[6]=".*inet_pton[[:space:]]\($libc\)$"
++	expected[6]=".*inet_pton[[:space:]]\($libc|inlined\)$"
+ 	case "$(uname -m)" in
+ 	s390x)
+ 		eventattr='call-graph=dwarf'
+-		expected[7]="gaih_inet[[:space:]]\(inlined\)$"
+-		expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
++		expected[7]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
++		expected[8]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
+ 		expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
+ 		expected[10]="__libc_start_main[[:space:]]\($libc\)$"
+ 		expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
+diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
+index f6789fb029d6..884cad122acf 100644
+--- a/tools/perf/tests/vmlinux-kallsyms.c
++++ b/tools/perf/tests/vmlinux-kallsyms.c
+@@ -125,7 +125,7 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
+ 
+ 		if (pair && UM(pair->start) == mem_start) {
+ next_pair:
+-			if (strcmp(sym->name, pair->name) == 0) {
++			if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
+ 				/*
+ 				 * kallsyms don't have the symbol end, so we
+ 				 * set that by using the next symbol start - 1,
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index fbf927cf775d..6ff6839558b0 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -319,6 +319,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
+ 	struct map_symbol *ms = ab->b.priv;
+ 	struct symbol *sym = ms->sym;
+ 	u8 pcnt_width = annotate_browser__pcnt_width(ab);
++	int width = 0;
+ 
+ 	/* PLT symbols contain external offsets */
+ 	if (strstr(sym->name, "@plt"))
+@@ -365,13 +366,17 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
+ 		to = (u64)btarget->idx;
+ 	}
+ 
++	if (ab->have_cycles)
++		width = IPC_WIDTH + CYCLES_WIDTH;
++
+ 	ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
+-	__ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
++	__ui_browser__line_arrow(browser,
++				 pcnt_width + 2 + ab->addr_width + width,
+ 				 from, to);
+ 
+ 	if (is_fused(ab, cursor)) {
+ 		ui_browser__mark_fused(browser,
+-				       pcnt_width + 3 + ab->addr_width,
++				       pcnt_width + 3 + ab->addr_width + width,
+ 				       from - 1,
+ 				       to > from ? true : false);
+ 	}
+diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
+index 1bfc946e37dc..bf31ceab33bd 100644
+--- a/tools/perf/util/c++/clang.cpp
++++ b/tools/perf/util/c++/clang.cpp
+@@ -9,6 +9,7 @@
+  * Copyright (C) 2016 Huawei Inc.
+  */
+ 
++#include "clang/Basic/Version.h"
+ #include "clang/CodeGen/CodeGenAction.h"
+ #include "clang/Frontend/CompilerInvocation.h"
+ #include "clang/Frontend/CompilerInstance.h"
+@@ -58,7 +59,8 @@ createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
+ 
+ 	FrontendOptions& Opts = CI->getFrontendOpts();
+ 	Opts.Inputs.clear();
+-	Opts.Inputs.emplace_back(Path, IK_C);
++	Opts.Inputs.emplace_back(Path,
++			FrontendOptions::getInputKindForExtension("c"));
+ 	return CI;
+ }
+ 
+@@ -71,10 +73,17 @@ getModuleFromSource(llvm::opt::ArgStringList CFlags,
+ 
+ 	Clang.setVirtualFileSystem(&*VFS);
+ 
++#if CLANG_VERSION_MAJOR < 4
+ 	IntrusiveRefCntPtr<CompilerInvocation> CI =
+ 		createCompilerInvocation(std::move(CFlags), Path,
+ 					 Clang.getDiagnostics());
+ 	Clang.setInvocation(&*CI);
++#else
++	std::shared_ptr<CompilerInvocation> CI(
++		createCompilerInvocation(std::move(CFlags), Path,
++					 Clang.getDiagnostics()));
++	Clang.setInvocation(CI);
++#endif
+ 
+ 	std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
+ 	if (!Clang.ExecuteAction(*Act))
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index b6140950301e..44a8456cea10 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -879,7 +879,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
+ 	 * cumulated only one time to prevent entries more than 100%
+ 	 * overhead.
+ 	 */
+-	he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
++	he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
+ 	if (he_cache == NULL)
+ 		return -ENOMEM;
+ 
+@@ -1045,8 +1045,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
+ 	if (err)
+ 		return err;
+ 
+-	iter->max_stack = max_stack_depth;
+-
+ 	err = iter->ops->prepare_entry(iter, al);
+ 	if (err)
+ 		goto out;
+diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
+index 02721b579746..e869cad4d89f 100644
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -107,7 +107,6 @@ struct hist_entry_iter {
+ 	int curr;
+ 
+ 	bool hide_unresolved;
+-	int max_stack;
+ 
+ 	struct perf_evsel *evsel;
+ 	struct perf_sample *sample;
+diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
+index 91531a7c8fbf..0bda6dfd5b96 100644
+--- a/tools/perf/util/mmap.c
++++ b/tools/perf/util/mmap.c
+@@ -344,5 +344,11 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite,
+  */
+ void perf_mmap__read_done(struct perf_mmap *map)
+ {
++	/*
++	 * Check if event was unmapped due to a POLLHUP/POLLERR.
++	 */
++	if (!refcount_read(&map->refcnt))
++		return;
++
+ 	map->prev = perf_mmap__read_head(map);
+ }
+diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
+index 6c645eb77d42..ee820fcc29b0 100644
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -252,6 +252,13 @@ void idr_checks(void)
+ 	idr_remove(&idr, 3);
+ 	idr_remove(&idr, 0);
+ 
++	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
++	idr_remove(&idr, 1);
++	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
++		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
++	idr_remove(&idr, 1 << 30);
++	idr_destroy(&idr);
++
+ 	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
+ 		struct item *item = item_create(i, 0);
+ 		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 7442dfb73b7f..0fbe778efd5f 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -130,6 +130,7 @@ ifdef INSTALL_PATH
+ 		BUILD_TARGET=$$BUILD/$$TARGET;	\
+ 		echo "echo ; echo Running tests in $$TARGET" >> $(ALL_SCRIPT); \
+ 		echo "echo ========================================" >> $(ALL_SCRIPT); \
++		echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
+ 		echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ 		make -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
+ 		echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
+diff --git a/tools/testing/selftests/net/fib-onlink-tests.sh b/tools/testing/selftests/net/fib-onlink-tests.sh
+new file mode 100644
+index 000000000000..06b1d7cc12cc
+--- /dev/null
++++ b/tools/testing/selftests/net/fib-onlink-tests.sh
+@@ -0,0 +1,375 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# IPv4 and IPv6 onlink tests
++
++PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
++
++# Network interfaces
++# - odd in current namespace; even in peer ns
++declare -A NETIFS
++# default VRF
++NETIFS[p1]=veth1
++NETIFS[p2]=veth2
++NETIFS[p3]=veth3
++NETIFS[p4]=veth4
++# VRF
++NETIFS[p5]=veth5
++NETIFS[p6]=veth6
++NETIFS[p7]=veth7
++NETIFS[p8]=veth8
++
++# /24 network
++declare -A V4ADDRS
++V4ADDRS[p1]=169.254.1.1
++V4ADDRS[p2]=169.254.1.2
++V4ADDRS[p3]=169.254.3.1
++V4ADDRS[p4]=169.254.3.2
++V4ADDRS[p5]=169.254.5.1
++V4ADDRS[p6]=169.254.5.2
++V4ADDRS[p7]=169.254.7.1
++V4ADDRS[p8]=169.254.7.2
++
++# /64 network
++declare -A V6ADDRS
++V6ADDRS[p1]=2001:db8:101::1
++V6ADDRS[p2]=2001:db8:101::2
++V6ADDRS[p3]=2001:db8:301::1
++V6ADDRS[p4]=2001:db8:301::2
++V6ADDRS[p5]=2001:db8:501::1
++V6ADDRS[p6]=2001:db8:501::2
++V6ADDRS[p7]=2001:db8:701::1
++V6ADDRS[p8]=2001:db8:701::2
++
++# Test networks:
++# [1] = default table
++# [2] = VRF
++#
++# /32 host routes
++declare -A TEST_NET4
++TEST_NET4[1]=169.254.101
++TEST_NET4[2]=169.254.102
++# /128 host routes
++declare -A TEST_NET6
++TEST_NET6[1]=2001:db8:101
++TEST_NET6[2]=2001:db8:102
++
++# connected gateway
++CONGW[1]=169.254.1.254
++CONGW[2]=169.254.5.254
++
++# recursive gateway
++RECGW4[1]=169.254.11.254
++RECGW4[2]=169.254.12.254
++RECGW6[1]=2001:db8:11::64
++RECGW6[2]=2001:db8:12::64
++
++# for v4 mapped to v6
++declare -A TEST_NET4IN6IN6
++TEST_NET4IN6[1]=10.1.1.254
++TEST_NET4IN6[2]=10.2.1.254
++
++# mcast address
++MCAST6=ff02::1
++
++
++PEER_NS=bart
++PEER_CMD="ip netns exec ${PEER_NS}"
++VRF=lisa
++VRF_TABLE=1101
++PBR_TABLE=101
++
++################################################################################
++# utilities
++
++log_test()
++{
++	local rc=$1
++	local expected=$2
++	local msg="$3"
++
++	if [ ${rc} -eq ${expected} ]; then
++		nsuccess=$((nsuccess+1))
++		printf "\n    TEST: %-50s  [ OK ]\n" "${msg}"
++	else
++		nfail=$((nfail+1))
++		printf "\n    TEST: %-50s  [FAIL]\n" "${msg}"
++		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
++			echo
++			echo "hit enter to continue, 'q' to quit"
++			read a
++			[ "$a" = "q" ] && exit 1
++		fi
++	fi
++}
++
++log_section()
++{
++	echo
++	echo "######################################################################"
++	echo "TEST SECTION: $*"
++	echo "######################################################################"
++}
++
++log_subsection()
++{
++	echo
++	echo "#########################################"
++	echo "TEST SUBSECTION: $*"
++}
++
++run_cmd()
++{
++	echo
++	echo "COMMAND: $*"
++	eval $*
++}
++
++get_linklocal()
++{
++	local dev=$1
++	local pfx
++	local addr
++
++	addr=$(${pfx} ip -6 -br addr show dev ${dev} | \
++	awk '{
++		for (i = 3; i <= NF; ++i) {
++			if ($i ~ /^fe80/)
++				print $i
++		}
++	}'
++	)
++	addr=${addr/\/*}
++
++	[ -z "$addr" ] && return 1
++
++	echo $addr
++
++	return 0
++}
++
++################################################################################
++#
++
++setup()
++{
++	echo
++	echo "########################################"
++	echo "Configuring interfaces"
++
++	set -e
++
++	# create namespace
++	ip netns add ${PEER_NS}
++	ip -netns ${PEER_NS} li set lo up
++
++	# add vrf table
++	ip li add ${VRF} type vrf table ${VRF_TABLE}
++	ip li set ${VRF} up
++	ip ro add table ${VRF_TABLE} unreachable default
++	ip -6 ro add table ${VRF_TABLE} unreachable default
++
++	# create test interfaces
++	ip li add ${NETIFS[p1]} type veth peer name ${NETIFS[p2]}
++	ip li add ${NETIFS[p3]} type veth peer name ${NETIFS[p4]}
++	ip li add ${NETIFS[p5]} type veth peer name ${NETIFS[p6]}
++	ip li add ${NETIFS[p7]} type veth peer name ${NETIFS[p8]}
++
++	# enslave vrf interfaces
++	for n in 5 7; do
++		ip li set ${NETIFS[p${n}]} vrf ${VRF}
++	done
++
++	# add addresses
++	for n in 1 3 5 7; do
++		ip li set ${NETIFS[p${n}]} up
++		ip addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
++		ip addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
++	done
++
++	# move peer interfaces to namespace and add addresses
++	for n in 2 4 6 8; do
++		ip li set ${NETIFS[p${n}]} netns ${PEER_NS} up
++		ip -netns ${PEER_NS} addr add ${V4ADDRS[p${n}]}/24 dev ${NETIFS[p${n}]}
++		ip -netns ${PEER_NS} addr add ${V6ADDRS[p${n}]}/64 dev ${NETIFS[p${n}]}
++	done
++
++	set +e
++
++	# let DAD complete - assume default of 1 probe
++	sleep 1
++}
++
++cleanup()
++{
++	# make sure we start from a clean slate
++	ip netns del ${PEER_NS} 2>/dev/null
++	for n in 1 3 5 7; do
++		ip link del ${NETIFS[p${n}]} 2>/dev/null
++	done
++	ip link del ${VRF} 2>/dev/null
++	ip ro flush table ${VRF_TABLE}
++	ip -6 ro flush table ${VRF_TABLE}
++}
++
++################################################################################
++# IPv4 tests
++#
++
++run_ip()
++{
++	local table="$1"
++	local prefix="$2"
++	local gw="$3"
++	local dev="$4"
++	local exp_rc="$5"
++	local desc="$6"
++
++	# dev arg may be empty
++	[ -n "${dev}" ] && dev="dev ${dev}"
++
++	run_cmd ip ro add table "${table}" "${prefix}"/32 via "${gw}" "${dev}" onlink
++	log_test $? ${exp_rc} "${desc}"
++}
++
++valid_onlink_ipv4()
++{
++	# - unicast connected, unicast recursive
++	#
++	log_subsection "default VRF - main table"
++
++	run_ip 254 ${TEST_NET4[1]}.1 ${CONGW[1]} ${NETIFS[p1]} 0 "unicast connected"
++	run_ip 254 ${TEST_NET4[1]}.2 ${RECGW4[1]} ${NETIFS[p1]} 0 "unicast recursive"
++
++	log_subsection "VRF ${VRF}"
++
++	run_ip ${VRF_TABLE} ${TEST_NET4[2]}.1 ${CONGW[2]} ${NETIFS[p5]} 0 "unicast connected"
++	run_ip ${VRF_TABLE} ${TEST_NET4[2]}.2 ${RECGW4[2]} ${NETIFS[p5]} 0 "unicast recursive"
++
++	log_subsection "VRF device, PBR table"
++
++	run_ip ${PBR_TABLE} ${TEST_NET4[2]}.3 ${CONGW[2]} ${NETIFS[p5]} 0 "unicast connected"
++	run_ip ${PBR_TABLE} ${TEST_NET4[2]}.4 ${RECGW4[2]} ${NETIFS[p5]} 0 "unicast recursive"
++}
++
++invalid_onlink_ipv4()
++{
++	run_ip 254 ${TEST_NET4[1]}.11 ${V4ADDRS[p1]} ${NETIFS[p1]} 2 \
++		"Invalid gw - local unicast address"
++
++	run_ip ${VRF_TABLE} ${TEST_NET4[2]}.11 ${V4ADDRS[p5]} ${NETIFS[p5]} 2 \
++		"Invalid gw - local unicast address, VRF"
++
++	run_ip 254 ${TEST_NET4[1]}.101 ${V4ADDRS[p1]} "" 2 "No nexthop device given"
++
++	run_ip 254 ${TEST_NET4[1]}.102 ${V4ADDRS[p3]} ${NETIFS[p1]} 2 \
++		"Gateway resolves to wrong nexthop device"
++
++	run_ip ${VRF_TABLE} ${TEST_NET4[2]}.103 ${V4ADDRS[p7]} ${NETIFS[p5]} 2 \
++		"Gateway resolves to wrong nexthop device - VRF"
++}
++
++################################################################################
++# IPv6 tests
++#
++
++run_ip6()
++{
++	local table="$1"
++	local prefix="$2"
++	local gw="$3"
++	local dev="$4"
++	local exp_rc="$5"
++	local desc="$6"
++
++	# dev arg may be empty
++	[ -n "${dev}" ] && dev="dev ${dev}"
++
++	run_cmd ip -6 ro add table "${table}" "${prefix}"/128 via "${gw}" "${dev}" onlink
++	log_test $? ${exp_rc} "${desc}"
++}
++
++valid_onlink_ipv6()
++{
++	# - unicast connected, unicast recursive, v4-mapped
++	#
++	log_subsection "default VRF - main table"
++
++	run_ip6 254 ${TEST_NET6[1]}::1 ${V6ADDRS[p1]/::*}::64 ${NETIFS[p1]} 0 "unicast connected"
++	run_ip6 254 ${TEST_NET6[1]}::2 ${RECGW6[1]} ${NETIFS[p1]} 0 "unicast recursive"
++	run_ip6 254 ${TEST_NET6[1]}::3 ::ffff:${TEST_NET4IN6[1]} ${NETIFS[p1]} 0 "v4-mapped"
++
++	log_subsection "VRF ${VRF}"
++
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::1 ${V6ADDRS[p5]/::*}::64 ${NETIFS[p5]} 0 "unicast connected"
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::2 ${RECGW6[2]} ${NETIFS[p5]} 0 "unicast recursive"
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::3 ::ffff:${TEST_NET4IN6[2]} ${NETIFS[p5]} 0 "v4-mapped"
++
++	log_subsection "VRF device, PBR table"
++
++	run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::4 ${V6ADDRS[p5]/::*}::64 ${NETIFS[p5]} 0 "unicast connected"
++	run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::5 ${RECGW6[2]} ${NETIFS[p5]} 0 "unicast recursive"
++	run_ip6 ${PBR_TABLE} ${TEST_NET6[2]}::6 ::ffff:${TEST_NET4IN6[2]} ${NETIFS[p5]} 0 "v4-mapped"
++}
++
++invalid_onlink_ipv6()
++{
++	local lladdr
++
++	lladdr=$(get_linklocal ${NETIFS[p1]}) || return 1
++
++	run_ip6 254 ${TEST_NET6[1]}::11 ${V6ADDRS[p1]} ${NETIFS[p1]} 2 \
++		"Invalid gw - local unicast address"
++	run_ip6 254 ${TEST_NET6[1]}::12 ${lladdr} ${NETIFS[p1]} 2 \
++		"Invalid gw - local linklocal address"
++	run_ip6 254 ${TEST_NET6[1]}::12 ${MCAST6} ${NETIFS[p1]} 2 \
++		"Invalid gw - multicast address"
++
++	lladdr=$(get_linklocal ${NETIFS[p5]}) || return 1
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::11 ${V6ADDRS[p5]} ${NETIFS[p5]} 2 \
++		"Invalid gw - local unicast address, VRF"
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::12 ${lladdr} ${NETIFS[p5]} 2 \
++		"Invalid gw - local linklocal address, VRF"
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::12 ${MCAST6} ${NETIFS[p5]} 2 \
++		"Invalid gw - multicast address, VRF"
++
++	run_ip6 254 ${TEST_NET6[1]}::101 ${V6ADDRS[p1]} "" 2 \
++		"No nexthop device given"
++
++	# default VRF validation is done against LOCAL table
++	# run_ip6 254 ${TEST_NET6[1]}::102 ${V6ADDRS[p3]/::[0-9]/::64} ${NETIFS[p1]} 2 \
++	#	"Gateway resolves to wrong nexthop device"
++
++	run_ip6 ${VRF_TABLE} ${TEST_NET6[2]}::103 ${V6ADDRS[p7]/::[0-9]/::64} ${NETIFS[p5]} 2 \
++		"Gateway resolves to wrong nexthop device - VRF"
++}
++
++run_onlink_tests()
++{
++	log_section "IPv4 onlink"
++	log_subsection "Valid onlink commands"
++	valid_onlink_ipv4
++	log_subsection "Invalid onlink commands"
++	invalid_onlink_ipv4
++
++	log_section "IPv6 onlink"
++	log_subsection "Valid onlink commands"
++	valid_onlink_ipv6
++	invalid_onlink_ipv6
++}
++
++################################################################################
++# main
++
++nsuccess=0
++nfail=0
++
++cleanup
++setup
++run_onlink_tests
++cleanup
++
++if [ "$TESTS" != "none" ]; then
++	printf "\nTests passed: %3d\n" ${nsuccess}
++	printf "Tests failed: %3d\n"   ${nfail}
++fi
+diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
+index 989f917068d1..d4346b16b2c1 100644
+--- a/tools/testing/selftests/net/psock_fanout.c
++++ b/tools/testing/selftests/net/psock_fanout.c
+@@ -128,6 +128,8 @@ static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id)
+ 
+ static void sock_fanout_set_ebpf(int fd)
+ {
++	static char log_buf[65536];
++
+ 	const int len_off = __builtin_offsetof(struct __sk_buff, len);
+ 	struct bpf_insn prog[] = {
+ 		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+@@ -140,7 +142,6 @@ static void sock_fanout_set_ebpf(int fd)
+ 		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+ 		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+ 	};
+-	char log_buf[512];
+ 	union bpf_attr attr;
+ 	int pfd;
+ 
+diff --git a/tools/thermal/tmon/sysfs.c b/tools/thermal/tmon/sysfs.c
+index 1c12536f2081..18f523557983 100644
+--- a/tools/thermal/tmon/sysfs.c
++++ b/tools/thermal/tmon/sysfs.c
+@@ -486,6 +486,7 @@ int zone_instance_to_index(int zone_inst)
+ int update_thermal_data()
+ {
+ 	int i;
++	int next_thermal_record = cur_thermal_record + 1;
+ 	char tz_name[256];
+ 	static unsigned long samples;
+ 
+@@ -495,9 +496,9 @@ int update_thermal_data()
+ 	}
+ 
+ 	/* circular buffer for keeping historic data */
+-	if (cur_thermal_record >= NR_THERMAL_RECORDS)
+-		cur_thermal_record = 0;
+-	gettimeofday(&trec[cur_thermal_record].tv, NULL);
++	if (next_thermal_record >= NR_THERMAL_RECORDS)
++		next_thermal_record = 0;
++	gettimeofday(&trec[next_thermal_record].tv, NULL);
+ 	if (tmon_log) {
+ 		fprintf(tmon_log, "%lu ", ++samples);
+ 		fprintf(tmon_log, "%3.1f ", p_param.t_target);
+@@ -507,11 +508,12 @@ int update_thermal_data()
+ 		snprintf(tz_name, 256, "%s/%s%d", THERMAL_SYSFS, TZONE,
+ 			ptdata.tzi[i].instance);
+ 		sysfs_get_ulong(tz_name, "temp",
+-				&trec[cur_thermal_record].temp[i]);
++				&trec[next_thermal_record].temp[i]);
+ 		if (tmon_log)
+ 			fprintf(tmon_log, "%lu ",
+-				trec[cur_thermal_record].temp[i]/1000);
++				trec[next_thermal_record].temp[i] / 1000);
+ 	}
++	cur_thermal_record = next_thermal_record;
+ 	for (i = 0; i < ptdata.nr_cooling_dev; i++) {
+ 		char cdev_name[256];
+ 		unsigned long val;
+diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
+index 9aa19652e8e8..b43138f8b862 100644
+--- a/tools/thermal/tmon/tmon.c
++++ b/tools/thermal/tmon/tmon.c
+@@ -336,7 +336,6 @@ int main(int argc, char **argv)
+ 			show_data_w();
+ 			show_cooling_device();
+ 		}
+-		cur_thermal_record++;
+ 		time_elapsed += ticktime;
+ 		controller_handler(trec[0].temp[target_tz_index] / 1000,
+ 				&yk);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-25 15:37 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-25 15:37 UTC (permalink / raw
  To: gentoo-commits

commit:     ae8ff671b8db0c142f5db55ac3e30eaf78171d29
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri May 25 15:37:41 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri May 25 15:37:41 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ae8ff671

Linux patch 4.16.12

 0000_README              |    4 +
 1011_linux-4.16.12.patch | 7076 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7080 insertions(+)

diff --git a/0000_README b/0000_README
index a5237c6..603fb6f 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-4.16.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.11
 
+Patch:  1011_linux-4.16.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.16.12.patch b/1011_linux-4.16.12.patch
new file mode 100644
index 0000000..2ef2e6c
--- /dev/null
+++ b/1011_linux-4.16.12.patch
@@ -0,0 +1,7076 @@
+diff --git a/Makefile b/Makefile
+index 79c191442771..ded9e8480d74 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 471b2274fbeb..c40b4380951c 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -74,6 +74,27 @@
+  */
+ #define EX_R3		EX_DAR
+ 
++#define STF_ENTRY_BARRIER_SLOT						\
++	STF_ENTRY_BARRIER_FIXUP_SECTION;				\
++	nop;								\
++	nop;								\
++	nop
++
++#define STF_EXIT_BARRIER_SLOT						\
++	STF_EXIT_BARRIER_FIXUP_SECTION;					\
++	nop;								\
++	nop;								\
++	nop;								\
++	nop;								\
++	nop;								\
++	nop
++
++/*
++ * r10 must be free to use, r13 must be paca
++ */
++#define INTERRUPT_TO_KERNEL						\
++	STF_ENTRY_BARRIER_SLOT
++
+ /*
+  * Macros for annotating the expected destination of (h)rfid
+  *
+@@ -90,16 +111,19 @@
+ 	rfid
+ 
+ #define RFI_TO_USER							\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	rfid;								\
+ 	b	rfi_flush_fallback
+ 
+ #define RFI_TO_USER_OR_KERNEL						\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	rfid;								\
+ 	b	rfi_flush_fallback
+ 
+ #define RFI_TO_GUEST							\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	rfid;								\
+ 	b	rfi_flush_fallback
+@@ -108,21 +132,25 @@
+ 	hrfid
+ 
+ #define HRFI_TO_USER							\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	hrfid;								\
+ 	b	hrfi_flush_fallback
+ 
+ #define HRFI_TO_USER_OR_KERNEL						\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	hrfid;								\
+ 	b	hrfi_flush_fallback
+ 
+ #define HRFI_TO_GUEST							\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	hrfid;								\
+ 	b	hrfi_flush_fallback
+ 
+ #define HRFI_TO_UNKNOWN							\
++	STF_EXIT_BARRIER_SLOT;						\
+ 	RFI_FLUSH_SLOT;							\
+ 	hrfid;								\
+ 	b	hrfi_flush_fallback
+@@ -254,6 +282,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ #define __EXCEPTION_PROLOG_1_PRE(area)					\
+ 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
+ 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
++	INTERRUPT_TO_KERNEL;						\
+ 	SAVE_CTR(r10, area);						\
+ 	mfcr	r9;
+ 
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 1e82eb3caabd..a9b64df34e2a 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -187,6 +187,22 @@ label##3:					       	\
+ 	FTR_ENTRY_OFFSET label##1b-label##3b;		\
+ 	.popsection;
+ 
++#define STF_ENTRY_BARRIER_FIXUP_SECTION			\
++953:							\
++	.pushsection __stf_entry_barrier_fixup,"a";	\
++	.align 2;					\
++954:							\
++	FTR_ENTRY_OFFSET 953b-954b;			\
++	.popsection;
++
++#define STF_EXIT_BARRIER_FIXUP_SECTION			\
++955:							\
++	.pushsection __stf_exit_barrier_fixup,"a";	\
++	.align 2;					\
++956:							\
++	FTR_ENTRY_OFFSET 955b-956b;			\
++	.popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION				\
+ 951:							\
+ 	.pushsection __rfi_flush_fixup,"a";		\
+@@ -199,6 +215,9 @@ label##3:					       	\
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+ 
++extern long stf_barrier_fallback;
++extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
++extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ 
+ void apply_feature_fixups(void);
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index eca3f9c68907..5a740feb7bd7 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -337,6 +337,9 @@
+ #define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61) // IBM bit 2
+ #define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60) // IBM bit 3
+ #define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59) // IBM bit 4
++#define H_CPU_CHAR_BRANCH_HINTS_HONORED	(1ull << 58) // IBM bit 5
++#define H_CPU_CHAR_THREAD_RECONFIG_CTRL	(1ull << 57) // IBM bit 6
++#define H_CPU_CHAR_COUNT_CACHE_DISABLED	(1ull << 56) // IBM bit 7
+ 
+ #define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+new file mode 100644
+index 000000000000..44989b22383c
+--- /dev/null
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -0,0 +1,85 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Security related feature bit definitions.
++ *
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++
++#ifndef _ASM_POWERPC_SECURITY_FEATURES_H
++#define _ASM_POWERPC_SECURITY_FEATURES_H
++
++
++extern unsigned long powerpc_security_features;
++extern bool rfi_flush;
++
++/* These are bit flags */
++enum stf_barrier_type {
++	STF_BARRIER_NONE	= 0x1,
++	STF_BARRIER_FALLBACK	= 0x2,
++	STF_BARRIER_EIEIO	= 0x4,
++	STF_BARRIER_SYNC_ORI	= 0x8,
++};
++
++void setup_stf_barrier(void);
++void do_stf_barrier_fixups(enum stf_barrier_type types);
++
++static inline void security_ftr_set(unsigned long feature)
++{
++	powerpc_security_features |= feature;
++}
++
++static inline void security_ftr_clear(unsigned long feature)
++{
++	powerpc_security_features &= ~feature;
++}
++
++static inline bool security_ftr_enabled(unsigned long feature)
++{
++	return !!(powerpc_security_features & feature);
++}
++
++
++// Features indicating support for Spectre/Meltdown mitigations
++
++// The L1-D cache can be flushed with ori r30,r30,0
++#define SEC_FTR_L1D_FLUSH_ORI30		0x0000000000000001ull
++
++// The L1-D cache can be flushed with mtspr 882,r0 (aka SPRN_TRIG2)
++#define SEC_FTR_L1D_FLUSH_TRIG2		0x0000000000000002ull
++
++// ori r31,r31,0 acts as a speculation barrier
++#define SEC_FTR_SPEC_BAR_ORI31		0x0000000000000004ull
++
++// Speculation past bctr is disabled
++#define SEC_FTR_BCCTRL_SERIALISED	0x0000000000000008ull
++
++// Entries in L1-D are private to a SMT thread
++#define SEC_FTR_L1D_THREAD_PRIV		0x0000000000000010ull
++
++// Indirect branch prediction cache disabled
++#define SEC_FTR_COUNT_CACHE_DISABLED	0x0000000000000020ull
++
++
++// Features indicating need for Spectre/Meltdown mitigations
++
++// The L1-D cache should be flushed on MSR[HV] 1->0 transition (hypervisor to guest)
++#define SEC_FTR_L1D_FLUSH_HV		0x0000000000000040ull
++
++// The L1-D cache should be flushed on MSR[PR] 0->1 transition (kernel to userspace)
++#define SEC_FTR_L1D_FLUSH_PR		0x0000000000000080ull
++
++// A speculation barrier should be used for bounds checks (Spectre variant 1)
++#define SEC_FTR_BNDS_CHK_SPEC_BAR	0x0000000000000100ull
++
++// Firmware configuration indicates user favours security over performance
++#define SEC_FTR_FAVOUR_SECURITY		0x0000000000000200ull
++
++
++// Features enabled by default
++#define SEC_FTR_DEFAULT \
++	(SEC_FTR_L1D_FLUSH_HV | \
++	 SEC_FTR_L1D_FLUSH_PR | \
++	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
++	 SEC_FTR_FAVOUR_SECURITY)
++
++#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 1b6bc7fba996..d458c45e5004 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -42,7 +42,7 @@ obj-$(CONFIG_VDSO32)		+= vdso32/
+ obj-$(CONFIG_PPC_WATCHDOG)	+= watchdog.o
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
++obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o security.o
+ obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
+ obj-$(CONFIG_PPC64)		+= vdso64/
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1ecfd8ffb098..bf9b94e376fd 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -833,7 +833,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 
+-EXC_REAL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
++EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
+ EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+@@ -909,6 +909,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ 	mtctr	r13;							\
+ 	GET_PACA(r13);							\
+ 	std	r10,PACA_EXGEN+EX_R10(r13);				\
++	INTERRUPT_TO_KERNEL;						\
+ 	KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
+ 	HMT_MEDIUM;							\
+ 	mfctr	r9;
+@@ -917,7 +918,8 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+ #define SYSCALL_KVMTEST							\
+ 	HMT_MEDIUM;							\
+ 	mr	r9,r13;							\
+-	GET_PACA(r13);
++	GET_PACA(r13);							\
++	INTERRUPT_TO_KERNEL;
+ #endif
+ 	
+ #define LOAD_SYSCALL_HANDLER(reg)					\
+@@ -1455,6 +1457,19 @@ masked_##_H##interrupt:					\
+ 	b	.;					\
+ 	MASKED_DEC_HANDLER(_H)
+ 
++TRAMP_REAL_BEGIN(stf_barrier_fallback)
++	std	r9,PACA_EXRFI+EX_R9(r13)
++	std	r10,PACA_EXRFI+EX_R10(r13)
++	sync
++	ld	r9,PACA_EXRFI+EX_R9(r13)
++	ld	r10,PACA_EXRFI+EX_R10(r13)
++	ori	31,31,0
++	.rept 14
++	b	1f
++1:
++	.endr
++	blr
++
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ 	SET_SCRATCH0(r13);
+ 	GET_PACA(r13);
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+new file mode 100644
+index 000000000000..b98a722da915
+--- /dev/null
++++ b/arch/powerpc/kernel/security.c
+@@ -0,0 +1,237 @@
++// SPDX-License-Identifier: GPL-2.0+
++//
++// Security related flags and so on.
++//
++// Copyright 2018, Michael Ellerman, IBM Corporation.
++
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/seq_buf.h>
++
++#include <asm/debugfs.h>
++#include <asm/security_features.h>
++
++
++unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	bool thread_priv;
++
++	thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
++
++	if (rfi_flush || thread_priv) {
++		struct seq_buf s;
++		seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++		seq_buf_printf(&s, "Mitigation: ");
++
++		if (rfi_flush)
++			seq_buf_printf(&s, "RFI Flush");
++
++		if (rfi_flush && thread_priv)
++			seq_buf_printf(&s, ", ");
++
++		if (thread_priv)
++			seq_buf_printf(&s, "L1D private per thread");
++
++		seq_buf_printf(&s, "\n");
++
++		return s.len;
++	}
++
++	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++		return sprintf(buf, "Not affected\n");
++
++	return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
++		return sprintf(buf, "Not affected\n");
++
++	return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	bool bcs, ccd, ori;
++	struct seq_buf s;
++
++	seq_buf_init(&s, buf, PAGE_SIZE - 1);
++
++	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
++	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
++	ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
++
++	if (bcs || ccd) {
++		seq_buf_printf(&s, "Mitigation: ");
++
++		if (bcs)
++			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++
++		if (bcs && ccd)
++			seq_buf_printf(&s, ", ");
++
++		if (ccd)
++			seq_buf_printf(&s, "Indirect branch cache disabled");
++	} else
++		seq_buf_printf(&s, "Vulnerable");
++
++	if (ori)
++		seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++	seq_buf_printf(&s, "\n");
++
++	return s.len;
++}
++
++/*
++ * Store-forwarding barrier support.
++ */
++
++static enum stf_barrier_type stf_enabled_flush_types;
++static bool no_stf_barrier;
++bool stf_barrier;
++
++static int __init handle_no_stf_barrier(char *p)
++{
++	pr_info("stf-barrier: disabled on command line.");
++	no_stf_barrier = true;
++	return 0;
++}
++
++early_param("no_stf_barrier", handle_no_stf_barrier);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_ssbd(char *p)
++{
++	if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
++		/* Until firmware tells us, we have the barrier with auto */
++		return 0;
++	} else if (strncmp(p, "off", 3) == 0) {
++		handle_no_stf_barrier(NULL);
++		return 0;
++	} else
++		return 1;
++
++	return 0;
++}
++early_param("spec_store_bypass_disable", handle_ssbd);
++
++/* This is the generic flag used by other architectures */
++static int __init handle_no_ssbd(char *p)
++{
++	handle_no_stf_barrier(NULL);
++	return 0;
++}
++early_param("nospec_store_bypass_disable", handle_no_ssbd);
++
++static void stf_barrier_enable(bool enable)
++{
++	if (enable)
++		do_stf_barrier_fixups(stf_enabled_flush_types);
++	else
++		do_stf_barrier_fixups(STF_BARRIER_NONE);
++
++	stf_barrier = enable;
++}
++
++void setup_stf_barrier(void)
++{
++	enum stf_barrier_type type;
++	bool enable, hv;
++
++	hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++	/* Default to fallback in case fw-features are not available */
++	if (cpu_has_feature(CPU_FTR_ARCH_300))
++		type = STF_BARRIER_EIEIO;
++	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
++		type = STF_BARRIER_SYNC_ORI;
++	else if (cpu_has_feature(CPU_FTR_ARCH_206))
++		type = STF_BARRIER_FALLBACK;
++	else
++		type = STF_BARRIER_NONE;
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++		(security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
++		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
++
++	if (type == STF_BARRIER_FALLBACK) {
++		pr_info("stf-barrier: fallback barrier available\n");
++	} else if (type == STF_BARRIER_SYNC_ORI) {
++		pr_info("stf-barrier: hwsync barrier available\n");
++	} else if (type == STF_BARRIER_EIEIO) {
++		pr_info("stf-barrier: eieio barrier available\n");
++	}
++
++	stf_enabled_flush_types = type;
++
++	if (!no_stf_barrier)
++		stf_barrier_enable(enable);
++}
++
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
++		const char *type;
++		switch (stf_enabled_flush_types) {
++		case STF_BARRIER_EIEIO:
++			type = "eieio";
++			break;
++		case STF_BARRIER_SYNC_ORI:
++			type = "hwsync";
++			break;
++		case STF_BARRIER_FALLBACK:
++			type = "fallback";
++			break;
++		default:
++			type = "unknown";
++		}
++		return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
++	}
++
++	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
++	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
++		return sprintf(buf, "Not affected\n");
++
++	return sprintf(buf, "Vulnerable\n");
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int stf_barrier_set(void *data, u64 val)
++{
++	bool enable;
++
++	if (val == 1)
++		enable = true;
++	else if (val == 0)
++		enable = false;
++	else
++		return -EINVAL;
++
++	/* Only do anything if we're changing state */
++	if (enable != stf_barrier)
++		stf_barrier_enable(enable);
++
++	return 0;
++}
++
++static int stf_barrier_get(void *data, u64 *val)
++{
++	*val = stf_barrier ? 1 : 0;
++	return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");
++
++static __init int stf_barrier_debugfs_init(void)
++{
++	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
++	return 0;
++}
++device_initcall(stf_barrier_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index c388cc3357fa..c27557aff394 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -927,12 +927,4 @@ static __init int rfi_flush_debugfs_init(void)
+ }
+ device_initcall(rfi_flush_debugfs_init);
+ #endif
+-
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+-{
+-	if (rfi_flush)
+-		return sprintf(buf, "Mitigation: RFI Flush\n");
+-
+-	return sprintf(buf, "Vulnerable\n");
+-}
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index c8af90ff49f0..b8d82678f8b4 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -133,6 +133,20 @@ SECTIONS
+ 	RO_DATA(PAGE_SIZE)
+ 
+ #ifdef CONFIG_PPC64
++	. = ALIGN(8);
++	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
++		__start___stf_entry_barrier_fixup = .;
++		*(__stf_entry_barrier_fixup)
++		__stop___stf_entry_barrier_fixup = .;
++	}
++
++	. = ALIGN(8);
++	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
++		__start___stf_exit_barrier_fixup = .;
++		*(__stf_exit_barrier_fixup)
++		__stop___stf_exit_barrier_fixup = .;
++	}
++
+ 	. = ALIGN(8);
+ 	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
+ 		__start___rfi_flush_fixup = .;
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index f61ff5a6bddb..6b3c2d405a6d 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -23,6 +23,7 @@
+ #include <asm/page.h>
+ #include <asm/sections.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ #include <asm/firmware.h>
+ 
+ struct fixup_entry {
+@@ -117,6 +118,120 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ }
+ 
+ #ifdef CONFIG_PPC_BOOK3S_64
++void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
++{
++	unsigned int instrs[3], *dest;
++	long *start, *end;
++	int i;
++
++	start = PTRRELOC(&__start___stf_entry_barrier_fixup),
++	end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
++
++	instrs[0] = 0x60000000; /* nop */
++	instrs[1] = 0x60000000; /* nop */
++	instrs[2] = 0x60000000; /* nop */
++
++	i = 0;
++	if (types & STF_BARRIER_FALLBACK) {
++		instrs[i++] = 0x7d4802a6; /* mflr r10		*/
++		instrs[i++] = 0x60000000; /* branch patched below */
++		instrs[i++] = 0x7d4803a6; /* mtlr r10		*/
++	} else if (types & STF_BARRIER_EIEIO) {
++		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++	} else if (types & STF_BARRIER_SYNC_ORI) {
++		instrs[i++] = 0x7c0004ac; /* hwsync		*/
++		instrs[i++] = 0xe94d0000; /* ld r10,0(r13)	*/
++		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++	}
++
++	for (i = 0; start < end; start++, i++) {
++		dest = (void *)start + *start;
++
++		pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++		patch_instruction(dest, instrs[0]);
++
++		if (types & STF_BARRIER_FALLBACK)
++			patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
++				     BRANCH_SET_LINK);
++		else
++			patch_instruction(dest + 1, instrs[1]);
++
++		patch_instruction(dest + 2, instrs[2]);
++	}
++
++	printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
++		(types == STF_BARRIER_NONE)                  ? "no" :
++		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
++		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
++		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
++		                                           : "unknown");
++}
++
++void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
++{
++	unsigned int instrs[6], *dest;
++	long *start, *end;
++	int i;
++
++	start = PTRRELOC(&__start___stf_exit_barrier_fixup),
++	end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
++
++	instrs[0] = 0x60000000; /* nop */
++	instrs[1] = 0x60000000; /* nop */
++	instrs[2] = 0x60000000; /* nop */
++	instrs[3] = 0x60000000; /* nop */
++	instrs[4] = 0x60000000; /* nop */
++	instrs[5] = 0x60000000; /* nop */
++
++	i = 0;
++	if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
++		if (cpu_has_feature(CPU_FTR_HVMODE)) {
++			instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
++			instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
++		} else {
++			instrs[i++] = 0x7db243a6; /* mtsprg 2,r13	*/
++			instrs[i++] = 0x7db142a6; /* mfsprg r13,1    */
++	        }
++		instrs[i++] = 0x7c0004ac; /* hwsync		*/
++		instrs[i++] = 0xe9ad0000; /* ld r13,0(r13)	*/
++		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++		if (cpu_has_feature(CPU_FTR_HVMODE)) {
++			instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
++		} else {
++			instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
++		}
++	} else if (types & STF_BARRIER_EIEIO) {
++		instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
++	}
++
++	for (i = 0; start < end; start++, i++) {
++		dest = (void *)start + *start;
++
++		pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++		patch_instruction(dest, instrs[0]);
++		patch_instruction(dest + 1, instrs[1]);
++		patch_instruction(dest + 2, instrs[2]);
++		patch_instruction(dest + 3, instrs[3]);
++		patch_instruction(dest + 4, instrs[4]);
++		patch_instruction(dest + 5, instrs[5]);
++	}
++	printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
++		(types == STF_BARRIER_NONE)                  ? "no" :
++		(types == STF_BARRIER_FALLBACK)              ? "fallback" :
++		(types == STF_BARRIER_EIEIO)                 ? "eieio" :
++		(types == (STF_BARRIER_SYNC_ORI))            ? "hwsync"
++		                                           : "unknown");
++}
++
++
++void do_stf_barrier_fixups(enum stf_barrier_type types)
++{
++	do_stf_entry_barrier_fixups(types);
++	do_stf_exit_barrier_fixups(types);
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ 	unsigned int instrs[3], *dest;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 092715b9674b..fc0412d59149 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -38,57 +38,92 @@
+ #include <asm/smp.h>
+ #include <asm/tm.h>
+ #include <asm/setup.h>
++#include <asm/security_features.h>
+ 
+ #include "powernv.h"
+ 
++
++static bool fw_feature_is(const char *state, const char *name,
++			  struct device_node *fw_features)
++{
++	struct device_node *np;
++	bool rc = false;
++
++	np = of_get_child_by_name(fw_features, name);
++	if (np) {
++		rc = of_property_read_bool(np, state);
++		of_node_put(np);
++	}
++
++	return rc;
++}
++
++static void init_fw_feat_flags(struct device_node *np)
++{
++	if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
++		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++	if (fw_feature_is("enabled", "fw-bcctrl-serialized", np))
++		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++	if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np))
++		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++	if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np))
++		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++	if (fw_feature_is("enabled", "fw-l1d-thread-split", np))
++		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++	if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
++		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++	/*
++	 * The features below are enabled by default, so we instead look to see
++	 * if firmware has *disabled* them, and clear them if so.
++	 */
++	if (fw_feature_is("disabled", "speculation-policy-favor-security", np))
++		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++	if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np))
++		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++	if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np))
++		security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
++
++	if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
++		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pnv_setup_rfi_flush(void)
+ {
+ 	struct device_node *np, *fw_features;
+ 	enum l1d_flush_type type;
+-	int enable;
++	bool enable;
+ 
+ 	/* Default to fallback in case fw-features are not available */
+ 	type = L1D_FLUSH_FALLBACK;
+-	enable = 1;
+ 
+ 	np = of_find_node_by_name(NULL, "ibm,opal");
+ 	fw_features = of_get_child_by_name(np, "fw-features");
+ 	of_node_put(np);
+ 
+ 	if (fw_features) {
+-		np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
+-		if (np && of_property_read_bool(np, "enabled"))
+-			type = L1D_FLUSH_MTTRIG;
++		init_fw_feat_flags(fw_features);
++		of_node_put(fw_features);
+ 
+-		of_node_put(np);
++		if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++			type = L1D_FLUSH_MTTRIG;
+ 
+-		np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
+-		if (np && of_property_read_bool(np, "enabled"))
++		if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
+ 			type = L1D_FLUSH_ORI;
+-
+-		of_node_put(np);
+-
+-		/* Enable unless firmware says NOT to */
+-		enable = 2;
+-		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
+-		if (np && of_property_read_bool(np, "disabled"))
+-			enable--;
+-
+-		of_node_put(np);
+-
+-		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
+-		if (np && of_property_read_bool(np, "disabled"))
+-			enable--;
+-
+-		np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+-		if (np && of_property_read_bool(np, "disabled"))
+-			enable = 0;
+-
+-		of_node_put(np);
+-		of_node_put(fw_features);
+ 	}
+ 
+-	setup_rfi_flush(type, enable > 0);
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++		 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)   || \
++		  security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
++
++	setup_rfi_flush(type, enable);
+ }
+ 
+ static void __init pnv_setup_arch(void)
+@@ -96,6 +131,7 @@ static void __init pnv_setup_arch(void)
+ 	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+ 
+ 	pnv_setup_rfi_flush();
++	setup_stf_barrier();
+ 
+ 	/* Initialize SMP */
+ 	pnv_smp_init();
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 1a527625acf7..21fed38bbbd5 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -68,6 +68,7 @@
+ #include <asm/plpar_wrappers.h>
+ #include <asm/kexec.h>
+ #include <asm/isa-bridge.h>
++#include <asm/security_features.h>
+ 
+ #include "pseries.h"
+ 
+@@ -459,6 +460,40 @@ static void __init find_and_init_phbs(void)
+ 	of_pci_check_probe_only();
+ }
+ 
++static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
++{
++	if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
++		security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
++
++	if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
++		security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
++
++	if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++		security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
++
++	if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++		security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
++
++	if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
++		security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
++
++	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
++		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
++
++	/*
++	 * The features below are enabled by default, so we instead look to see
++	 * if firmware has *disabled* them, and clear them if so.
++	 */
++	if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
++		security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
++
++	if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++		security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
++
++	if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
++		security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
++}
++
+ static void pseries_setup_rfi_flush(void)
+ {
+ 	struct h_cpu_char_result result;
+@@ -466,29 +501,26 @@ static void pseries_setup_rfi_flush(void)
+ 	bool enable;
+ 	long rc;
+ 
+-	/* Enable by default */
+-	enable = true;
+-
+ 	rc = plpar_get_cpu_characteristics(&result);
+-	if (rc == H_SUCCESS) {
+-		types = L1D_FLUSH_NONE;
++	if (rc == H_SUCCESS)
++		init_cpu_char_feature_flags(&result);
++
++	/*
++	 * We're the guest so this doesn't apply to us, clear it to simplify
++	 * handling of it elsewhere.
++	 */
++	security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
+ 
+-		if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
+-			types |= L1D_FLUSH_MTTRIG;
+-		if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
+-			types |= L1D_FLUSH_ORI;
++	types = L1D_FLUSH_FALLBACK;
+ 
+-		/* Use fallback if nothing set in hcall */
+-		if (types == L1D_FLUSH_NONE)
+-			types = L1D_FLUSH_FALLBACK;
++	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
++		types |= L1D_FLUSH_MTTRIG;
+ 
+-		if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+-		    (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
+-			enable = false;
+-	} else {
+-		/* Default to fallback if case hcall is not available */
+-		types = L1D_FLUSH_FALLBACK;
+-	}
++	if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
++		types |= L1D_FLUSH_ORI;
++
++	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
++		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+ 
+ 	setup_rfi_flush(types, enable);
+ }
+@@ -667,6 +699,7 @@ static void __init pSeries_setup_arch(void)
+ 	fwnmi_init();
+ 
+ 	pseries_setup_rfi_flush();
++	setup_stf_barrier();
+ 
+ 	/* By default, only probe PCI (can be overridden by rtas_pci) */
+ 	pci_add_flags(PCI_PROBE_ONLY);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 6e91e0d422ea..c94dd09a82d1 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -120,6 +120,7 @@ config S390
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_CPU_AUTOPROBE
+ 	select GENERIC_CPU_DEVICES if !SMP
++	select GENERIC_CPU_VULNERABILITIES
+ 	select GENERIC_FIND_FIRST_BIT
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select GENERIC_TIME_VSYSCALL
+@@ -576,7 +577,7 @@ choice
+ config EXPOLINE_OFF
+ 	bool "spectre_v2=off"
+ 
+-config EXPOLINE_MEDIUM
++config EXPOLINE_AUTO
+ 	bool "spectre_v2=auto"
+ 
+ config EXPOLINE_FULL
+diff --git a/arch/s390/Makefile b/arch/s390/Makefile
+index 2ced3239cb84..e1bc722fba41 100644
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -84,7 +84,7 @@ ifdef CONFIG_EXPOLINE
+     CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+     CC_FLAGS_EXPOLINE += -mindirect-branch-table
+     export CC_FLAGS_EXPOLINE
+-    cflags-y += $(CC_FLAGS_EXPOLINE)
++    cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+   endif
+ endif
+ 
+diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
+index e8077f0971f8..2bf01ba44107 100644
+--- a/arch/s390/crypto/crc32be-vx.S
++++ b/arch/s390/crypto/crc32be-vx.S
+@@ -13,6 +13,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include <asm/vx-insn.h>
+ 
+ /* Vector register range containing CRC-32 constants */
+@@ -67,6 +68,8 @@
+ 
+ .previous
+ 
++	GEN_BR_THUNK %r14
++
+ .text
+ /*
+  * The CRC-32 function(s) use these calling conventions:
+@@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
+ 
+ .Ldone:
+ 	VLGVF	%r2,%v2,3
+-	br	%r14
++	BR_EX	%r14
+ 
+ .previous
+diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
+index d8c67a58c0c5..7d6f568bd3ad 100644
+--- a/arch/s390/crypto/crc32le-vx.S
++++ b/arch/s390/crypto/crc32le-vx.S
+@@ -14,6 +14,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include <asm/vx-insn.h>
+ 
+ /* Vector register range containing CRC-32 constants */
+@@ -76,6 +77,7 @@
+ 
+ .previous
+ 
++	GEN_BR_THUNK %r14
+ 
+ .text
+ 
+@@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
+ 
+ .Ldone:
+ 	VLGVF	%r2,%v2,2
+-	br	%r14
++	BR_EX	%r14
+ 
+ .previous
+diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
+new file mode 100644
+index 000000000000..955d620db23e
+--- /dev/null
++++ b/arch/s390/include/asm/alternative-asm.h
+@@ -0,0 +1,108 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_ALTERNATIVE_ASM_H
++#define _ASM_S390_ALTERNATIVE_ASM_H
++
++#ifdef __ASSEMBLY__
++
++/*
++ * Check the length of an instruction sequence. The length may not be larger
++ * than 254 bytes and it has to be divisible by 2.
++ */
++.macro alt_len_check start,end
++	.if ( \end - \start ) > 254
++	.error "cpu alternatives does not support instructions blocks > 254 bytes\n"
++	.endif
++	.if ( \end - \start ) % 2
++	.error "cpu alternatives instructions length is odd\n"
++	.endif
++.endm
++
++/*
++ * Issue one struct alt_instr descriptor entry (need to put it into
++ * the section .altinstructions, see below). This entry contains
++ * enough information for the alternatives patching code to patch an
++ * instruction. See apply_alternatives().
++ */
++.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
++	.long	\orig_start - .
++	.long	\alt_start - .
++	.word	\feature
++	.byte	\orig_end - \orig_start
++	.byte	\alt_end - \alt_start
++.endm
++
++/*
++ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
++ * for the bulk of the area, possibly followed by a 4-byte and/or
++ * a 2-byte nop if the size of the area is not divisible by 6.
++ */
++.macro alt_pad_fill bytes
++	.fill	( \bytes ) / 6, 6, 0xc0040000
++	.fill	( \bytes ) % 6 / 4, 4, 0x47000000
++	.fill	( \bytes ) % 6 % 4 / 2, 2, 0x0700
++.endm
++
++/*
++ * Fill up @bytes with nops. If the number of bytes is larger
++ * than 6, emit a jg instruction to branch over all nops, then
++ * fill an area of size (@bytes - 6) with nop instructions.
++ */
++.macro alt_pad bytes
++	.if ( \bytes > 0 )
++	.if ( \bytes > 6 )
++	jg	. + \bytes
++	alt_pad_fill \bytes - 6
++	.else
++	alt_pad_fill \bytes
++	.endif
++	.endif
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE oldinstr, newinstr, feature
++	.pushsection .altinstr_replacement,"ax"
++770:	\newinstr
++771:	.popsection
++772:	\oldinstr
++773:	alt_len_check 770b, 771b
++	alt_len_check 772b, 773b
++	alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
++774:	.pushsection .altinstructions,"a"
++	alt_entry 772b, 774b, 770b, 771b, \feature
++	.popsection
++.endm
++
++/*
++ * Define an alternative between two instructions. If @feature is
++ * present, early code in apply_alternatives() replaces @oldinstr with
++ * @newinstr. ".skip" directive takes care of proper instruction padding
++ * in case @newinstr is longer than @oldinstr.
++ */
++.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
++	.pushsection .altinstr_replacement,"ax"
++770:	\newinstr1
++771:	\newinstr2
++772:	.popsection
++773:	\oldinstr
++774:	alt_len_check 770b, 771b
++	alt_len_check 771b, 772b
++	alt_len_check 773b, 774b
++	.if ( 771b - 770b > 772b - 771b )
++	alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
++	.else
++	alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
++	.endif
++775:	.pushsection .altinstructions,"a"
++	alt_entry 773b, 775b, 770b, 771b,\feature1
++	alt_entry 773b, 775b, 771b, 772b,\feature2
++	.popsection
++.endm
++
++#endif	/*  __ASSEMBLY__  */
++
++#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
+diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
+index 7df48e5cf36f..b4bd8c41e9d3 100644
+--- a/arch/s390/include/asm/nospec-branch.h
++++ b/arch/s390/include/asm/nospec-branch.h
+@@ -6,12 +6,11 @@
+ 
+ #include <linux/types.h>
+ 
+-extern int nospec_call_disable;
+-extern int nospec_return_disable;
++extern int nospec_disable;
+ 
+ void nospec_init_branches(void);
+-void nospec_call_revert(s32 *start, s32 *end);
+-void nospec_return_revert(s32 *start, s32 *end);
++void nospec_auto_detect(void);
++void nospec_revert(s32 *start, s32 *end);
+ 
+ #endif /* __ASSEMBLY__ */
+ 
+diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
+new file mode 100644
+index 000000000000..a01f81186e86
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-insn.h
+@@ -0,0 +1,196 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_NOSPEC_ASM_H
++#define _ASM_S390_NOSPEC_ASM_H
++
++#include <asm/alternative-asm.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf.h>
++
++#ifdef __ASSEMBLY__
++
++#ifdef CONFIG_EXPOLINE
++
++_LC_BR_R1 = __LC_BR_R1
++
++/*
++ * The expoline macros are used to create thunks in the same format
++ * as gcc generates them. The 'comdat' section flag makes sure that
++ * the various thunks are merged into a single copy.
++ */
++	.macro __THUNK_PROLOG_NAME name
++	.pushsection .text.\name,"axG",@progbits,\name,comdat
++	.globl \name
++	.hidden \name
++	.type \name,@function
++\name:
++	CFI_STARTPROC
++	.endm
++
++	.macro __THUNK_EPILOG
++	CFI_ENDPROC
++	.popsection
++	.endm
++
++	.macro __THUNK_PROLOG_BR r1,r2
++	__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
++	.endm
++
++	.macro __THUNK_PROLOG_BC d0,r1,r2
++	__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
++	.endm
++
++	.macro __THUNK_BR r1,r2
++	jg	__s390x_indirect_jump_r\r2\()use_r\r1
++	.endm
++
++	.macro __THUNK_BC d0,r1,r2
++	jg	__s390x_indirect_branch_\d0\()_\r2\()use_\r1
++	.endm
++
++	.macro __THUNK_BRASL r1,r2,r3
++	brasl	\r1,__s390x_indirect_jump_r\r3\()use_r\r2
++	.endm
++
++	.macro	__DECODE_RR expand,reg,ruse
++	.set __decode_fail,1
++	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \reg,%r\r1
++	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \ruse,%r\r2
++	\expand \r1,\r2
++	.set __decode_fail,0
++	.endif
++	.endr
++	.endif
++	.endr
++	.if __decode_fail == 1
++	.error "__DECODE_RR failed"
++	.endif
++	.endm
++
++	.macro	__DECODE_RRR expand,rsave,rtarget,ruse
++	.set __decode_fail,1
++	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \rsave,%r\r1
++	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \rtarget,%r\r2
++	.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \ruse,%r\r3
++	\expand \r1,\r2,\r3
++	.set __decode_fail,0
++	.endif
++	.endr
++	.endif
++	.endr
++	.endif
++	.endr
++	.if __decode_fail == 1
++	.error "__DECODE_RRR failed"
++	.endif
++	.endm
++
++	.macro	__DECODE_DRR expand,disp,reg,ruse
++	.set __decode_fail,1
++	.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \reg,%r\r1
++	.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
++	.ifc \ruse,%r\r2
++	\expand \disp,\r1,\r2
++	.set __decode_fail,0
++	.endif
++	.endr
++	.endif
++	.endr
++	.if __decode_fail == 1
++	.error "__DECODE_DRR failed"
++	.endif
++	.endm
++
++	.macro __THUNK_EX_BR reg,ruse
++	# Be very careful when adding instructions to this macro!
++	# The ALTERNATIVE replacement code has a .+10 which targets
++	# the "br \reg" after the code has been patched.
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++	exrl	0,555f
++	j	.
++#else
++	.ifc \reg,%r1
++	ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
++	j	.
++	.else
++	larl	\ruse,555f
++	ex	0,0(\ruse)
++	j	.
++	.endif
++#endif
++555:	br	\reg
++	.endm
++
++	.macro __THUNK_EX_BC disp,reg,ruse
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++	exrl	0,556f
++	j	.
++#else
++	larl	\ruse,556f
++	ex	0,0(\ruse)
++	j	.
++#endif
++556:	b	\disp(\reg)
++	.endm
++
++	.macro GEN_BR_THUNK reg,ruse=%r1
++	__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
++	__THUNK_EX_BR \reg,\ruse
++	__THUNK_EPILOG
++	.endm
++
++	.macro GEN_B_THUNK disp,reg,ruse=%r1
++	__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
++	__THUNK_EX_BC \disp,\reg,\ruse
++	__THUNK_EPILOG
++	.endm
++
++	.macro BR_EX reg,ruse=%r1
++557:	__DECODE_RR __THUNK_BR,\reg,\ruse
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	557b-.
++	.popsection
++	.endm
++
++	 .macro B_EX disp,reg,ruse=%r1
++558:	__DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	558b-.
++	.popsection
++	.endm
++
++	.macro BASR_EX rsave,rtarget,ruse=%r1
++559:	__DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
++	.pushsection .s390_indirect_branches,"a",@progbits
++	.long	559b-.
++	.popsection
++	.endm
++
++#else
++	.macro GEN_BR_THUNK reg,ruse=%r1
++	.endm
++
++	.macro GEN_B_THUNK disp,reg,ruse=%r1
++	.endm
++
++	 .macro BR_EX reg,ruse=%r1
++	br	\reg
++	.endm
++
++	 .macro B_EX disp,reg,ruse=%r1
++	b	\disp(\reg)
++	.endm
++
++	.macro BASR_EX rsave,rtarget,ruse=%r1
++	basr	\rsave,\rtarget
++	.endm
++#endif
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_NOSPEC_ASM_H */
+diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
+index 7f27e3da9709..a02bc90fe5f3 100644
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -61,11 +61,12 @@ obj-y	+= debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
+ obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+ obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
+ obj-y	+= entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
++obj-y	+= nospec-branch.o
+ 
+ extra-y				+= head.o head64.o vmlinux.lds
+ 
+-obj-$(CONFIG_EXPOLINE)		+= nospec-branch.o
+-CFLAGS_REMOVE_expoline.o	+= $(CC_FLAGS_EXPOLINE)
++obj-$(CONFIG_SYSFS)		+= nospec-sysfs.o
++CFLAGS_REMOVE_nospec-branch.o	+= $(CC_FLAGS_EXPOLINE)
+ 
+ obj-$(CONFIG_MODULES)		+= module.o
+ obj-$(CONFIG_SMP)		+= smp.o
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+index 22476135f738..8e1f2aee85ef 100644
+--- a/arch/s390/kernel/alternative.c
++++ b/arch/s390/kernel/alternative.c
+@@ -2,6 +2,7 @@
+ #include <linux/module.h>
+ #include <asm/alternative.h>
+ #include <asm/facility.h>
++#include <asm/nospec-branch.h>
+ 
+ #define MAX_PATCH_LEN (255 - 1)
+ 
+@@ -15,29 +16,6 @@ static int __init disable_alternative_instructions(char *str)
+ 
+ early_param("noaltinstr", disable_alternative_instructions);
+ 
+-static int __init nobp_setup_early(char *str)
+-{
+-	bool enabled;
+-	int rc;
+-
+-	rc = kstrtobool(str, &enabled);
+-	if (rc)
+-		return rc;
+-	if (enabled && test_facility(82))
+-		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
+-	else
+-		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+-	return 0;
+-}
+-early_param("nobp", nobp_setup_early);
+-
+-static int __init nospec_setup_early(char *str)
+-{
+-	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+-	return 0;
+-}
+-early_param("nospec", nospec_setup_early);
+-
+ struct brcl_insn {
+ 	u16 opc;
+ 	s32 disp;
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index 587b195b588d..3fd0b4535a71 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -179,6 +179,7 @@ int main(void)
+ 	OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
+ 	OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
+ 	OFFSET(__LC_GMAP, lowcore, gmap);
++	OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
+ 	/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+ 	OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
+ 	/* hardware defined lowcore locations 0x1000 - 0x18ff */
+diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
+index f6c56009e822..b65874b0b412 100644
+--- a/arch/s390/kernel/base.S
++++ b/arch/s390/kernel/base.S
+@@ -9,18 +9,22 @@
+ 
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+ #include <asm/sigp.h>
+ 
++	GEN_BR_THUNK %r9
++	GEN_BR_THUNK %r14
++
+ ENTRY(s390_base_mcck_handler)
+ 	basr	%r13,0
+ 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
+ 	aghi	%r15,-STACK_FRAME_OVERHEAD
+ 	larl	%r1,s390_base_mcck_handler_fn
+-	lg	%r1,0(%r1)
+-	ltgr	%r1,%r1
++	lg	%r9,0(%r1)
++	ltgr	%r9,%r9
+ 	jz	1f
+-	basr	%r14,%r1
++	BASR_EX	%r14,%r9
+ 1:	la	%r1,4095
+ 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+ 	lpswe	__LC_MCK_OLD_PSW
+@@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
+ 	basr	%r13,0
+ 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
+ 	larl	%r1,s390_base_ext_handler_fn
+-	lg	%r1,0(%r1)
+-	ltgr	%r1,%r1
++	lg	%r9,0(%r1)
++	ltgr	%r9,%r9
+ 	jz	1f
+-	basr	%r14,%r1
++	BASR_EX	%r14,%r9
+ 1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
+ 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
+ 	lpswe	__LC_EXT_OLD_PSW
+@@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
+ 	basr	%r13,0
+ 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
+ 	larl	%r1,s390_base_pgm_handler_fn
+-	lg	%r1,0(%r1)
+-	ltgr	%r1,%r1
++	lg	%r9,0(%r1)
++	ltgr	%r9,%r9
+ 	jz	1f
+-	basr	%r14,%r1
++	BASR_EX	%r14,%r9
+ 	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
+ 	lpswe	__LC_PGM_OLD_PSW
+ 1:	lpswe	disabled_wait_psw-0b(%r13)
+@@ -117,7 +121,7 @@ ENTRY(diag308_reset)
+ 	larl	%r4,.Lcontinue_psw	# Restore PSW flags
+ 	lpswe	0(%r4)
+ .Lcontinue:
+-	br	%r14
++	BR_EX	%r14
+ .align 16
+ .Lrestart_psw:
+ 	.long	0x00080000,0x80000000 + .Lrestart_part2
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index a5621ea6d123..d3e1a510c9c1 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -27,6 +27,7 @@
+ #include <asm/setup.h>
+ #include <asm/nmi.h>
+ #include <asm/export.h>
++#include <asm/nospec-insn.h>
+ 
+ __PT_R0      =	__PT_GPRS
+ __PT_R1      =	__PT_GPRS + 8
+@@ -223,67 +224,9 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ 	.popsection
+ 	.endm
+ 
+-#ifdef CONFIG_EXPOLINE
+-
+-	.macro GEN_BR_THUNK name,reg,tmp
+-	.section .text.\name,"axG",@progbits,\name,comdat
+-	.globl \name
+-	.hidden \name
+-	.type \name,@function
+-\name:
+-	CFI_STARTPROC
+-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+-	exrl	0,0f
+-#else
+-	larl	\tmp,0f
+-	ex	0,0(\tmp)
+-#endif
+-	j	.
+-0:	br	\reg
+-	CFI_ENDPROC
+-	.endm
+-
+-	GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+-	GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+-	GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+-
+-	.macro BASR_R14_R9
+-0:	brasl	%r14,__s390x_indirect_jump_r1use_r9
+-	.pushsection .s390_indirect_branches,"a",@progbits
+-	.long	0b-.
+-	.popsection
+-	.endm
+-
+-	.macro BR_R1USE_R14
+-0:	jg	__s390x_indirect_jump_r1use_r14
+-	.pushsection .s390_indirect_branches,"a",@progbits
+-	.long	0b-.
+-	.popsection
+-	.endm
+-
+-	.macro BR_R11USE_R14
+-0:	jg	__s390x_indirect_jump_r11use_r14
+-	.pushsection .s390_indirect_branches,"a",@progbits
+-	.long	0b-.
+-	.popsection
+-	.endm
+-
+-#else	/* CONFIG_EXPOLINE */
+-
+-	.macro BASR_R14_R9
+-	basr	%r14,%r9
+-	.endm
+-
+-	.macro BR_R1USE_R14
+-	br	%r14
+-	.endm
+-
+-	.macro BR_R11USE_R14
+-	br	%r14
+-	.endm
+-
+-#endif /* CONFIG_EXPOLINE */
+-
++	GEN_BR_THUNK %r9
++	GEN_BR_THUNK %r14
++	GEN_BR_THUNK %r14,%r11
+ 
+ 	.section .kprobes.text, "ax"
+ .Ldummy:
+@@ -300,7 +243,7 @@ _PIF_WORK	= (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ ENTRY(__bpon)
+ 	.globl __bpon
+ 	BPON
+-	BR_R1USE_R14
++	BR_EX	%r14
+ 
+ /*
+  * Scheduler resume function, called by switch_to
+@@ -326,7 +269,7 @@ ENTRY(__switch_to)
+ 	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+ 	jz	0f
+ 	.insn	s,0xb2800000,__LC_LPP		# set program parameter
+-0:	BR_R1USE_R14
++0:	BR_EX	%r14
+ 
+ .L__critical_start:
+ 
+@@ -393,7 +336,7 @@ sie_exit:
+ 	xgr	%r5,%r5
+ 	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
+ 	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
+-	BR_R1USE_R14
++	BR_EX	%r14
+ .Lsie_fault:
+ 	lghi	%r14,-EFAULT
+ 	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
+@@ -452,7 +395,7 @@ ENTRY(system_call)
+ 	lgf	%r9,0(%r8,%r10)			# get system call add.
+ 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
+ 	jnz	.Lsysc_tracesys
+-	BASR_R14_R9				# call sys_xxxx
++	BASR_EX	%r14,%r9			# call sys_xxxx
+ 	stg	%r2,__PT_R2(%r11)		# store return value
+ 
+ .Lsysc_return:
+@@ -637,7 +580,7 @@ ENTRY(system_call)
+ 	lmg	%r3,%r7,__PT_R3(%r11)
+ 	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
+ 	lg	%r2,__PT_ORIG_GPR2(%r11)
+-	BASR_R14_R9			# call sys_xxx
++	BASR_EX	%r14,%r9		# call sys_xxx
+ 	stg	%r2,__PT_R2(%r11)	# store return value
+ .Lsysc_tracenogo:
+ 	TSTMSK	__TI_flags(%r12),_TIF_TRACE
+@@ -661,7 +604,7 @@ ENTRY(ret_from_fork)
+ 	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
+ ENTRY(kernel_thread_starter)
+ 	la	%r2,0(%r10)
+-	BASR_R14_R9
++	BASR_EX	%r14,%r9
+ 	j	.Lsysc_tracenogo
+ 
+ /*
+@@ -743,7 +686,7 @@ ENTRY(pgm_check_handler)
+ 	je	.Lpgm_return
+ 	lgf	%r9,0(%r10,%r1)		# load address of handler routine
+ 	lgr	%r2,%r11		# pass pointer to pt_regs
+-	BASR_R14_R9			# branch to interrupt-handler
++	BASR_EX	%r14,%r9		# branch to interrupt-handler
+ .Lpgm_return:
+ 	LOCKDEP_SYS_EXIT
+ 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+@@ -1061,7 +1004,7 @@ ENTRY(psw_idle)
+ 	stpt	__TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+ 	lpswe	__SF_EMPTY(%r15)
+-	BR_R1USE_R14
++	BR_EX	%r14
+ .Lpsw_idle_end:
+ 
+ /*
+@@ -1103,7 +1046,7 @@ ENTRY(save_fpu_regs)
+ .Lsave_fpu_regs_done:
+ 	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+ .Lsave_fpu_regs_exit:
+-	BR_R1USE_R14
++	BR_EX	%r14
+ .Lsave_fpu_regs_end:
+ EXPORT_SYMBOL(save_fpu_regs)
+ 
+@@ -1149,7 +1092,7 @@ load_fpu_regs:
+ .Lload_fpu_regs_done:
+ 	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+ .Lload_fpu_regs_exit:
+-	BR_R1USE_R14
++	BR_EX	%r14
+ .Lload_fpu_regs_end:
+ 
+ .L__critical_end:
+@@ -1366,7 +1309,7 @@ cleanup_critical:
+ 	jl	0f
+ 	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
+ 	jl	.Lcleanup_load_fpu_regs
+-0:	BR_R11USE_R14
++0:	BR_EX	%r14
+ 
+ 	.align	8
+ .Lcleanup_table:
+@@ -1402,7 +1345,7 @@ cleanup_critical:
+ 	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
+ 	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+ 	larl	%r9,sie_exit			# skip forward to sie_exit
+-	BR_R11USE_R14
++	BR_EX	%r14
+ #endif
+ 
+ .Lcleanup_system_call:
+@@ -1456,7 +1399,7 @@ cleanup_critical:
+ 	stg	%r15,56(%r11)		# r15 stack pointer
+ 	# set new psw address and exit
+ 	larl	%r9,.Lsysc_do_svc
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ .Lcleanup_system_call_insn:
+ 	.quad	system_call
+ 	.quad	.Lsysc_stmg
+@@ -1468,7 +1411,7 @@ cleanup_critical:
+ 
+ .Lcleanup_sysc_tif:
+ 	larl	%r9,.Lsysc_tif
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ 
+ .Lcleanup_sysc_restore:
+ 	# check if stpt has been executed
+@@ -1485,14 +1428,14 @@ cleanup_critical:
+ 	mvc	0(64,%r11),__PT_R8(%r9)
+ 	lmg	%r0,%r7,__PT_R0(%r9)
+ 1:	lmg	%r8,%r9,__LC_RETURN_PSW
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ .Lcleanup_sysc_restore_insn:
+ 	.quad	.Lsysc_exit_timer
+ 	.quad	.Lsysc_done - 4
+ 
+ .Lcleanup_io_tif:
+ 	larl	%r9,.Lio_tif
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ 
+ .Lcleanup_io_restore:
+ 	# check if stpt has been executed
+@@ -1506,7 +1449,7 @@ cleanup_critical:
+ 	mvc	0(64,%r11),__PT_R8(%r9)
+ 	lmg	%r0,%r7,__PT_R0(%r9)
+ 1:	lmg	%r8,%r9,__LC_RETURN_PSW
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ .Lcleanup_io_restore_insn:
+ 	.quad	.Lio_exit_timer
+ 	.quad	.Lio_done - 4
+@@ -1559,17 +1502,17 @@ cleanup_critical:
+ 	# prepare return psw
+ 	nihh	%r8,0xfcfd		# clear irq & wait state bits
+ 	lg	%r9,48(%r11)		# return from psw_idle
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ .Lcleanup_idle_insn:
+ 	.quad	.Lpsw_idle_lpsw
+ 
+ .Lcleanup_save_fpu_regs:
+ 	larl	%r9,save_fpu_regs
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ 
+ .Lcleanup_load_fpu_regs:
+ 	larl	%r9,load_fpu_regs
+-	BR_R11USE_R14
++	BR_EX	%r14,%r11
+ 
+ /*
+  * Integer constants
+diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
+index 82df7d80fab2..27110f3294ed 100644
+--- a/arch/s390/kernel/mcount.S
++++ b/arch/s390/kernel/mcount.S
+@@ -9,13 +9,17 @@
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/ftrace.h>
++#include <asm/nospec-insn.h>
+ #include <asm/ptrace.h>
+ #include <asm/export.h>
+ 
++	GEN_BR_THUNK %r1
++	GEN_BR_THUNK %r14
++
+ 	.section .kprobes.text, "ax"
+ 
+ ENTRY(ftrace_stub)
+-	br	%r14
++	BR_EX	%r14
+ 
+ #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
+ #define STACK_PTREGS	  (STACK_FRAME_OVERHEAD)
+@@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
+ #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
+ 
+ ENTRY(_mcount)
+-	br	%r14
++	BR_EX	%r14
+ 
+ EXPORT_SYMBOL(_mcount)
+ 
+@@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
+ #endif
+ 	lgr	%r3,%r14
+ 	la	%r5,STACK_PTREGS(%r15)
+-	basr	%r14,%r1
++	BASR_EX	%r14,%r1
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ # The j instruction gets runtime patched to a nop instruction.
+ # See ftrace_enable_ftrace_graph_caller.
+@@ -68,7 +72,7 @@ ftrace_graph_caller_end:
+ #endif
+ 	lg	%r1,(STACK_PTREGS_PSW+8)(%r15)
+ 	lmg	%r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
+-	br	%r1
++	BR_EX	%r1
+ 
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ 
+@@ -81,6 +85,6 @@ ENTRY(return_to_handler)
+ 	aghi	%r15,STACK_FRAME_OVERHEAD
+ 	lgr	%r14,%r2
+ 	lmg	%r2,%r5,32(%r15)
+-	br	%r14
++	BR_EX	%r14
+ 
+ #endif
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 1fc6d1ff92d3..0dc8ac8548ee 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -159,7 +159,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ 	me->core_layout.size += me->arch.got_size;
+ 	me->arch.plt_offset = me->core_layout.size;
+ 	if (me->arch.plt_size) {
+-		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
++		if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ 			me->arch.plt_size += PLT_ENTRY_SIZE;
+ 		me->core_layout.size += me->arch.plt_size;
+ 	}
+@@ -318,8 +318,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ 				info->plt_offset;
+ 			ip[0] = 0x0d10e310;	/* basr 1,0  */
+ 			ip[1] = 0x100a0004;	/* lg	1,10(1) */
+-			if (IS_ENABLED(CONFIG_EXPOLINE) &&
+-			    !nospec_call_disable) {
++			if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ 				unsigned int *ij;
+ 				ij = me->core_layout.base +
+ 					me->arch.plt_offset +
+@@ -440,7 +439,7 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 	void *aseg;
+ 
+ 	if (IS_ENABLED(CONFIG_EXPOLINE) &&
+-	    !nospec_call_disable && me->arch.plt_size) {
++	    !nospec_disable && me->arch.plt_size) {
+ 		unsigned int *ij;
+ 
+ 		ij = me->core_layout.base + me->arch.plt_offset +
+@@ -466,12 +465,12 @@ int module_finalize(const Elf_Ehdr *hdr,
+ 			apply_alternatives(aseg, aseg + s->sh_size);
+ 
+ 		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+-		    (!strcmp(".nospec_call_table", secname)))
+-			nospec_call_revert(aseg, aseg + s->sh_size);
++		    (!strncmp(".s390_indirect", secname, 14)))
++			nospec_revert(aseg, aseg + s->sh_size);
+ 
+ 		if (IS_ENABLED(CONFIG_EXPOLINE) &&
+-		    (!strcmp(".nospec_return_table", secname)))
+-			nospec_return_revert(aseg, aseg + s->sh_size);
++		    (!strncmp(".s390_return", secname, 12)))
++			nospec_revert(aseg, aseg + s->sh_size);
+ 	}
+ 
+ 	jump_label_apply_nops(me);
+diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
+index 9aff72d3abda..8ad6a7128b3a 100644
+--- a/arch/s390/kernel/nospec-branch.c
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -1,32 +1,86 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/module.h>
++#include <linux/device.h>
+ #include <asm/nospec-branch.h>
+ 
+-int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+-int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
++static int __init nobp_setup_early(char *str)
++{
++	bool enabled;
++	int rc;
++
++	rc = kstrtobool(str, &enabled);
++	if (rc)
++		return rc;
++	if (enabled && test_facility(82)) {
++		/*
++		 * The user explicitely requested nobp=1, enable it and
++		 * disable the expoline support.
++		 */
++		__set_facility(82, S390_lowcore.alt_stfle_fac_list);
++		if (IS_ENABLED(CONFIG_EXPOLINE))
++			nospec_disable = 1;
++	} else {
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	}
++	return 0;
++}
++early_param("nobp", nobp_setup_early);
++
++static int __init nospec_setup_early(char *str)
++{
++	__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	return 0;
++}
++early_param("nospec", nospec_setup_early);
++
++static int __init nospec_report(void)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++		pr_info("Spectre V2 mitigation: execute trampolines.\n");
++	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++		pr_info("Spectre V2 mitigation: limited branch prediction.\n");
++	return 0;
++}
++arch_initcall(nospec_report);
++
++#ifdef CONFIG_EXPOLINE
++
++int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+ 
+ static int __init nospectre_v2_setup_early(char *str)
+ {
+-	nospec_call_disable = 1;
+-	nospec_return_disable = 1;
++	nospec_disable = 1;
+ 	return 0;
+ }
+ early_param("nospectre_v2", nospectre_v2_setup_early);
+ 
++void __init nospec_auto_detect(void)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE)) {
++		/*
++		 * The kernel has been compiled with expolines.
++		 * Keep expolines enabled and disable nobp.
++		 */
++		nospec_disable = 0;
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++	}
++	/*
++	 * If the kernel has not been compiled with expolines the
++	 * nobp setting decides what is done, this depends on the
++	 * CONFIG_KERNEL_NP option and the nobp/nospec parameters.
++	 */
++}
++
+ static int __init spectre_v2_setup_early(char *str)
+ {
+ 	if (str && !strncmp(str, "on", 2)) {
+-		nospec_call_disable = 0;
+-		nospec_return_disable = 0;
+-	}
+-	if (str && !strncmp(str, "off", 3)) {
+-		nospec_call_disable = 1;
+-		nospec_return_disable = 1;
+-	}
+-	if (str && !strncmp(str, "auto", 4)) {
+-		nospec_call_disable = 0;
+-		nospec_return_disable = 1;
++		nospec_disable = 0;
++		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ 	}
++	if (str && !strncmp(str, "off", 3))
++		nospec_disable = 1;
++	if (str && !strncmp(str, "auto", 4))
++		nospec_auto_detect();
+ 	return 0;
+ }
+ early_param("spectre_v2", spectre_v2_setup_early);
+@@ -39,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ 	s32 *epo;
+ 
+ 	/* Second part of the instruction replace is always a nop */
+-	memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ 	for (epo = start; epo < end; epo++) {
+ 		instr = (u8 *) epo + *epo;
+ 		if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+@@ -60,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ 			br = thunk + (*(int *)(thunk + 2)) * 2;
+ 		else
+ 			continue;
+-		if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++		/* Check for unconditional branch 0x07f? or 0x47f???? */
++		if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+ 			continue;
++
++		memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ 		switch (type) {
+ 		case BRCL_EXPOLINE:
+-			/* brcl to thunk, replace with br + nop */
+ 			insnbuf[0] = br[0];
+ 			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++			if (br[0] == 0x47) {
++				/* brcl to b, replace with bc + nopr */
++				insnbuf[2] = br[2];
++				insnbuf[3] = br[3];
++			} else {
++				/* brcl to br, replace with bcr + nop */
++			}
+ 			break;
+ 		case BRASL_EXPOLINE:
+-			/* brasl to thunk, replace with basr + nop */
+-			insnbuf[0] = 0x0d;
+ 			insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++			if (br[0] == 0x47) {
++				/* brasl to b, replace with bas + nopr */
++				insnbuf[0] = 0x4d;
++				insnbuf[2] = br[2];
++				insnbuf[3] = br[3];
++			} else {
++				/* brasl to br, replace with basr + nop */
++				insnbuf[0] = 0x0d;
++			}
+ 			break;
+ 		}
+ 
+@@ -79,15 +148,9 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+ 	}
+ }
+ 
+-void __init_or_module nospec_call_revert(s32 *start, s32 *end)
+-{
+-	if (nospec_call_disable)
+-		__nospec_revert(start, end);
+-}
+-
+-void __init_or_module nospec_return_revert(s32 *start, s32 *end)
++void __init_or_module nospec_revert(s32 *start, s32 *end)
+ {
+-	if (nospec_return_disable)
++	if (nospec_disable)
+ 		__nospec_revert(start, end);
+ }
+ 
+@@ -95,6 +158,8 @@ extern s32 __nospec_call_start[], __nospec_call_end[];
+ extern s32 __nospec_return_start[], __nospec_return_end[];
+ void __init nospec_init_branches(void)
+ {
+-	nospec_call_revert(__nospec_call_start, __nospec_call_end);
+-	nospec_return_revert(__nospec_return_start, __nospec_return_end);
++	nospec_revert(__nospec_call_start, __nospec_call_end);
++	nospec_revert(__nospec_return_start, __nospec_return_end);
+ }
++
++#endif /* CONFIG_EXPOLINE */
+diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
+new file mode 100644
+index 000000000000..8affad5f18cb
+--- /dev/null
++++ b/arch/s390/kernel/nospec-sysfs.c
+@@ -0,0 +1,21 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/device.h>
++#include <linux/cpu.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++			    struct device_attribute *attr, char *buf)
++{
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
++		return sprintf(buf, "Mitigation: execute trampolines\n");
++	if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
++		return sprintf(buf, "Mitigation: limited branch prediction\n");
++	return sprintf(buf, "Vulnerable\n");
++}
+diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
+index a40ebd1d29d0..8e954c102639 100644
+--- a/arch/s390/kernel/reipl.S
++++ b/arch/s390/kernel/reipl.S
+@@ -7,8 +7,11 @@
+ 
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+ 
++	GEN_BR_THUNK %r9
++
+ #
+ # Issue "store status" for the current CPU to its prefix page
+ # and call passed function afterwards
+@@ -67,9 +70,9 @@ ENTRY(store_status)
+ 	st	%r4,0(%r1)
+ 	st	%r5,4(%r1)
+ 	stg	%r2,8(%r1)
+-	lgr	%r1,%r2
++	lgr	%r9,%r2
+ 	lgr	%r2,%r3
+-	br	%r1
++	BR_EX	%r9
+ 
+ 	.section .bss
+ 	.align	8
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index ce5ff4c4d435..0786a6b53f98 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -893,6 +893,9 @@ void __init setup_arch(char **cmdline_p)
+ 	init_mm.end_data = (unsigned long) &_edata;
+ 	init_mm.brk = (unsigned long) &_end;
+ 
++	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
++		nospec_auto_detect();
++
+ 	parse_early_param();
+ #ifdef CONFIG_CRASH_DUMP
+ 	/* Deactivate elfcorehdr= kernel parameter */
+diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
+index e99187149f17..a049a7b9d6e8 100644
+--- a/arch/s390/kernel/swsusp.S
++++ b/arch/s390/kernel/swsusp.S
+@@ -13,6 +13,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm-offsets.h>
++#include <asm/nospec-insn.h>
+ #include <asm/sigp.h>
+ 
+ /*
+@@ -24,6 +25,8 @@
+  * (see below) in the resume process.
+  * This function runs with disabled interrupts.
+  */
++	GEN_BR_THUNK %r14
++
+ 	.section .text
+ ENTRY(swsusp_arch_suspend)
+ 	stmg	%r6,%r15,__SF_GPRS(%r15)
+@@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
+ 	spx	0x318(%r1)
+ 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ 	lghi	%r2,0
+-	br	%r14
++	BR_EX	%r14
+ 
+ /*
+  * Restore saved memory image to correct place and restore register context.
+@@ -197,11 +200,10 @@ pgm_check_entry:
+ 	larl	%r15,init_thread_union
+ 	ahi	%r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ 	larl	%r2,.Lpanic_string
+-	larl	%r3,sclp_early_printk
+ 	lghi	%r1,0
+ 	sam31
+ 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
+-	basr	%r14,%r3
++	brasl	%r14,sclp_early_printk
+ 	larl	%r3,.Ldisabled_wait_31
+ 	lpsw	0(%r3)
+ 4:
+@@ -267,7 +269,7 @@ restore_registers:
+ 	/* Return 0 */
+ 	lmg	%r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ 	lghi	%r2,0
+-	br	%r14
++	BR_EX	%r14
+ 
+ 	.section .data..nosave,"aw",@progbits
+ 	.align	8
+diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
+index 495c9c4bacc7..2311f15be9cf 100644
+--- a/arch/s390/lib/mem.S
++++ b/arch/s390/lib/mem.S
+@@ -7,6 +7,9 @@
+ 
+ #include <linux/linkage.h>
+ #include <asm/export.h>
++#include <asm/nospec-insn.h>
++
++	GEN_BR_THUNK %r14
+ 
+ /*
+  * void *memmove(void *dest, const void *src, size_t n)
+@@ -33,14 +36,14 @@ ENTRY(memmove)
+ .Lmemmove_forward_remainder:
+ 	larl	%r5,.Lmemmove_mvc
+ 	ex	%r4,0(%r5)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemmove_reverse:
+ 	ic	%r0,0(%r4,%r3)
+ 	stc	%r0,0(%r4,%r1)
+ 	brctg	%r4,.Lmemmove_reverse
+ 	ic	%r0,0(%r4,%r3)
+ 	stc	%r0,0(%r4,%r1)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemmove_mvc:
+ 	mvc	0(1,%r1),0(%r3)
+ EXPORT_SYMBOL(memmove)
+@@ -77,7 +80,7 @@ ENTRY(memset)
+ .Lmemset_clear_remainder:
+ 	larl	%r3,.Lmemset_xc
+ 	ex	%r4,0(%r3)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemset_fill:
+ 	cghi	%r4,1
+ 	lgr	%r1,%r2
+@@ -95,10 +98,10 @@ ENTRY(memset)
+ 	stc	%r3,0(%r1)
+ 	larl	%r5,.Lmemset_mvc
+ 	ex	%r4,0(%r5)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemset_fill_exit:
+ 	stc	%r3,0(%r1)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemset_xc:
+ 	xc	0(1,%r1),0(%r1)
+ .Lmemset_mvc:
+@@ -121,7 +124,7 @@ ENTRY(memcpy)
+ .Lmemcpy_remainder:
+ 	larl	%r5,.Lmemcpy_mvc
+ 	ex	%r4,0(%r5)
+-	br	%r14
++	BR_EX	%r14
+ .Lmemcpy_loop:
+ 	mvc	0(256,%r1),0(%r3)
+ 	la	%r1,256(%r1)
+@@ -159,10 +162,10 @@ ENTRY(__memset\bits)
+ 	\insn	%r3,0(%r1)
+ 	larl	%r5,.L__memset_mvc\bits
+ 	ex	%r4,0(%r5)
+-	br	%r14
++	BR_EX	%r14
+ .L__memset_exit\bits:
+ 	\insn	%r3,0(%r2)
+-	br	%r14
++	BR_EX	%r14
+ .L__memset_mvc\bits:
+ 	mvc	\bytes(1,%r1),0(%r1)
+ .endm
+diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
+index 25bb4643c4f4..9f794869c1b0 100644
+--- a/arch/s390/net/bpf_jit.S
++++ b/arch/s390/net/bpf_jit.S
+@@ -9,6 +9,7 @@
+  */
+ 
+ #include <linux/linkage.h>
++#include <asm/nospec-insn.h>
+ #include "bpf_jit.h"
+ 
+ /*
+@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos);						\
+ 	clg	%r3,STK_OFF_HLEN(%r15);	/* Offset + SIZE > hlen? */	\
+ 	jh	sk_load_##NAME##_slow;					\
+ 	LOAD	%r14,-SIZE(%r3,%r12);	/* Get data from skb */		\
+-	b	OFF_OK(%r6);		/* Return */			\
++	B_EX	OFF_OK,%r6;		/* Return */			\
+ 									\
+ sk_load_##NAME##_slow:;							\
+ 	lgr	%r2,%r7;		/* Arg1 = skb pointer */	\
+@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:;							\
+ 	brasl	%r14,skb_copy_bits;	/* Get data from skb */		\
+ 	LOAD	%r14,STK_OFF_TMP(%r15);	/* Load from temp bufffer */	\
+ 	ltgr	%r2,%r2;		/* Set cc to (%r2 != 0) */	\
+-	br	%r6;			/* Return */
++	BR_EX	%r6;			/* Return */
+ 
+ sk_load_common(word, 4, llgf)	/* r14 = *(u32 *) (skb->data+offset) */
+ sk_load_common(half, 2, llgh)	/* r14 = *(u16 *) (skb->data+offset) */
+ 
++	GEN_BR_THUNK %r6
++	GEN_B_THUNK OFF_OK,%r6
++
+ /*
+  * Load 1 byte from SKB (optimized version)
+  */
+@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
+ 	clg	%r3,STK_OFF_HLEN(%r15)	# Offset >= hlen?
+ 	jnl	sk_load_byte_slow
+ 	llgc	%r14,0(%r3,%r12)	# Get byte from skb
+-	b	OFF_OK(%r6)		# Return OK
++	B_EX	OFF_OK,%r6		# Return OK
+ 
+ sk_load_byte_slow:
+ 	lgr	%r2,%r7			# Arg1 = skb pointer
+@@ -90,7 +94,7 @@ sk_load_byte_slow:
+ 	brasl	%r14,skb_copy_bits	# Get data from skb
+ 	llgc	%r14,STK_OFF_TMP(%r15)	# Load result from temp buffer
+ 	ltgr	%r2,%r2			# Set cc to (%r2 != 0)
+-	br	%r6			# Return cc
++	BR_EX	%r6			# Return cc
+ 
+ #define sk_negative_common(NAME, SIZE, LOAD)				\
+ sk_load_##NAME##_slow_neg:;						\
+@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:;						\
+ 	jz	bpf_error;						\
+ 	LOAD	%r14,0(%r2);		/* Get data from pointer */	\
+ 	xr	%r3,%r3;		/* Set cc to zero */		\
+-	br	%r6;			/* Return cc */
++	BR_EX	%r6;			/* Return cc */
+ 
+ sk_negative_common(word, 4, llgf)
+ sk_negative_common(half, 2, llgh)
+@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
+ bpf_error:
+ # force a return 0 from jit handler
+ 	ltgr	%r15,%r15	# Set condition code
+-	br	%r6
++	BR_EX	%r6
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 78a19c93b380..dd2bcf0e7d00 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -25,6 +25,8 @@
+ #include <linux/bpf.h>
+ #include <asm/cacheflush.h>
+ #include <asm/dis.h>
++#include <asm/facility.h>
++#include <asm/nospec-branch.h>
+ #include <asm/set_memory.h>
+ #include "bpf_jit.h"
+ 
+@@ -41,6 +43,8 @@ struct bpf_jit {
+ 	int base_ip;		/* Base address for literal pool */
+ 	int ret0_ip;		/* Address of return 0 */
+ 	int exit_ip;		/* Address of exit */
++	int r1_thunk_ip;	/* Address of expoline thunk for 'br %r1' */
++	int r14_thunk_ip;	/* Address of expoline thunk for 'br %r14' */
+ 	int tail_call_start;	/* Tail call start offset */
+ 	int labels[1];		/* Labels for local jumps */
+ };
+@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ 	REG_SET_SEEN(b2);					\
+ })
+ 
++#define EMIT6_PCREL_RILB(op, b, target)				\
++({								\
++	int rel = (target - jit->prg) / 2;			\
++	_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);	\
++	REG_SET_SEEN(b);					\
++})
++
++#define EMIT6_PCREL_RIL(op, target)				\
++({								\
++	int rel = (target - jit->prg) / 2;			\
++	_EMIT6(op | rel >> 16, rel & 0xffff);			\
++})
++
+ #define _EMIT6_IMM(op, imm)					\
+ ({								\
+ 	unsigned int __imm = (imm);				\
+@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
+ 	EMIT4(0xb9040000, REG_2, BPF_REG_0);
+ 	/* Restore registers */
+ 	save_restore_regs(jit, REGS_RESTORE, stack_depth);
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++		jit->r14_thunk_ip = jit->prg;
++		/* Generate __s390_indirect_jump_r14 thunk */
++		if (test_facility(35)) {
++			/* exrl %r0,.+10 */
++			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++		} else {
++			/* larl %r1,.+14 */
++			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++			/* ex 0,0(%r1) */
++			EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
++		}
++		/* j . */
++		EMIT4_PCREL(0xa7f40000, 0);
++	}
+ 	/* br %r14 */
+ 	_EMIT2(0x07fe);
++
++	if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
++	    (jit->seen & SEEN_FUNC)) {
++		jit->r1_thunk_ip = jit->prg;
++		/* Generate __s390_indirect_jump_r1 thunk */
++		if (test_facility(35)) {
++			/* exrl %r0,.+10 */
++			EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
++			/* j . */
++			EMIT4_PCREL(0xa7f40000, 0);
++			/* br %r1 */
++			_EMIT2(0x07f1);
++		} else {
++			/* larl %r1,.+14 */
++			EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
++			/* ex 0,S390_lowcore.br_r1_tampoline */
++			EMIT4_DISP(0x44000000, REG_0, REG_0,
++				   offsetof(struct lowcore, br_r1_trampoline));
++			/* j . */
++			EMIT4_PCREL(0xa7f40000, 0);
++		}
++	}
+ }
+ 
+ /*
+@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ 		/* lg %w1,<d(imm)>(%l) */
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+ 			      EMIT_CONST_U64(func));
+-		/* basr %r14,%w1 */
+-		EMIT2(0x0d00, REG_14, REG_W1);
++		if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
++			/* brasl %r14,__s390_indirect_jump_r1 */
++			EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
++		} else {
++			/* basr %r14,%w1 */
++			EMIT2(0x0d00, REG_14, REG_W1);
++		}
+ 		/* lgr %b0,%r2: load return value into %b0 */
+ 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
+ 		if ((jit->seen & SEEN_SKB) &&
+diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
+index 1a0fa10cb6b7..32bae68e34c1 100644
+--- a/arch/sparc/kernel/vio.c
++++ b/arch/sparc/kernel/vio.c
+@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ 	if (err) {
+ 		printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
+ 		       dev_name(&vdev->dev), err);
+-		kfree(vdev);
++		put_device(&vdev->dev);
+ 		return NULL;
+ 	}
+ 	if (vdev->dp)
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index edfede768688..5167f3f74136 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -57,12 +57,17 @@ static void load_segments(void)
+ static void machine_kexec_free_page_tables(struct kimage *image)
+ {
+ 	free_page((unsigned long)image->arch.pgd);
++	image->arch.pgd = NULL;
+ #ifdef CONFIG_X86_PAE
+ 	free_page((unsigned long)image->arch.pmd0);
++	image->arch.pmd0 = NULL;
+ 	free_page((unsigned long)image->arch.pmd1);
++	image->arch.pmd1 = NULL;
+ #endif
+ 	free_page((unsigned long)image->arch.pte0);
++	image->arch.pte0 = NULL;
+ 	free_page((unsigned long)image->arch.pte1);
++	image->arch.pte1 = NULL;
+ }
+ 
+ static int machine_kexec_alloc_page_tables(struct kimage *image)
+@@ -79,7 +84,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
+ 	    !image->arch.pmd0 || !image->arch.pmd1 ||
+ #endif
+ 	    !image->arch.pte0 || !image->arch.pte1) {
+-		machine_kexec_free_page_tables(image);
+ 		return -ENOMEM;
+ 	}
+ 	return 0;
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 3b7427aa7d85..5bce2a88e8a3 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -38,9 +38,13 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
+ static void free_transition_pgtable(struct kimage *image)
+ {
+ 	free_page((unsigned long)image->arch.p4d);
++	image->arch.p4d = NULL;
+ 	free_page((unsigned long)image->arch.pud);
++	image->arch.pud = NULL;
+ 	free_page((unsigned long)image->arch.pmd);
++	image->arch.pmd = NULL;
+ 	free_page((unsigned long)image->arch.pte);
++	image->arch.pte = NULL;
+ }
+ 
+ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+@@ -90,7 +94,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+ 	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC_NOENC));
+ 	return 0;
+ err:
+-	free_transition_pgtable(image);
+ 	return result;
+ }
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index fe92cb972dd1..1629a2099adf 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1171,21 +1171,17 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+ static int
+ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ {
+-	struct file *file = lo->lo_backing_file;
++	struct file *file;
+ 	struct kstat stat;
+-	int error;
++	int ret;
+ 
+-	if (lo->lo_state != Lo_bound)
++	if (lo->lo_state != Lo_bound) {
++		mutex_unlock(&lo->lo_ctl_mutex);
+ 		return -ENXIO;
+-	error = vfs_getattr(&file->f_path, &stat,
+-			    STATX_INO, AT_STATX_SYNC_AS_STAT);
+-	if (error)
+-		return error;
++	}
++
+ 	memset(info, 0, sizeof(*info));
+ 	info->lo_number = lo->lo_number;
+-	info->lo_device = huge_encode_dev(stat.dev);
+-	info->lo_inode = stat.ino;
+-	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
+ 	info->lo_offset = lo->lo_offset;
+ 	info->lo_sizelimit = lo->lo_sizelimit;
+ 	info->lo_flags = lo->lo_flags;
+@@ -1198,7 +1194,19 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ 		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
+ 		       lo->lo_encrypt_key_size);
+ 	}
+-	return 0;
++
++	/* Drop lo_ctl_mutex while we call into the filesystem. */
++	file = get_file(lo->lo_backing_file);
++	mutex_unlock(&lo->lo_ctl_mutex);
++	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
++			  AT_STATX_SYNC_AS_STAT);
++	if (!ret) {
++		info->lo_device = huge_encode_dev(stat.dev);
++		info->lo_inode = stat.ino;
++		info->lo_rdevice = huge_encode_dev(stat.rdev);
++	}
++	fput(file);
++	return ret;
+ }
+ 
+ static void
+@@ -1279,12 +1287,13 @@ static int
+ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ 	struct loop_info info;
+ 	struct loop_info64 info64;
+-	int err = 0;
++	int err;
+ 
+-	if (!arg)
+-		err = -EINVAL;
+-	if (!err)
+-		err = loop_get_status(lo, &info64);
++	if (!arg) {
++		mutex_unlock(&lo->lo_ctl_mutex);
++		return -EINVAL;
++	}
++	err = loop_get_status(lo, &info64);
+ 	if (!err)
+ 		err = loop_info64_to_old(&info64, &info);
+ 	if (!err && copy_to_user(arg, &info, sizeof(info)))
+@@ -1296,12 +1305,13 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
+ static int
+ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
+ 	struct loop_info64 info64;
+-	int err = 0;
++	int err;
+ 
+-	if (!arg)
+-		err = -EINVAL;
+-	if (!err)
+-		err = loop_get_status(lo, &info64);
++	if (!arg) {
++		mutex_unlock(&lo->lo_ctl_mutex);
++		return -EINVAL;
++	}
++	err = loop_get_status(lo, &info64);
+ 	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
+ 		err = -EFAULT;
+ 
+@@ -1378,7 +1388,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ 		break;
+ 	case LOOP_GET_STATUS:
+ 		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+-		break;
++		/* loop_get_status() unlocks lo_ctl_mutex */
++		goto out_unlocked;
+ 	case LOOP_SET_STATUS64:
+ 		err = -EPERM;
+ 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+@@ -1387,7 +1398,8 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
+ 		break;
+ 	case LOOP_GET_STATUS64:
+ 		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+-		break;
++		/* loop_get_status() unlocks lo_ctl_mutex */
++		goto out_unlocked;
+ 	case LOOP_SET_CAPACITY:
+ 		err = -EPERM;
+ 		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+@@ -1520,12 +1532,13 @@ loop_get_status_compat(struct loop_device *lo,
+ 		       struct compat_loop_info __user *arg)
+ {
+ 	struct loop_info64 info64;
+-	int err = 0;
++	int err;
+ 
+-	if (!arg)
+-		err = -EINVAL;
+-	if (!err)
+-		err = loop_get_status(lo, &info64);
++	if (!arg) {
++		mutex_unlock(&lo->lo_ctl_mutex);
++		return -EINVAL;
++	}
++	err = loop_get_status(lo, &info64);
+ 	if (!err)
+ 		err = loop_info64_to_compat(&info64, arg);
+ 	return err;
+@@ -1548,7 +1561,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ 		mutex_lock(&lo->lo_ctl_mutex);
+ 		err = loop_get_status_compat(
+ 			lo, (struct compat_loop_info __user *) arg);
+-		mutex_unlock(&lo->lo_ctl_mutex);
++		/* loop_get_status() unlocks lo_ctl_mutex */
+ 		break;
+ 	case LOOP_SET_CAPACITY:
+ 	case LOOP_CLR_FD:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index dcb982e3a41f..6bfb8088e5f5 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 
+ 	/* Intel Bluetooth devices */
+ 	{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
++	{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
+ 	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
+ 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
+ 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
+@@ -367,6 +368,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
+ 
++	/* Additional Realtek 8723BU Bluetooth devices */
++	{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
++
+ 	/* Additional Realtek 8821AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
+@@ -374,6 +378,9 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+ 
++	/* Additional Realtek 8822BE Bluetooth devices */
++	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
++
+ 	/* Silicon Wave based devices */
+ 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+ 
+@@ -2080,6 +2087,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ 	case 0x0c:	/* WsP */
+ 	case 0x11:	/* JfP */
+ 	case 0x12:	/* ThP */
++	case 0x13:	/* HrP */
++	case 0x14:	/* QnJ, IcP */
+ 		break;
+ 	default:
+ 		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+@@ -2172,6 +2181,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ 		break;
+ 	case 0x11:	/* JfP */
+ 	case 0x12:	/* ThP */
++	case 0x13:	/* HrP */
++	case 0x14:	/* QnJ, IcP */
+ 		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ 			 le16_to_cpu(ver.hw_variant),
+ 			 le16_to_cpu(ver.hw_revision),
+@@ -2203,6 +2214,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ 		break;
+ 	case 0x11:	/* JfP */
+ 	case 0x12:	/* ThP */
++	case 0x13:	/* HrP */
++	case 0x14:	/* QnJ, IcP */
+ 		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ 			 le16_to_cpu(ver.hw_variant),
+ 			 le16_to_cpu(ver.hw_revision),
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 076d4244d672..5698d2fac1af 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -2375,6 +2375,9 @@ static int clk_core_get_phase(struct clk_core *core)
+ 	int ret;
+ 
+ 	clk_prepare_lock();
++	/* Always try to update cached phase if possible */
++	if (core->ops->get_phase)
++		core->phase = core->ops->get_phase(core->hw);
+ 	ret = core->phase;
+ 	clk_prepare_unlock();
+ 
+diff --git a/drivers/clk/hisilicon/crg-hi3516cv300.c b/drivers/clk/hisilicon/crg-hi3516cv300.c
+index 2007123832bb..53450b651e4c 100644
+--- a/drivers/clk/hisilicon/crg-hi3516cv300.c
++++ b/drivers/clk/hisilicon/crg-hi3516cv300.c
+@@ -204,7 +204,7 @@ static const struct hisi_crg_funcs hi3516cv300_crg_funcs = {
+ /* hi3516CV300 sysctrl CRG */
+ #define HI3516CV300_SYSCTRL_NR_CLKS 16
+ 
+-static const char *wdt_mux_p[] __initconst = { "3m", "apb" };
++static const char *const wdt_mux_p[] __initconst = { "3m", "apb" };
+ static u32 wdt_mux_table[] = {0, 1};
+ 
+ static const struct hisi_mux_clock hi3516cv300_sysctrl_mux_clks[] = {
+diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
+index 1294f3ad7cd5..3b8b53b279dc 100644
+--- a/drivers/clk/meson/axg.c
++++ b/drivers/clk/meson/axg.c
+@@ -129,6 +129,11 @@ static struct meson_clk_pll axg_fixed_pll = {
+ 		.shift   = 16,
+ 		.width   = 2,
+ 	},
++	.frac = {
++		.reg_off = HHI_MPLL_CNTL2,
++		.shift   = 0,
++		.width   = 12,
++	},
+ 	.lock = &meson_clk_lock,
+ 	.hw.init = &(struct clk_init_data){
+ 		.name = "fixed_pll",
+@@ -151,7 +156,7 @@ static struct meson_clk_pll axg_sys_pll = {
+ 	},
+ 	.od = {
+ 		.reg_off = HHI_SYS_PLL_CNTL,
+-		.shift   = 10,
++		.shift   = 16,
+ 		.width   = 2,
+ 	},
+ 	.rate_table = sys_pll_rate_table,
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index 077fcdc7908b..fe7d9ed1d436 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -58,6 +58,12 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
+ 	u16 degrees;
+ 	u32 delay_num = 0;
+ 
++	/* See the comment for rockchip_mmc_set_phase below */
++	if (!rate) {
++		pr_err("%s: invalid clk rate\n", __func__);
++		return -EINVAL;
++	}
++
+ 	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+ 
+ 	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+@@ -84,6 +90,23 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
+ 	u32 raw_value;
+ 	u32 delay;
+ 
++	/*
++	 * The below calculation is based on the output clock from
++	 * MMC host to the card, which expects the phase clock inherits
++	 * the clock rate from its parent, namely the output clock
++	 * provider of MMC host. However, things may go wrong if
++	 * (1) It is orphan.
++	 * (2) It is assigned to the wrong parent.
++	 *
++	 * This check help debug the case (1), which seems to be the
++	 * most likely problem we often face and which makes it difficult
++	 * for people to debug unstable mmc tuning results.
++	 */
++	if (!rate) {
++		pr_err("%s: invalid clk rate\n", __func__);
++		return -EINVAL;
++	}
++
+ 	nineties = degrees / 90;
+ 	remainder = (degrees % 90);
+ 
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index 11e7f2d1c054..7af48184b022 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -387,7 +387,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
+ 			RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
+ 			RK2928_CLKGATE_CON(2), 15, GFLAGS),
+ 
+-	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
++	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+ 			RK2928_CLKSEL_CON(11), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ 			RK2928_CLKGATE_CON(2), 11, GFLAGS),
+ 
+diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
+index 1b81e283f605..ed36728424a2 100644
+--- a/drivers/clk/samsung/clk-exynos3250.c
++++ b/drivers/clk/samsung/clk-exynos3250.c
+@@ -698,7 +698,7 @@ static const struct samsung_pll_rate_table exynos3250_epll_rates[] __initconst =
+ 	PLL_36XX_RATE(144000000,  96, 2, 3,     0),
+ 	PLL_36XX_RATE( 96000000, 128, 2, 4,     0),
+ 	PLL_36XX_RATE( 84000000, 112, 2, 4,     0),
+-	PLL_36XX_RATE( 80000004, 106, 2, 4, 43691),
++	PLL_36XX_RATE( 80000003, 106, 2, 4, 43691),
+ 	PLL_36XX_RATE( 73728000,  98, 2, 4, 19923),
+ 	PLL_36XX_RATE( 67737598, 270, 3, 5, 62285),
+ 	PLL_36XX_RATE( 65535999, 174, 2, 5, 49982),
+@@ -734,7 +734,7 @@ static const struct samsung_pll_rate_table exynos3250_vpll_rates[] __initconst =
+ 	PLL_36XX_RATE(148352005,  98, 2, 3, 59070),
+ 	PLL_36XX_RATE(108000000, 144, 2, 4,     0),
+ 	PLL_36XX_RATE( 74250000,  99, 2, 4,     0),
+-	PLL_36XX_RATE( 74176002,  98, 3, 4, 59070),
++	PLL_36XX_RATE( 74176002,  98, 2, 4, 59070),
+ 	PLL_36XX_RATE( 54054000, 216, 3, 5, 14156),
+ 	PLL_36XX_RATE( 54000000, 144, 2, 5,     0),
+ 	{ /* sentinel */ }
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index 9b073c98a891..923c608b1b95 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -711,13 +711,13 @@ static const struct samsung_pll_rate_table epll_24mhz_tbl[] __initconst = {
+ 	/* sorted in descending order */
+ 	/* PLL_36XX_RATE(rate, m, p, s, k) */
+ 	PLL_36XX_RATE(192000000, 64, 2, 2, 0),
+-	PLL_36XX_RATE(180633600, 90, 3, 2, 20762),
++	PLL_36XX_RATE(180633605, 90, 3, 2, 20762),
+ 	PLL_36XX_RATE(180000000, 90, 3, 2, 0),
+ 	PLL_36XX_RATE(73728000, 98, 2, 4, 19923),
+-	PLL_36XX_RATE(67737600, 90, 2, 4, 20762),
++	PLL_36XX_RATE(67737602, 90, 2, 4, 20762),
+ 	PLL_36XX_RATE(49152000, 98, 3, 4, 19923),
+-	PLL_36XX_RATE(45158400, 90, 3, 4, 20762),
+-	PLL_36XX_RATE(32768000, 131, 3, 5, 4719),
++	PLL_36XX_RATE(45158401, 90, 3, 4, 20762),
++	PLL_36XX_RATE(32768001, 131, 3, 5, 4719),
+ 	{ },
+ };
+ 
+diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
+index fd1d9bfc151b..8eae1752d700 100644
+--- a/drivers/clk/samsung/clk-exynos5260.c
++++ b/drivers/clk/samsung/clk-exynos5260.c
+@@ -65,7 +65,7 @@ static const struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initconst = {
+ 	PLL_36XX_RATE(480000000, 160, 2, 2, 0),
+ 	PLL_36XX_RATE(432000000, 144, 2, 2, 0),
+ 	PLL_36XX_RATE(400000000, 200, 3, 2, 0),
+-	PLL_36XX_RATE(394073130, 459, 7, 2, 49282),
++	PLL_36XX_RATE(394073128, 459, 7, 2, 49282),
+ 	PLL_36XX_RATE(333000000, 111, 2, 2, 0),
+ 	PLL_36XX_RATE(300000000, 100, 2, 2, 0),
+ 	PLL_36XX_RATE(266000000, 266, 3, 3, 0),
+diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
+index db270908037a..335bebfa21c0 100644
+--- a/drivers/clk/samsung/clk-exynos5433.c
++++ b/drivers/clk/samsung/clk-exynos5433.c
+@@ -729,7 +729,7 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
+ 	PLL_35XX_RATE(800000000U,  400, 6,  1),
+ 	PLL_35XX_RATE(733000000U,  733, 12, 1),
+ 	PLL_35XX_RATE(700000000U,  175, 3,  1),
+-	PLL_35XX_RATE(667000000U,  222, 4,  1),
++	PLL_35XX_RATE(666000000U,  222, 4,  1),
+ 	PLL_35XX_RATE(633000000U,  211, 4,  1),
+ 	PLL_35XX_RATE(600000000U,  500, 5,  2),
+ 	PLL_35XX_RATE(552000000U,  460, 5,  2),
+@@ -757,12 +757,12 @@ static const struct samsung_pll_rate_table exynos5433_pll_rates[] __initconst =
+ /* AUD_PLL */
+ static const struct samsung_pll_rate_table exynos5433_aud_pll_rates[] __initconst = {
+ 	PLL_36XX_RATE(400000000U, 200, 3, 2,      0),
+-	PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
++	PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
+ 	PLL_36XX_RATE(384000000U, 128, 2, 2,      0),
+-	PLL_36XX_RATE(368640000U, 246, 4, 2, -15729),
+-	PLL_36XX_RATE(361507200U, 181, 3, 2, -16148),
+-	PLL_36XX_RATE(338688000U, 113, 2, 2,  -6816),
+-	PLL_36XX_RATE(294912000U,  98, 1, 3,  19923),
++	PLL_36XX_RATE(368639991U, 246, 4, 2, -15729),
++	PLL_36XX_RATE(361507202U, 181, 3, 2, -16148),
++	PLL_36XX_RATE(338687988U, 113, 2, 2,  -6816),
++	PLL_36XX_RATE(294912002U,  98, 1, 3,  19923),
+ 	PLL_36XX_RATE(288000000U,  96, 1, 3,      0),
+ 	PLL_36XX_RATE(252000000U,  84, 1, 3,      0),
+ 	{ /* sentinel */ }
+diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
+index 5931a4140c3d..bbfa57b4e017 100644
+--- a/drivers/clk/samsung/clk-exynos7.c
++++ b/drivers/clk/samsung/clk-exynos7.c
+@@ -140,7 +140,7 @@ static const struct samsung_div_clock topc_div_clks[] __initconst = {
+ };
+ 
+ static const struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initconst = {
+-	PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
++	PLL_36XX_RATE(491519897, 20, 1, 0, 31457),
+ 	{},
+ };
+ 
+diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
+index e0650c33863b..d8e58a659467 100644
+--- a/drivers/clk/samsung/clk-s3c2410.c
++++ b/drivers/clk/samsung/clk-s3c2410.c
+@@ -168,7 +168,7 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
+ 	PLL_35XX_RATE(226000000, 105, 1, 1),
+ 	PLL_35XX_RATE(210000000, 132, 2, 1),
+ 	/* 2410 common */
+-	PLL_35XX_RATE(203000000, 161, 3, 1),
++	PLL_35XX_RATE(202800000, 161, 3, 1),
+ 	PLL_35XX_RATE(192000000, 88, 1, 1),
+ 	PLL_35XX_RATE(186000000, 85, 1, 1),
+ 	PLL_35XX_RATE(180000000, 82, 1, 1),
+@@ -178,18 +178,18 @@ static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
+ 	PLL_35XX_RATE(147000000, 90, 2, 1),
+ 	PLL_35XX_RATE(135000000, 82, 2, 1),
+ 	PLL_35XX_RATE(124000000, 116, 1, 2),
+-	PLL_35XX_RATE(118000000, 150, 2, 2),
++	PLL_35XX_RATE(118500000, 150, 2, 2),
+ 	PLL_35XX_RATE(113000000, 105, 1, 2),
+-	PLL_35XX_RATE(101000000, 127, 2, 2),
++	PLL_35XX_RATE(101250000, 127, 2, 2),
+ 	PLL_35XX_RATE(90000000, 112, 2, 2),
+-	PLL_35XX_RATE(85000000, 105, 2, 2),
++	PLL_35XX_RATE(84750000, 105, 2, 2),
+ 	PLL_35XX_RATE(79000000, 71, 1, 2),
+-	PLL_35XX_RATE(68000000, 82, 2, 2),
+-	PLL_35XX_RATE(56000000, 142, 2, 3),
++	PLL_35XX_RATE(67500000, 82, 2, 2),
++	PLL_35XX_RATE(56250000, 142, 2, 3),
+ 	PLL_35XX_RATE(48000000, 120, 2, 3),
+-	PLL_35XX_RATE(51000000, 161, 3, 3),
++	PLL_35XX_RATE(50700000, 161, 3, 3),
+ 	PLL_35XX_RATE(45000000, 82, 1, 3),
+-	PLL_35XX_RATE(34000000, 82, 2, 3),
++	PLL_35XX_RATE(33750000, 82, 2, 3),
+ 	{ /* sentinel */ },
+ };
+ 
+diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
+index 7c369e21c91c..830d1c87fa7c 100644
+--- a/drivers/clk/tegra/clk-pll.c
++++ b/drivers/clk/tegra/clk-pll.c
+@@ -1151,6 +1151,8 @@ static const struct clk_ops tegra_clk_pllu_ops = {
+ 	.enable = clk_pllu_enable,
+ 	.disable = clk_pll_disable,
+ 	.recalc_rate = clk_pll_recalc_rate,
++	.round_rate = clk_pll_round_rate,
++	.set_rate = clk_pll_set_rate,
+ };
+ 
+ static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 691c6465b71e..8561cce67741 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -2155,7 +2155,7 @@ static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
+ 
+ badkey:
+ 	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-	memzero_explicit(&key, sizeof(keys));
++	memzero_explicit(&keys, sizeof(keys));
+ 	return -EINVAL;
+ }
+ 
+diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
+index 59d4ca4e72d8..1a734bd2070a 100644
+--- a/drivers/crypto/ccp/ccp-debugfs.c
++++ b/drivers/crypto/ccp/ccp-debugfs.c
+@@ -278,7 +278,7 @@ static const struct file_operations ccp_debugfs_stats_ops = {
+ };
+ 
+ static struct dentry *ccp_debugfs_dir;
+-static DEFINE_RWLOCK(ccp_debugfs_lock);
++static DEFINE_MUTEX(ccp_debugfs_lock);
+ 
+ #define	MAX_NAME_LEN	20
+ 
+@@ -290,16 +290,15 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
+ 	struct dentry *debugfs_stats;
+ 	struct dentry *debugfs_q_instance;
+ 	struct dentry *debugfs_q_stats;
+-	unsigned long flags;
+ 	int i;
+ 
+ 	if (!debugfs_initialized())
+ 		return;
+ 
+-	write_lock_irqsave(&ccp_debugfs_lock, flags);
++	mutex_lock(&ccp_debugfs_lock);
+ 	if (!ccp_debugfs_dir)
+ 		ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+-	write_unlock_irqrestore(&ccp_debugfs_lock, flags);
++	mutex_unlock(&ccp_debugfs_lock);
+ 	if (!ccp_debugfs_dir)
+ 		return;
+ 
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index 0dd3a7ac1dd1..f4a76971b4ac 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -490,6 +490,15 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+ 		if (backlog)
+ 			backlog->complete(backlog, -EINPROGRESS);
+ 
++		/* In case the send() helper did not issue any command to push
++		 * to the engine because the input data was cached, continue to
++		 * dequeue other requests as this is valid and not an error.
++		 */
++		if (!commands && !results) {
++			kfree(request);
++			continue;
++		}
++
+ 		spin_lock_bh(&priv->ring[ring].egress_lock);
+ 		list_add_tail(&request->list, &priv->ring[ring].list);
+ 		spin_unlock_bh(&priv->ring[ring].egress_lock);
+@@ -514,8 +523,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
+ 
+ 	if (!priv->ring[ring].busy) {
+ 		nreq -= safexcel_try_push_requests(priv, ring, nreq);
+-		if (nreq)
+-			priv->ring[ring].busy = true;
++		priv->ring[ring].busy = true;
+ 	}
+ 
+ 	priv->ring[ring].requests_left += nreq;
+diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
+index 63a8768ed2ae..17a7725a6f6d 100644
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -456,7 +456,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
+ 	queue_work(priv->ring[ring].workqueue,
+ 		   &priv->ring[ring].work_data.work);
+ 
+-	wait_for_completion_interruptible(&result.completion);
++	wait_for_completion(&result.completion);
+ 
+ 	if (result.error) {
+ 		dev_warn(priv->dev,
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index 122a2a58e98f..3e65bb5732da 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -21,7 +21,6 @@ struct safexcel_ahash_ctx {
+ 	struct safexcel_crypto_priv *priv;
+ 
+ 	u32 alg;
+-	u32 digest;
+ 
+ 	u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
+ 	u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
+@@ -35,6 +34,8 @@ struct safexcel_ahash_req {
+ 
+ 	int nents;
+ 
++	u32 digest;
++
+ 	u8 state_sz;    /* expected sate size, only set once */
+ 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+ 
+@@ -49,6 +50,8 @@ struct safexcel_ahash_export_state {
+ 	u64 len;
+ 	u64 processed;
+ 
++	u32 digest;
++
+ 	u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+ 	u8 cache[SHA256_BLOCK_SIZE];
+ };
+@@ -82,9 +85,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+ 
+ 	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
+ 	cdesc->control_data.control0 |= ctx->alg;
+-	cdesc->control_data.control0 |= ctx->digest;
++	cdesc->control_data.control0 |= req->digest;
+ 
+-	if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
++	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+ 		if (req->processed) {
+ 			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+ 				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
+@@ -112,7 +115,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+ 			if (req->finish)
+ 				ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+ 		}
+-	} else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
++	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
+ 		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
+ 
+ 		memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
+@@ -184,7 +187,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ 	int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ 
+ 	queued = len = req->len - req->processed;
+-	if (queued < crypto_ahash_blocksize(ahash))
++	if (queued <= crypto_ahash_blocksize(ahash))
+ 		cache_len = queued;
+ 	else
+ 		cache_len = queued - areq->nbytes;
+@@ -198,7 +201,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
+ 			/* If this is not the last request and the queued data
+ 			 * is a multiple of a block, cache the last one for now.
+ 			 */
+-			extra = queued - crypto_ahash_blocksize(ahash);
++			extra = crypto_ahash_blocksize(ahash);
+ 
+ 		if (extra) {
+ 			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+@@ -493,7 +496,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+ 	queue_work(priv->ring[ring].workqueue,
+ 		   &priv->ring[ring].work_data.work);
+ 
+-	wait_for_completion_interruptible(&result.completion);
++	wait_for_completion(&result.completion);
+ 
+ 	if (result.error) {
+ 		dev_warn(priv->dev, "hash: completion error (%d)\n",
+@@ -550,7 +553,7 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
+ 	if (ctx->base.ctxr) {
+ 		if (priv->version == EIP197 &&
+ 		    !ctx->base.needs_inv && req->processed &&
+-		    ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
++		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ 			/* We're still setting needs_inv here, even though it is
+ 			 * cleared right away, because the needs_inv flag can be
+ 			 * set in other functions and we want to keep the same
+@@ -585,7 +588,6 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
+ 
+ static int safexcel_ahash_update(struct ahash_request *areq)
+ {
+-	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+ 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ 
+@@ -601,7 +603,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
+ 	 * We're not doing partial updates when performing an hmac request.
+ 	 * Everything will be handled by the final() call.
+ 	 */
+-	if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
++	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ 		return 0;
+ 
+ 	if (req->hmac)
+@@ -660,6 +662,8 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+ 	export->len = req->len;
+ 	export->processed = req->processed;
+ 
++	export->digest = req->digest;
++
+ 	memcpy(export->state, req->state, req->state_sz);
+ 	memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+ 
+@@ -680,6 +684,8 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+ 	req->len = export->len;
+ 	req->processed = export->processed;
+ 
++	req->digest = export->digest;
++
+ 	memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ 	memcpy(req->state, export->state, req->state_sz);
+ 
+@@ -716,7 +722,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
+ 	req->state[4] = SHA1_H4;
+ 
+ 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+-	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ 	req->state_sz = SHA1_DIGEST_SIZE;
+ 
+ 	return 0;
+@@ -783,10 +789,10 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
+ 
+ static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+ {
+-	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+ 
+ 	safexcel_sha1_init(areq);
+-	ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+ 	return 0;
+ }
+ 
+@@ -839,7 +845,7 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
+ 		init_completion(&result.completion);
+ 
+ 		ret = crypto_ahash_digest(areq);
+-		if (ret == -EINPROGRESS) {
++		if (ret == -EINPROGRESS || ret == -EBUSY) {
+ 			wait_for_completion_interruptible(&result.completion);
+ 			ret = result.error;
+ 		}
+@@ -1024,7 +1030,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
+ 	req->state[7] = SHA256_H7;
+ 
+ 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+-	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ 	req->state_sz = SHA256_DIGEST_SIZE;
+ 
+ 	return 0;
+@@ -1086,7 +1092,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
+ 	req->state[7] = SHA224_H7;
+ 
+ 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+-	ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ 	req->state_sz = SHA256_DIGEST_SIZE;
+ 
+ 	return 0;
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+index 1547cbe13dc2..a81d89b3b7d8 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+@@ -451,6 +451,7 @@ static struct platform_driver sun4i_ss_driver = {
+ 
+ module_platform_driver(sun4i_ss_driver);
+ 
++MODULE_ALIAS("platform:sun4i-ss");
+ MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
+diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+index 3a7c80cd1a17..359fb9804d16 100644
+--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
++++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+@@ -106,7 +106,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
+ 			if (nums[i-1] + 1 != nums[i])
+ 				goto fail_map;
+ 		buf->vaddr = (__force void *)
+-				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
++			ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
+ 	} else {
+ 		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
+ 					PAGE_KERNEL);
+diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
+index 6356815cf3e1..3642e6e4761e 100644
+--- a/drivers/media/dvb-frontends/lgdt3306a.c
++++ b/drivers/media/dvb-frontends/lgdt3306a.c
+@@ -1768,7 +1768,13 @@ static void lgdt3306a_release(struct dvb_frontend *fe)
+ 	struct lgdt3306a_state *state = fe->demodulator_priv;
+ 
+ 	dbg_info("\n");
+-	kfree(state);
++
++	/*
++	 * If state->muxc is not NULL, then we are an i2c device
++	 * and lgdt3306a_remove will clean up state
++	 */
++	if (!state->muxc)
++		kfree(state);
+ }
+ 
+ static const struct dvb_frontend_ops lgdt3306a_ops;
+@@ -2169,7 +2175,7 @@ static int lgdt3306a_probe(struct i2c_client *client,
+ 			sizeof(struct lgdt3306a_config));
+ 
+ 	config->i2c_addr = client->addr;
+-	fe = lgdt3306a_attach(config, client->adapter);
++	fe = dvb_attach(lgdt3306a_attach, config, client->adapter);
+ 	if (fe == NULL) {
+ 		ret = -ENODEV;
+ 		goto err_fe;
+diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c
+index 4da4253553fc..10d229a4f088 100644
+--- a/drivers/media/i2c/adv748x/adv748x-hdmi.c
++++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c
+@@ -105,6 +105,9 @@ static void adv748x_hdmi_fill_format(struct adv748x_hdmi *hdmi,
+ 
+ 	fmt->width = hdmi->timings.bt.width;
+ 	fmt->height = hdmi->timings.bt.height;
++
++	if (fmt->field == V4L2_FIELD_ALTERNATE)
++		fmt->height /= 2;
+ }
+ 
+ static void adv748x_fill_optional_dv_timings(struct v4l2_dv_timings *timings)
+diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
+index d28845f7356f..a31fe18c71d6 100644
+--- a/drivers/media/i2c/ov5645.c
++++ b/drivers/media/i2c/ov5645.c
+@@ -1131,13 +1131,14 @@ static int ov5645_probe(struct i2c_client *client,
+ 
+ 	ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ 					 &ov5645->ep);
++
++	of_node_put(endpoint);
++
+ 	if (ret < 0) {
+ 		dev_err(dev, "parsing endpoint node failed\n");
+ 		return ret;
+ 	}
+ 
+-	of_node_put(endpoint);
+-
+ 	if (ov5645->ep.bus_type != V4L2_MBUS_CSI2) {
+ 		dev_err(dev, "invalid bus type, must be CSI2\n");
+ 		return -EINVAL;
+diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
+index 3622521431f5..7ec8de7aee4f 100644
+--- a/drivers/media/pci/cx23885/cx23885-cards.c
++++ b/drivers/media/pci/cx23885/cx23885-cards.c
+@@ -2286,6 +2286,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
+ 				&dev->i2c_bus[2].i2c_adap,
+ 				"cx25840", 0x88 >> 1, NULL);
+ 		if (dev->sd_cx25840) {
++			/* set host data for clk_freq configuration */
++			v4l2_set_subdev_hostdata(dev->sd_cx25840,
++						&dev->clk_freq);
++
+ 			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
+ 			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
+ 		}
+diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
+index 8f63df1cb418..4612f26fcd6d 100644
+--- a/drivers/media/pci/cx23885/cx23885-core.c
++++ b/drivers/media/pci/cx23885/cx23885-core.c
+@@ -873,6 +873,16 @@ static int cx23885_dev_setup(struct cx23885_dev *dev)
+ 	if (cx23885_boards[dev->board].clk_freq > 0)
+ 		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
+ 
++	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
++		dev->pci->subsystem_device == 0x7137) {
++		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
++		 * with an 888, and a 25Mhz crystal, instead of the
++		 * usual third overtone 50Mhz. The default clock rate must
++		 * be overridden so the cx25840 is properly configured
++		 */
++		dev->clk_freq = 25000000;
++	}
++
+ 	dev->pci_bus  = dev->pci->bus->number;
+ 	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
+ 	cx23885_irq_add(dev, 0x001f00);
+diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
+index 04aa4a68a0ae..040c6c251d3a 100644
+--- a/drivers/media/pci/cx25821/cx25821-core.c
++++ b/drivers/media/pci/cx25821/cx25821-core.c
+@@ -867,6 +867,10 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ 	dev->nr = ++cx25821_devcount;
+ 	sprintf(dev->name, "cx25821[%d]", dev->nr);
+ 
++	if (dev->nr >= ARRAY_SIZE(card)) {
++		CX25821_INFO("dev->nr >= %zd", ARRAY_SIZE(card));
++		return -ENODEV;
++	}
+ 	if (dev->pci->device != 0x8210) {
+ 		pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
+ 			__func__, dev->pci->device);
+@@ -882,9 +886,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
+ 		dev->channels[i].sram_channels = &cx25821_sram_channels[i];
+ 	}
+ 
+-	if (dev->nr > 1)
+-		CX25821_INFO("dev->nr > 1!");
+-
+ 	/* board config */
+ 	dev->board = 1;		/* card[dev->nr]; */
+ 	dev->_max_num_decoders = MAX_DECODERS;
+diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
+index 437395a61065..9ab8e7ee2e1e 100644
+--- a/drivers/media/platform/s3c-camif/camif-capture.c
++++ b/drivers/media/platform/s3c-camif/camif-capture.c
+@@ -1256,16 +1256,17 @@ static void __camif_subdev_try_format(struct camif_dev *camif,
+ {
+ 	const struct s3c_camif_variant *variant = camif->variant;
+ 	const struct vp_pix_limits *pix_lim;
+-	int i = ARRAY_SIZE(camif_mbus_formats);
++	unsigned int i;
+ 
+ 	/* FIXME: constraints against codec or preview path ? */
+ 	pix_lim = &variant->vp_pix_limits[VP_CODEC];
+ 
+-	while (i-- >= 0)
++	for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
+ 		if (camif_mbus_formats[i] == mf->code)
+ 			break;
+ 
+-	mf->code = camif_mbus_formats[i];
++	if (i == ARRAY_SIZE(camif_mbus_formats))
++		mf->code = camif_mbus_formats[0];
+ 
+ 	if (pad == CAMIF_SD_PAD_SINK) {
+ 		v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
+diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
+index 3f9d354827af..c586c2ab9b31 100644
+--- a/drivers/media/platform/vivid/vivid-ctrls.c
++++ b/drivers/media/platform/vivid/vivid-ctrls.c
+@@ -1208,6 +1208,7 @@ static int vivid_radio_rx_s_ctrl(struct v4l2_ctrl *ctrl)
+ 		v4l2_ctrl_activate(dev->radio_rx_rds_ta, dev->radio_rx_rds_controls);
+ 		v4l2_ctrl_activate(dev->radio_rx_rds_tp, dev->radio_rx_rds_controls);
+ 		v4l2_ctrl_activate(dev->radio_rx_rds_ms, dev->radio_rx_rds_controls);
++		dev->radio_rx_dev.device_caps = dev->radio_rx_caps;
+ 		break;
+ 	case V4L2_CID_RDS_RECEPTION:
+ 		dev->radio_rx_rds_enabled = ctrl->val;
+@@ -1282,6 +1283,7 @@ static int vivid_radio_tx_s_ctrl(struct v4l2_ctrl *ctrl)
+ 		dev->radio_tx_caps &= ~V4L2_CAP_READWRITE;
+ 		if (!dev->radio_tx_rds_controls)
+ 			dev->radio_tx_caps |= V4L2_CAP_READWRITE;
++		dev->radio_tx_dev.device_caps = dev->radio_tx_caps;
+ 		break;
+ 	case V4L2_CID_RDS_TX_PTY:
+ 		if (dev->radio_rx_rds_controls)
+diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
+index 7ce69f23f50a..ac85942162c1 100644
+--- a/drivers/media/platform/vsp1/vsp1_drm.c
++++ b/drivers/media/platform/vsp1/vsp1_drm.c
+@@ -530,6 +530,15 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index)
+ 		struct vsp1_rwpf *rpf = vsp1->rpf[i];
+ 		unsigned int j;
+ 
++		/*
++		 * Make sure we don't accept more inputs than the hardware can
++		 * handle. This is a temporary fix to avoid display stall, we
++		 * need to instead allocate the BRU or BRS to display pipelines
++		 * dynamically based on the number of planes they each use.
++		 */
++		if (pipe->num_inputs >= pipe->bru->source_pad)
++			pipe->inputs[i] = NULL;
++
+ 		if (!pipe->inputs[i])
+ 			continue;
+ 
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 34e16f6ab4ac..545f9c1b6a58 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -507,8 +507,10 @@ static struct em28xx_reg_seq plex_px_bcud[] = {
+ };
+ 
+ /*
+- * 2040:0265 Hauppauge WinTV-dualHD DVB
+- * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM
++ * 2040:0265 Hauppauge WinTV-dualHD DVB Isoc
++ * 2040:8265 Hauppauge WinTV-dualHD DVB Bulk
++ * 2040:026d Hauppauge WinTV-dualHD ATSC/QAM Isoc
++ * 2040:826d Hauppauge WinTV-dualHD ATSC/QAM Bulk
+  * reg 0x80/0x84:
+  * GPIO_0: Yellow LED tuner 1, 0=on, 1=off
+  * GPIO_1: Green LED tuner 1, 0=on, 1=off
+@@ -2391,7 +2393,8 @@ struct em28xx_board em28xx_boards[] = {
+ 		.has_dvb       = 1,
+ 	},
+ 	/*
+-	 * 2040:0265 Hauppauge WinTV-dualHD (DVB version).
++	 * 2040:0265 Hauppauge WinTV-dualHD (DVB version) Isoc.
++	 * 2040:8265 Hauppauge WinTV-dualHD (DVB version) Bulk.
+ 	 * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157
+ 	 */
+ 	[EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = {
+@@ -2406,7 +2409,8 @@ struct em28xx_board em28xx_boards[] = {
+ 		.leds          = hauppauge_dualhd_leds,
+ 	},
+ 	/*
+-	 * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM).
++	 * 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
++	 * 2040:826d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Bulk.
+ 	 * Empia EM28274, 2x LG LGDT3306A, 2x Silicon Labs Si2157
+ 	 */
+ 	[EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595] = {
+@@ -2547,8 +2551,12 @@ struct usb_device_id em28xx_id_table[] = {
+ 			.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
+ 	{ USB_DEVICE(0x2040, 0x0265),
+ 			.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
++	{ USB_DEVICE(0x2040, 0x8265),
++			.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
+ 	{ USB_DEVICE(0x2040, 0x026d),
+ 			.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
++	{ USB_DEVICE(0x2040, 0x826d),
++			.driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 },
+ 	{ USB_DEVICE(0x0438, 0xb002),
+ 			.driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
+ 	{ USB_DEVICE(0x2001, 0xf112),
+@@ -2609,7 +2617,11 @@ struct usb_device_id em28xx_id_table[] = {
+ 			.driver_info = EM28178_BOARD_PCTV_461E },
+ 	{ USB_DEVICE(0x2013, 0x025f),
+ 			.driver_info = EM28178_BOARD_PCTV_292E },
+-	{ USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD */
++	{ USB_DEVICE(0x2040, 0x0264), /* Hauppauge WinTV-soloHD Isoc */
++			.driver_info = EM28178_BOARD_PCTV_292E },
++	{ USB_DEVICE(0x2040, 0x8264), /* Hauppauge OEM Generic WinTV-soloHD Bulk */
++			.driver_info = EM28178_BOARD_PCTV_292E },
++	{ USB_DEVICE(0x2040, 0x8268), /* Hauppauge Retail WinTV-soloHD Bulk */
+ 			.driver_info = EM28178_BOARD_PCTV_292E },
+ 	{ USB_DEVICE(0x0413, 0x6f07),
+ 			.driver_info = EM2861_BOARD_LEADTEK_VC100 },
+diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
+index 88084f24f033..094e83b6908d 100644
+--- a/drivers/media/usb/em28xx/em28xx.h
++++ b/drivers/media/usb/em28xx/em28xx.h
+@@ -191,7 +191,7 @@
+    USB 2.0 spec says bulk packet size is always 512 bytes
+  */
+ #define EM28XX_BULK_PACKET_MULTIPLIER 384
+-#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 384
++#define EM28XX_DVB_BULK_PACKET_MULTIPLIER 94
+ 
+ #define EM28XX_INTERLACED_DEFAULT 1
+ 
+diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
+index 23b45da784cb..b89acaee12d4 100644
+--- a/drivers/net/dsa/bcm_sf2_cfp.c
++++ b/drivers/net/dsa/bcm_sf2_cfp.c
+@@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
+ 	/* Locate the first rule available */
+ 	if (fs->location == RX_CLS_LOC_ANY)
+ 		rule_index = find_first_zero_bit(priv->cfp.used,
+-						 bcm_sf2_cfp_rule_size(priv));
++						 priv->num_cfp_rules);
+ 	else
+ 		rule_index = fs->location;
+ 
++	if (rule_index > bcm_sf2_cfp_rule_size(priv))
++		return -ENOSPC;
++
+ 	layout = &udf_tcpip4_layout;
+ 	/* We only use one UDF slice for now */
+ 	slice_num = bcm_sf2_get_slice_number(layout, 0);
+@@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ 	 * first half because the HW search is by incrementing addresses.
+ 	 */
+ 	if (fs->location == RX_CLS_LOC_ANY)
+-		rule_index[0] = find_first_zero_bit(priv->cfp.used,
+-						    bcm_sf2_cfp_rule_size(priv));
++		rule_index[1] = find_first_zero_bit(priv->cfp.used,
++						    priv->num_cfp_rules);
+ 	else
+-		rule_index[0] = fs->location;
++		rule_index[1] = fs->location;
++	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
++		return -ENOSPC;
+ 
+ 	/* Flag it as used (cleared on error path) such that we can immediately
+ 	 * obtain a second one to chain from.
+ 	 */
+-	set_bit(rule_index[0], priv->cfp.used);
++	set_bit(rule_index[1], priv->cfp.used);
+ 
+-	rule_index[1] = find_first_zero_bit(priv->cfp.used,
+-					    bcm_sf2_cfp_rule_size(priv));
+-	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
++	rule_index[0] = find_first_zero_bit(priv->cfp.used,
++					    priv->num_cfp_rules);
++	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
+ 		ret = -ENOSPC;
+ 		goto out_err;
+ 	}
+@@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
+ 	/* Flag the second half rule as being used now, return it as the
+ 	 * location, and flag it as unique while dumping rules
+ 	 */
+-	set_bit(rule_index[1], priv->cfp.used);
++	set_bit(rule_index[0], priv->cfp.used);
+ 	set_bit(rule_index[1], priv->cfp.unique);
+ 	fs->location = rule_index[1];
+ 
+ 	return ret;
+ 
+ out_err:
+-	clear_bit(rule_index[0], priv->cfp.used);
++	clear_bit(rule_index[1], priv->cfp.used);
+ 	return ret;
+ }
+ 
+@@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
+ 	int ret;
+ 	u32 reg;
+ 
+-	/* Refuse deletion of unused rules, and the default reserved rule */
+-	if (!test_bit(loc, priv->cfp.used) || loc == 0)
+-		return -EINVAL;
+-
+ 	/* Indicate which rule we want to read */
+ 	bcm_sf2_cfp_rule_addr_set(priv, loc);
+ 
+@@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+ 	u32 next_loc = 0;
+ 	int ret;
+ 
++	/* Refuse deleting unused rules, and those that are not unique since
++	 * that could leave IPv6 rules with one of the chained rule in the
++	 * table.
++	 */
++	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
++		return -EINVAL;
++
+ 	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 36c8950dbd2d..176861bd2252 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ 	vp->mii.reg_num_mask = 0x1f;
+ 
+ 	/* Makes sure rings are at least 16 byte aligned. */
+-	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
++	vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
+ 					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+-					   &vp->rx_ring_dma);
++					   &vp->rx_ring_dma, GFP_KERNEL);
+ 	retval = -ENOMEM;
+ 	if (!vp->rx_ring)
+ 		goto free_device;
+@@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
+ 		return 0;
+ 
+ free_ring:
+-	pci_free_consistent(pdev,
+-						sizeof(struct boom_rx_desc) * RX_RING_SIZE
+-							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+-						vp->rx_ring,
+-						vp->rx_ring_dma);
++	dma_free_coherent(&pdev->dev,
++		sizeof(struct boom_rx_desc) * RX_RING_SIZE +
++		sizeof(struct boom_tx_desc) * TX_RING_SIZE,
++		vp->rx_ring, vp->rx_ring_dma);
+ free_device:
+ 	free_netdev(dev);
+ 	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
+@@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev)
+ 				break;			/* Bad news!  */
+ 
+ 			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
+-			dma = pci_map_single(VORTEX_PCI(vp), skb->data,
+-					     PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
++			dma = dma_map_single(vp->gendev, skb->data,
++					     PKT_BUF_SZ, DMA_FROM_DEVICE);
++			if (dma_mapping_error(vp->gendev, dma))
+ 				break;
+ 			vp->rx_ring[i].addr = cpu_to_le32(dma);
+ 		}
+@@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (vp->bus_master) {
+ 		/* Set the bus-master controller to transfer the packet. */
+ 		int len = (skb->len + 3) & ~3;
+-		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
+-						PCI_DMA_TODEVICE);
+-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
++		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
++						DMA_TO_DEVICE);
++		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
+ 			dev_kfree_skb_any(skb);
+ 			dev->stats.tx_dropped++;
+ 			return NETDEV_TX_OK;
+@@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
+ 
+ 	if (!skb_shinfo(skb)->nr_frags) {
+-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+-					  PCI_DMA_TODEVICE);
+-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
++					  DMA_TO_DEVICE);
++		if (dma_mapping_error(vp->gendev, dma_addr))
+ 			goto out_dma_err;
+ 
+ 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
+@@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	} else {
+ 		int i;
+ 
+-		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+-					  skb_headlen(skb), PCI_DMA_TODEVICE);
+-		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++		dma_addr = dma_map_single(vp->gendev, skb->data,
++					  skb_headlen(skb), DMA_TO_DEVICE);
++		if (dma_mapping_error(vp->gendev, dma_addr))
+ 			goto out_dma_err;
+ 
+ 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
+@@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+-			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
++			dma_addr = skb_frag_dma_map(vp->gendev, frag,
+ 						    0,
+ 						    frag->size,
+ 						    DMA_TO_DEVICE);
+-			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
++			if (dma_mapping_error(vp->gendev, dma_addr)) {
+ 				for(i = i-1; i >= 0; i--)
+-					dma_unmap_page(&VORTEX_PCI(vp)->dev,
++					dma_unmap_page(vp->gendev,
+ 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+ 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+ 						       DMA_TO_DEVICE);
+ 
+-				pci_unmap_single(VORTEX_PCI(vp),
++				dma_unmap_single(vp->gendev,
+ 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+-						 PCI_DMA_TODEVICE);
++						 DMA_TO_DEVICE);
+ 
+ 				goto out_dma_err;
+ 			}
+@@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		}
+ 	}
+ #else
+-	dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
+-	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
++	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
++	if (dma_mapping_error(vp->gendev, dma_addr))
+ 		goto out_dma_err;
+ 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
+ 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+@@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ out:
+ 	return NETDEV_TX_OK;
+ out_dma_err:
+-	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
++	dev_err(vp->gendev, "Error mapping dma buffer\n");
+ 	goto out;
+ }
+ 
+@@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id)
+ 		if (status & DMADone) {
+ 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+-				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
++				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
+ 				pkts_compl++;
+ 				bytes_compl += vp->tx_skb->len;
+ 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
+@@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id)
+ 					struct sk_buff *skb = vp->tx_skbuff[entry];
+ #if DO_ZEROCOPY
+ 					int i;
+-					pci_unmap_single(VORTEX_PCI(vp),
++					dma_unmap_single(vp->gendev,
+ 							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+ 							le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
+-							PCI_DMA_TODEVICE);
++							DMA_TO_DEVICE);
+ 
+ 					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
+-							pci_unmap_page(VORTEX_PCI(vp),
++							dma_unmap_page(vp->gendev,
+ 											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+ 											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+-											 PCI_DMA_TODEVICE);
++											 DMA_TO_DEVICE);
+ #else
+-					pci_unmap_single(VORTEX_PCI(vp),
+-						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
++					dma_unmap_single(vp->gendev,
++						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
+ #endif
+ 					pkts_compl++;
+ 					bytes_compl += skb->len;
+@@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev)
+ 				/* 'skb_put()' points to the start of sk_buff data area. */
+ 				if (vp->bus_master &&
+ 					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+-					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
+-									   pkt_len, PCI_DMA_FROMDEVICE);
++					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
++									   pkt_len, DMA_FROM_DEVICE);
+ 					iowrite32(dma, ioaddr + Wn7_MasterAddr);
+ 					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ 					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
+ 					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
+ 						;
+-					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
++					dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
+ 				} else {
+ 					ioread32_rep(ioaddr + RX_FIFO,
+ 					             skb_put(skb, pkt_len),
+@@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev)
+ 			if (pkt_len < rx_copybreak &&
+ 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
+ 				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+-				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++				dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ 				/* 'skb_put()' points to the start of sk_buff data area. */
+ 				skb_put_data(skb, vp->rx_skbuff[entry]->data,
+ 					     pkt_len);
+-				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++				dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ 				vp->rx_copy++;
+ 			} else {
+ 				/* Pre-allocate the replacement skb.  If it or its
+@@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev)
+ 					dev->stats.rx_dropped++;
+ 					goto clear_complete;
+ 				}
+-				newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
+-							PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+-				if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
++				newdma = dma_map_single(vp->gendev, newskb->data,
++							PKT_BUF_SZ, DMA_FROM_DEVICE);
++				if (dma_mapping_error(vp->gendev, newdma)) {
+ 					dev->stats.rx_dropped++;
+ 					consume_skb(newskb);
+ 					goto clear_complete;
+@@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev)
+ 				vp->rx_skbuff[entry] = newskb;
+ 				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
+ 				skb_put(skb, pkt_len);
+-				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++				dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
+ 				vp->rx_nocopy++;
+ 			}
+ 			skb->protocol = eth_type_trans(skb, dev);
+@@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev)
+ 	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ 		for (i = 0; i < RX_RING_SIZE; i++)
+ 			if (vp->rx_skbuff[i]) {
+-				pci_unmap_single(	VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
+-									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
++				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
++									PKT_BUF_SZ, DMA_FROM_DEVICE);
+ 				dev_kfree_skb(vp->rx_skbuff[i]);
+ 				vp->rx_skbuff[i] = NULL;
+ 			}
+@@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev)
+ 				int k;
+ 
+ 				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+-						pci_unmap_single(VORTEX_PCI(vp),
++						dma_unmap_single(vp->gendev,
+ 										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+ 										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+-										 PCI_DMA_TODEVICE);
++										 DMA_TO_DEVICE);
+ #else
+-				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
++				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
+ #endif
+ 				dev_kfree_skb(skb);
+ 				vp->tx_skbuff[i] = NULL;
+@@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev)
+ 
+ 	pci_iounmap(pdev, vp->ioaddr);
+ 
+-	pci_free_consistent(pdev,
+-						sizeof(struct boom_rx_desc) * RX_RING_SIZE
+-							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+-						vp->rx_ring,
+-						vp->rx_ring_dma);
++	dma_free_coherent(&pdev->dev,
++			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
++			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
++			vp->rx_ring, vp->rx_ring_dma);
+ 
+ 	pci_release_regions(pdev);
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+index b57acb8dc35b..dc25066c59a1 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+@@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ 	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ 	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ 	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+-	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+-	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+-	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+-	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+-	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+-	{0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+-	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+-	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+-	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
++	{0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
++	{0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
++	{0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
++	{0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
++	{0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
++	{0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
++	{0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
++	{0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
++	{0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
+ };
+ 
+ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+@@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ 	{0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ 	{0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ 	{0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+-	{0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+-	{0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+-	{0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+-	{0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+-	{0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+-	{0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+-	{0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+-	{0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+-	{0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+-	{0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+ };
+ 
+ static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index 3177b0c9bd2d..829dc8c5ddff 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap,
+ {
+ 	struct tp_params *tp = &adap->params.tp;
+ 	u64 hash_filter_mask = tp->hash_filter_mask;
+-	u32 mask;
++	u64 ntuple_mask = 0;
+ 
+ 	if (!is_hashfilter(adap))
+ 		return false;
+@@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap,
+ 	if (!fs->val.fport || fs->mask.fport != 0xffff)
+ 		return false;
+ 
+-	if (tp->fcoe_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W;
+-		if (mask && !fs->mask.fcoe)
+-			return false;
+-	}
++	/* calculate tuple mask and compare with mask configured in hw */
++	if (tp->fcoe_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
+ 
+-	if (tp->port_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W;
+-		if (mask && !fs->mask.iport)
+-			return false;
+-	}
++	if (tp->port_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+ 
+ 	if (tp->vnic_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W;
+-
+-		if ((adap->params.tp.ingress_config & VNIC_F)) {
+-			if (mask && !fs->mask.pfvf_vld)
+-				return false;
+-		} else {
+-			if (mask && !fs->mask.ovlan_vld)
+-				return false;
+-		}
++		if ((adap->params.tp.ingress_config & VNIC_F))
++			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
++		else
++			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
++				tp->vnic_shift;
+ 	}
+ 
+-	if (tp->vlan_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W;
+-		if (mask && !fs->mask.ivlan)
+-			return false;
+-	}
++	if (tp->vlan_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
+ 
+-	if (tp->tos_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W;
+-		if (mask && !fs->mask.tos)
+-			return false;
+-	}
++	if (tp->tos_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
+ 
+-	if (tp->protocol_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W;
+-		if (mask && !fs->mask.proto)
+-			return false;
+-	}
++	if (tp->protocol_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ 
+-	if (tp->ethertype_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->ethertype_shift) &
+-			FT_ETHERTYPE_W;
+-		if (mask && !fs->mask.ethtype)
+-			return false;
+-	}
++	if (tp->ethertype_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ 
+-	if (tp->macmatch_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W;
+-		if (mask && !fs->mask.macidx)
+-			return false;
+-	}
++	if (tp->macmatch_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
++
++	if (tp->matchtype_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
++
++	if (tp->frag_shift >= 0)
++		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
++
++	if (ntuple_mask != hash_filter_mask)
++		return false;
+ 
+-	if (tp->matchtype_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->matchtype_shift) &
+-			FT_MPSHITTYPE_W;
+-		if (mask && !fs->mask.matchtype)
+-			return false;
+-	}
+-	if (tp->frag_shift >= 0) {
+-		mask = (hash_filter_mask >> tp->frag_shift) &
+-			FT_FRAGMENTATION_W;
+-		if (mask && !fs->mask.frag)
+-			return false;
+-	}
+ 	return true;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 4d84cab77105..e8a3a45d0b53 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -3007,6 +3007,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ 		mlx4_err(dev, "Failed to create file for port %d\n", port);
+ 		devlink_port_unregister(&info->devlink_port);
+ 		info->port = -1;
++		return err;
+ 	}
+ 
+ 	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
+@@ -3028,9 +3029,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+ 				   &info->port_attr);
+ 		devlink_port_unregister(&info->devlink_port);
+ 		info->port = -1;
++		return err;
+ 	}
+ 
+-	return err;
++	return 0;
+ }
+ 
+ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index c4f14fdc4e77..0161e01778f2 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 	struct qed_ll2_tx_packet *p_pkt = NULL;
+ 	struct qed_ll2_info *p_ll2_conn;
+ 	struct qed_ll2_tx_queue *p_tx;
++	unsigned long flags = 0;
+ 	dma_addr_t tx_frag;
+ 
+ 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+@@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 
+ 	p_tx = &p_ll2_conn->tx_queue;
+ 
++	spin_lock_irqsave(&p_tx->lock, flags);
+ 	while (!list_empty(&p_tx->active_descq)) {
+ 		p_pkt = list_first_entry(&p_tx->active_descq,
+ 					 struct qed_ll2_tx_packet, list_entry);
+@@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 		list_del(&p_pkt->list_entry);
+ 		b_last_packet = list_empty(&p_tx->active_descq);
+ 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
++		spin_unlock_irqrestore(&p_tx->lock, flags);
+ 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ 			struct qed_ooo_buffer *p_buffer;
+ 
+@@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 						      b_last_frag,
+ 						      b_last_packet);
+ 		}
++		spin_lock_irqsave(&p_tx->lock, flags);
+ 	}
++	spin_unlock_irqrestore(&p_tx->lock, flags);
+ }
+ 
+ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+@@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 	struct qed_ll2_info *p_ll2_conn = NULL;
+ 	struct qed_ll2_rx_packet *p_pkt = NULL;
+ 	struct qed_ll2_rx_queue *p_rx;
++	unsigned long flags = 0;
+ 
+ 	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ 	if (!p_ll2_conn)
+@@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 
+ 	p_rx = &p_ll2_conn->rx_queue;
+ 
++	spin_lock_irqsave(&p_rx->lock, flags);
+ 	while (!list_empty(&p_rx->active_descq)) {
+ 		p_pkt = list_first_entry(&p_rx->active_descq,
+ 					 struct qed_ll2_rx_packet, list_entry);
+ 		if (!p_pkt)
+ 			break;
+-
+ 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
++		spin_unlock_irqrestore(&p_rx->lock, flags);
+ 
+ 		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ 			struct qed_ooo_buffer *p_buffer;
+@@ -588,7 +595,9 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+ 						      cookie,
+ 						      rx_buf_addr, b_last);
+ 		}
++		spin_lock_irqsave(&p_rx->lock, flags);
+ 	}
++	spin_unlock_irqrestore(&p_rx->lock, flags);
+ }
+ 
+ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+@@ -601,6 +610,27 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
+ 	return bd_flags;
+ }
+ 
++static bool
++qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
++				struct core_rx_slow_path_cqe *p_cqe)
++{
++	struct ooo_opaque *iscsi_ooo;
++	u32 cid;
++
++	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
++		return false;
++
++	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
++	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
++		return false;
++
++	/* Need to make a flush */
++	cid = le32_to_cpu(iscsi_ooo->cid);
++	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
++
++	return true;
++}
++
+ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ 				  struct qed_ll2_info *p_ll2_conn)
+ {
+@@ -627,6 +657,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ 		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ 		cqe_type = cqe->rx_cqe_sp.type;
+ 
++		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
++			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
++							    &cqe->rx_cqe_sp))
++				continue;
++
+ 		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ 			DP_NOTICE(p_hwfn,
+ 				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+@@ -807,6 +842,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ 	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ 	int rc;
+ 
++	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
++		return 0;
++
+ 	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
+ 	if (rc)
+ 		return rc;
+@@ -827,6 +865,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+ 	u16 new_idx = 0, num_bds = 0;
+ 	int rc;
+ 
++	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
++		return 0;
++
+ 	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ 	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ 
+@@ -1880,17 +1921,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
+ 
+ 	/* Stop Tx & Rx of connection, if needed */
+ 	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
++		p_ll2_conn->tx_queue.b_cb_registred = false;
++		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+ 		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ 		if (rc)
+ 			goto out;
++
+ 		qed_ll2_txq_flush(p_hwfn, connection_handle);
++		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ 	}
+ 
+ 	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
++		p_ll2_conn->rx_queue.b_cb_registred = false;
++		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+ 		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ 		if (rc)
+ 			goto out;
++
+ 		qed_ll2_rxq_flush(p_hwfn, connection_handle);
++		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ 	}
+ 
+ 	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+@@ -1938,16 +1987,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+ 	if (!p_ll2_conn)
+ 		return;
+ 
+-	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+-		p_ll2_conn->rx_queue.b_cb_registred = false;
+-		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+-	}
+-
+-	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+-		p_ll2_conn->tx_queue.b_cb_registred = false;
+-		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+-	}
+-
+ 	kfree(p_ll2_conn->tx_queue.descq_mem);
+ 	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+ 
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 6c7bdd0c361a..ffae19714ffd 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -680,15 +680,6 @@ static void tun_queue_purge(struct tun_file *tfile)
+ 	skb_queue_purge(&tfile->sk.sk_error_queue);
+ }
+ 
+-static void tun_cleanup_tx_ring(struct tun_file *tfile)
+-{
+-	if (tfile->tx_ring.queue) {
+-		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+-		xdp_rxq_info_unreg(&tfile->xdp_rxq);
+-		memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+-	}
+-}
+-
+ static void __tun_detach(struct tun_file *tfile, bool clean)
+ {
+ 	struct tun_file *ntfile;
+@@ -735,7 +726,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
+ 			    tun->dev->reg_state == NETREG_REGISTERED)
+ 				unregister_netdevice(tun->dev);
+ 		}
+-		tun_cleanup_tx_ring(tfile);
++		if (tun)
++			xdp_rxq_info_unreg(&tfile->xdp_rxq);
++		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
+ 		sock_put(&tfile->sk);
+ 	}
+ }
+@@ -775,14 +768,14 @@ static void tun_detach_all(struct net_device *dev)
+ 		tun_napi_del(tun, tfile);
+ 		/* Drop read queue */
+ 		tun_queue_purge(tfile);
++		xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ 		sock_put(&tfile->sk);
+-		tun_cleanup_tx_ring(tfile);
+ 	}
+ 	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
+ 		tun_enable_queue(tfile);
+ 		tun_queue_purge(tfile);
++		xdp_rxq_info_unreg(&tfile->xdp_rxq);
+ 		sock_put(&tfile->sk);
+-		tun_cleanup_tx_ring(tfile);
+ 	}
+ 	BUG_ON(tun->numdisabled != 0);
+ 
+@@ -826,7 +819,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ 	}
+ 
+ 	if (!tfile->detached &&
+-	    ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) {
++	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
++			    GFP_KERNEL, tun_ptr_free)) {
+ 		err = -ENOMEM;
+ 		goto out;
+ 	}
+@@ -3131,6 +3125,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 					    &tun_proto, 0);
+ 	if (!tfile)
+ 		return -ENOMEM;
++	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
++		sk_free(&tfile->sk);
++		return -ENOMEM;
++	}
++
+ 	RCU_INIT_POINTER(tfile->tun, NULL);
+ 	tfile->flags = 0;
+ 	tfile->ifindex = 0;
+@@ -3151,8 +3150,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
+ 
+ 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+ 
+-	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 9ebe2a689966..27a9bb8c9611 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
+ 
+ 	gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
+ 	while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
++		/* Prevent any &gdesc->tcd field from being (speculatively)
++		 * read before (&gdesc->tcd)->gen is read.
++		 */
++		dma_rmb();
++
+ 		completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
+ 					       &gdesc->tcd), tq, adapter->pdev,
+ 					       adapter);
+@@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+ 		gdesc->txd.tci = skb_vlan_tag_get(skb);
+ 	}
+ 
++	/* Ensure that the write to (&gdesc->txd)->gen will be observed after
++	 * all other writes to &gdesc->txd.
++	 */
++	dma_wmb();
++
+ 	/* finally flips the GEN bit of the SOP desc. */
+ 	gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
+ 						  VMXNET3_TXD_GEN);
+@@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 			 */
+ 			break;
+ 		}
++
++		/* Prevent any rcd field from being (speculatively) read before
++		 * rcd->gen is read.
++		 */
++		dma_rmb();
++
+ 		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
+ 		       rcd->rqID != rq->dataRingQid);
+ 		idx = rcd->rxdIdx;
+@@ -1528,6 +1544,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
+ 		ring->next2comp = idx;
+ 		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+ 		ring = rq->rx_ring + ring_idx;
++
++		/* Ensure that the writes to rxd->gen bits will be observed
++		 * after all other writes to rxd objects.
++		 */
++		dma_wmb();
++
+ 		while (num_to_alloc) {
+ 			vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ 					  &rxCmdDesc);
+@@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
+ /* ==================== initialization and cleanup routines ============ */
+ 
+ static int
+-vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
++vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
+ {
+ 	int err;
+ 	unsigned long mmio_start, mmio_len;
+@@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+ 		return err;
+ 	}
+ 
+-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+-			dev_err(&pdev->dev,
+-				"pci_set_consistent_dma_mask failed\n");
+-			err = -EIO;
+-			goto err_set_mask;
+-		}
+-		*dma64 = true;
+-	} else {
+-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+-			dev_err(&pdev->dev,
+-				"pci_set_dma_mask failed\n");
+-			err = -EIO;
+-			goto err_set_mask;
+-		}
+-		*dma64 = false;
+-	}
+-
+ 	err = pci_request_selected_regions(pdev, (1 << 2) - 1,
+ 					   vmxnet3_driver_name);
+ 	if (err) {
+ 		dev_err(&pdev->dev,
+ 			"Failed to request region for adapter: error %d\n", err);
+-		goto err_set_mask;
++		goto err_enable_device;
+ 	}
+ 
+ 	pci_set_master(pdev);
+@@ -2751,7 +2755,7 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
+ 	iounmap(adapter->hw_addr0);
+ err_ioremap:
+ 	pci_release_selected_regions(pdev, (1 << 2) - 1);
+-err_set_mask:
++err_enable_device:
+ 	pci_disable_device(pdev);
+ 	return err;
+ }
+@@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ #endif
+ 	};
+ 	int err;
+-	bool dma64 = false; /* stupid gcc */
++	bool dma64;
+ 	u32 ver;
+ 	struct net_device *netdev;
+ 	struct vmxnet3_adapter *adapter;
+@@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ 	adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+ 	adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
+ 
++	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
++		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
++			dev_err(&pdev->dev,
++				"pci_set_consistent_dma_mask failed\n");
++			err = -EIO;
++			goto err_set_mask;
++		}
++		dma64 = true;
++	} else {
++		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
++			dev_err(&pdev->dev,
++				"pci_set_dma_mask failed\n");
++			err = -EIO;
++			goto err_set_mask;
++		}
++		dma64 = false;
++	}
++
+ 	spin_lock_init(&adapter->cmd_lock);
+ 	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+ 					     sizeof(struct vmxnet3_adapter),
+@@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ 	if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
+ 		dev_err(&pdev->dev, "Failed to map dma\n");
+ 		err = -EFAULT;
+-		goto err_dma_map;
++		goto err_set_mask;
+ 	}
+ 	adapter->shared = dma_alloc_coherent(
+ 				&adapter->pdev->dev,
+@@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ 	}
+ #endif /* VMXNET3_RSS */
+ 
+-	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
++	err = vmxnet3_alloc_pci_resources(adapter);
+ 	if (err < 0)
+ 		goto err_alloc_pci;
+ 
+@@ -3504,7 +3526,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
+ err_alloc_shared:
+ 	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ 			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
+-err_dma_map:
++err_set_mask:
+ 	free_netdev(netdev);
+ 	return err;
+ }
+diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
+index a3326463b71f..a2c554f8a61b 100644
+--- a/drivers/net/vmxnet3/vmxnet3_int.h
++++ b/drivers/net/vmxnet3/vmxnet3_int.h
+@@ -69,10 +69,12 @@
+ /*
+  * Version numbers
+  */
+-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.14.0-k"
++#define VMXNET3_DRIVER_VERSION_STRING   "1.4.16.0-k"
+ 
+-/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
+-#define VMXNET3_DRIVER_VERSION_NUM      0x01040e00
++/* Each byte of this 32-bit integer encodes a version number in
++ * VMXNET3_DRIVER_VERSION_STRING.
++ */
++#define VMXNET3_DRIVER_VERSION_NUM      0x01041000
+ 
+ #if defined(CONFIG_PCI_MSI)
+ 	/* RSS only makes sense if MSI-X is supported. */
+diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
+index e1cfa06810ef..e79f2a181ad2 100644
+--- a/drivers/rtc/hctosys.c
++++ b/drivers/rtc/hctosys.c
+@@ -49,6 +49,11 @@ static int __init rtc_hctosys(void)
+ 
+ 	tv64.tv_sec = rtc_tm_to_time64(&tm);
+ 
++#if BITS_PER_LONG == 32
++	if (tv64.tv_sec > INT_MAX)
++		goto err_read;
++#endif
++
+ 	err = do_settimeofday64(&tv64);
+ 
+ 	dev_info(rtc->dev.parent,
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index d67769265185..a1c44d0c8557 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -235,3 +235,5 @@ static struct platform_driver goldfish_rtc = {
+ };
+ 
+ module_platform_driver(goldfish_rtc);
++
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
+index c90fba3ed861..6620016869cf 100644
+--- a/drivers/rtc/rtc-m41t80.c
++++ b/drivers/rtc/rtc-m41t80.c
+@@ -885,7 +885,6 @@ static int m41t80_probe(struct i2c_client *client,
+ {
+ 	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ 	int rc = 0;
+-	struct rtc_device *rtc = NULL;
+ 	struct rtc_time tm;
+ 	struct m41t80_data *m41t80_data = NULL;
+ 	bool wakeup_source = false;
+@@ -909,6 +908,10 @@ static int m41t80_probe(struct i2c_client *client,
+ 		m41t80_data->features = id->driver_data;
+ 	i2c_set_clientdata(client, m41t80_data);
+ 
++	m41t80_data->rtc =  devm_rtc_allocate_device(&client->dev);
++	if (IS_ERR(m41t80_data->rtc))
++		return PTR_ERR(m41t80_data->rtc);
++
+ #ifdef CONFIG_OF
+ 	wakeup_source = of_property_read_bool(client->dev.of_node,
+ 					      "wakeup-source");
+@@ -932,15 +935,11 @@ static int m41t80_probe(struct i2c_client *client,
+ 		device_init_wakeup(&client->dev, true);
+ 	}
+ 
+-	rtc = devm_rtc_device_register(&client->dev, client->name,
+-				       &m41t80_rtc_ops, THIS_MODULE);
+-	if (IS_ERR(rtc))
+-		return PTR_ERR(rtc);
++	m41t80_data->rtc->ops = &m41t80_rtc_ops;
+ 
+-	m41t80_data->rtc = rtc;
+ 	if (client->irq <= 0) {
+ 		/* We cannot support UIE mode if we do not have an IRQ line */
+-		rtc->uie_unsupported = 1;
++		m41t80_data->rtc->uie_unsupported = 1;
+ 	}
+ 
+ 	/* Make sure HT (Halt Update) bit is cleared */
+@@ -993,6 +992,11 @@ static int m41t80_probe(struct i2c_client *client,
+ 	if (m41t80_data->features & M41T80_FEATURE_SQ)
+ 		m41t80_sqw_register_clk(m41t80_data);
+ #endif
++
++	rc = rtc_register_device(m41t80_data->rtc);
++	if (rc)
++		return rc;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
+index 35c9aada07c8..79c8da54e922 100644
+--- a/drivers/rtc/rtc-rk808.c
++++ b/drivers/rtc/rtc-rk808.c
+@@ -416,12 +416,11 @@ static int rk808_rtc_probe(struct platform_device *pdev)
+ 
+ 	device_init_wakeup(&pdev->dev, 1);
+ 
+-	rk808_rtc->rtc = devm_rtc_device_register(&pdev->dev, "rk808-rtc",
+-						  &rk808_rtc_ops, THIS_MODULE);
+-	if (IS_ERR(rk808_rtc->rtc)) {
+-		ret = PTR_ERR(rk808_rtc->rtc);
+-		return ret;
+-	}
++	rk808_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
++	if (IS_ERR(rk808_rtc->rtc))
++		return PTR_ERR(rk808_rtc->rtc);
++
++	rk808_rtc->rtc->ops = &rk808_rtc_ops;
+ 
+ 	rk808_rtc->irq = platform_get_irq(pdev, 0);
+ 	if (rk808_rtc->irq < 0) {
+@@ -438,9 +437,10 @@ static int rk808_rtc_probe(struct platform_device *pdev)
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "Failed to request alarm IRQ %d: %d\n",
+ 			rk808_rtc->irq, ret);
++		return ret;
+ 	}
+ 
+-	return ret;
++	return rtc_register_device(rk808_rtc->rtc);
+ }
+ 
+ static struct platform_driver rk808_rtc_driver = {
+diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
+index 026035373ae6..38a12435b5a0 100644
+--- a/drivers/rtc/rtc-rp5c01.c
++++ b/drivers/rtc/rtc-rp5c01.c
+@@ -249,16 +249,24 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
+ 
+ 	platform_set_drvdata(dev, priv);
+ 
+-	rtc = devm_rtc_device_register(&dev->dev, "rtc-rp5c01", &rp5c01_rtc_ops,
+-				  THIS_MODULE);
++	rtc = devm_rtc_allocate_device(&dev->dev);
+ 	if (IS_ERR(rtc))
+ 		return PTR_ERR(rtc);
++
++	rtc->ops = &rp5c01_rtc_ops;
++
+ 	priv->rtc = rtc;
+ 
+ 	error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
+ 	if (error)
+ 		return error;
+ 
++	error = rtc_register_device(rtc);
++	if (error) {
++		sysfs_remove_bin_file(&dev->dev.kobj, &priv->nvram_attr);
++		return error;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index d8ef9e052c4f..9af591d5223c 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -132,20 +132,23 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ {
+ 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
+ 	unsigned long time;
++	int ret;
+ 
+ 	rtc_tm_to_time(tm, &time);
+ 
+ 	/* Disable RTC first */
+-	snvs_rtc_enable(data, false);
++	ret = snvs_rtc_enable(data, false);
++	if (ret)
++		return ret;
+ 
+ 	/* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
+ 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
+ 	regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
+ 
+ 	/* Enable RTC again */
+-	snvs_rtc_enable(data, true);
++	ret = snvs_rtc_enable(data, true);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+@@ -288,7 +291,11 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ 	regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
+ 
+ 	/* Enable RTC */
+-	snvs_rtc_enable(data, true);
++	ret = snvs_rtc_enable(data, true);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to enable rtc %d\n", ret);
++		goto error_rtc_device_register;
++	}
+ 
+ 	device_init_wakeup(&pdev->dev, true);
+ 
+diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
+index 560d9a5e0225..a9528083061d 100644
+--- a/drivers/rtc/rtc-tx4939.c
++++ b/drivers/rtc/rtc-tx4939.c
+@@ -86,7 +86,8 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ 	for (i = 2; i < 6; i++)
+ 		buf[i] = __raw_readl(&rtcreg->dat);
+ 	spin_unlock_irq(&pdata->lock);
+-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
++	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
++		(buf[3] << 8) | buf[2];
+ 	rtc_time_to_tm(sec, tm);
+ 	return rtc_valid_tm(tm);
+ }
+@@ -147,7 +148,8 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ 	alrm->enabled = (ctl & TX4939_RTCCTL_ALME) ? 1 : 0;
+ 	alrm->pending = (ctl & TX4939_RTCCTL_ALMD) ? 1 : 0;
+ 	spin_unlock_irq(&pdata->lock);
+-	sec = (buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2];
++	sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) |
++		(buf[3] << 8) | buf[2];
+ 	rtc_time_to_tm(sec, &alrm->time);
+ 	return rtc_valid_tm(&alrm->time);
+ }
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index a8b831000b2d..18c4f933e8b9 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -4,7 +4,7 @@
+  *
+  * Debug traces for zfcp.
+  *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -308,6 +308,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
+ 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+ }
+ 
++/**
++ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
++ * @tag: identifier for event
++ * @adapter: adapter on which the erp_action should run
++ * @port: remote port involved in the erp_action
++ * @sdev: scsi device involved in the erp_action
++ * @want: wanted erp_action
++ * @need: required erp_action
++ *
++ * The adapter->erp_lock must not be held.
++ */
++void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++			    struct zfcp_port *port, struct scsi_device *sdev,
++			    u8 want, u8 need)
++{
++	unsigned long flags;
++
++	read_lock_irqsave(&adapter->erp_lock, flags);
++	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
++	read_unlock_irqrestore(&adapter->erp_lock, flags);
++}
+ 
+ /**
+  * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index bf8ea4df2bb8..e5eed8aac0ce 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -4,7 +4,7 @@
+  *
+  * External function declarations.
+  *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2018
+  */
+ 
+ #ifndef ZFCP_EXT_H
+@@ -35,6 +35,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
+ extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
+ 			      struct zfcp_port *, struct scsi_device *, u8, u8);
++extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
++				   struct zfcp_port *port,
++				   struct scsi_device *sdev, u8 want, u8 need);
+ extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+ extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
+ 				 struct zfcp_erp_action *erp);
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 4d2ba5682493..22f9562f415c 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -4,7 +4,7 @@
+  *
+  * Interface to Linux SCSI midlayer.
+  *
+- * Copyright IBM Corp. 2002, 2017
++ * Copyright IBM Corp. 2002, 2018
+  */
+ 
+ #define KMSG_COMPONENT "zfcp"
+@@ -618,9 +618,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
+ 	ids.port_id = port->d_id;
+ 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
+ 
+-	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+-			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
++	zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
++			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
++			       ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
+ 	if (!rport) {
+ 		dev_err(&port->adapter->ccw_device->dev,
+@@ -642,9 +642,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
+ 	struct fc_rport *rport = port->rport;
+ 
+ 	if (rport) {
+-		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+-				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
++		zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
++				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
++				       ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ 		fc_remote_port_delete(rport);
+ 		port->rport = NULL;
+ 	}
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index d9f2229664ad..d62ddd63f4fe 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -1502,9 +1502,10 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ 	host = aac->scsi_host_ptr;
+ 	scsi_block_requests(host);
+ 	aac_adapter_disable_int(aac);
+-	if (aac->thread->pid != current->pid) {
++	if (aac->thread && aac->thread->pid != current->pid) {
+ 		spin_unlock_irq(host->host_lock);
+ 		kthread_stop(aac->thread);
++		aac->thread = NULL;
+ 		jafo = 1;
+ 	}
+ 
+@@ -1591,6 +1592,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+ 					  aac->name);
+ 		if (IS_ERR(aac->thread)) {
+ 			retval = PTR_ERR(aac->thread);
++			aac->thread = NULL;
+ 			goto out;
+ 		}
+ 	}
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 2664ea0df35f..f24fb942065d 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1562,6 +1562,7 @@ static void __aac_shutdown(struct aac_dev * aac)
+ 				up(&fib->event_wait);
+ 		}
+ 		kthread_stop(aac->thread);
++		aac->thread = NULL;
+ 	}
+ 
+ 	aac_send_shutdown(aac);
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index ac77081e6e9e..b07612562c39 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -905,7 +905,12 @@ lpfc_issue_lip(struct Scsi_Host *shost)
+ 	LPFC_MBOXQ_t *pmboxq;
+ 	int mbxstatus = MBXERR_ERROR;
+ 
++	/*
++	 * If the link is offline, disabled or BLOCK_MGMT_IO
++	 * it doesn't make any sense to allow issue_lip
++	 */
+ 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
++	    (phba->hba_flag & LINK_DISABLED) ||
+ 	    (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
+ 		return -EPERM;
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index b159a5c4e388..9265906d956e 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -696,8 +696,9 @@ lpfc_work_done(struct lpfc_hba *phba)
+ 		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
+ 		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
+ 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
+-			/* Set the lpfc data pending flag */
+-			set_bit(LPFC_DATA_READY, &phba->data_flags);
++			/* Preserve legacy behavior. */
++			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
++				set_bit(LPFC_DATA_READY, &phba->data_flags);
+ 		} else {
+ 			if (phba->link_state >= LPFC_LINK_UP ||
+ 			    phba->link_flag & LS_MDS_LOOPBACK) {
+diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
+index d841aa42f607..730393a65e25 100644
+--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
++++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
+@@ -1998,8 +1998,14 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 			ndlp->nlp_type |= NLP_NVME_TARGET;
+ 			if (bf_get_be32(prli_disc, nvpr))
+ 				ndlp->nlp_type |= NLP_NVME_DISCOVERY;
++
++			/*
++			 * If prli_fba is set, the Target supports FirstBurst.
++			 * If prli_fb_sz is 0, the FirstBurst size is unlimited,
++			 * otherwise it defines the actual size supported by
++			 * the NVME Target.
++			 */
+ 			if ((bf_get_be32(prli_fba, nvpr) == 1) &&
+-			    (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
+ 			    (phba->cfg_nvme_enable_fb) &&
+ 			    (!phba->nvmet_support)) {
+ 				/* Both sides support FB. The target's first
+@@ -2008,6 +2014,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ 				ndlp->nlp_flag |= NLP_FIRSTBURST;
+ 				ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
+ 								 nvpr);
++
++				/* Expressed in units of 512 bytes */
++				if (ndlp->nvme_fb_size)
++					ndlp->nvme_fb_size <<=
++						LPFC_NVME_FB_SHIFT;
++				else
++					ndlp->nvme_fb_size = LPFC_NVME_MAX_FB;
+ 			}
+ 		}
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
+index 81e3a4f10c3c..6327f858c4c8 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.c
++++ b/drivers/scsi/lpfc/lpfc_nvme.c
+@@ -241,10 +241,11 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ 	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ 			 "6047 nvme cmpl Enter "
+-			 "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
+-			 "bmp:%p ndlp:%p\n",
++			 "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
++			 "lsreg:%p bmp:%p ndlp:%p\n",
+ 			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+ 			 cmdwqe->sli4_xritag, status,
++			 (wcqe->parameter & 0xffff),
+ 			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+ 
+ 	lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
+@@ -419,6 +420,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ {
+ 	int ret = 0;
+ 	struct lpfc_nvme_lport *lport;
++	struct lpfc_nvme_rport *rport;
+ 	struct lpfc_vport *vport;
+ 	struct lpfc_nodelist *ndlp;
+ 	struct ulp_bde64 *bpl;
+@@ -437,19 +439,18 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ 	 */
+ 
+ 	lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
++	rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ 	vport = lport->vport;
+ 
+ 	if (vport->load_flag & FC_UNLOADING)
+ 		return -ENODEV;
+ 
+-	if (vport->load_flag & FC_UNLOADING)
+-		return -ENODEV;
+-
+-	ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
++	/* Need the ndlp.  It is stored in the driver's rport. */
++	ndlp = rport->ndlp;
+ 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+-				 "6051 DID x%06x not an active rport.\n",
+-				 pnvme_rport->port_id);
++				 "6051 Remoteport %p, rport has invalid ndlp. "
++				 "Failing LS Req\n", pnvme_rport);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -500,8 +501,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ 
+ 	/* Expand print to include key fields. */
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+-			 "6149 ENTER.  lport %p, rport %p lsreq%p rqstlen:%d "
+-			 "rsplen:%d %pad %pad\n",
++			 "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
++			 "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
++			 ndlp->nlp_DID,
+ 			 pnvme_lport, pnvme_rport,
+ 			 pnvme_lsreq, pnvme_lsreq->rqstlen,
+ 			 pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+@@ -517,7 +519,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ 				ndlp, 2, 30, 0);
+ 	if (ret != WQE_SUCCESS) {
+ 		atomic_inc(&lport->xmt_ls_err);
+-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
++		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ 				 "6052 EXIT. issue ls wqe failed lport %p, "
+ 				 "rport %p lsreq%p Status %x DID %x\n",
+ 				 pnvme_lport, pnvme_rport, pnvme_lsreq,
+@@ -980,14 +982,14 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ 			phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+ 	}
+ #endif
+-	freqpriv = nCmd->private;
+-	freqpriv->nvme_buf = NULL;
+ 
+ 	/* NVME targets need completion held off until the abort exchange
+ 	 * completes unless the NVME Rport is getting unregistered.
+ 	 */
+ 
+ 	if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
++		freqpriv = nCmd->private;
++		freqpriv->nvme_buf = NULL;
+ 		nCmd->done(nCmd);
+ 		lpfc_ncmd->nvmeCmd = NULL;
+ 	}
+diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
+index e79f8f75758c..48b0229ebc99 100644
+--- a/drivers/scsi/lpfc/lpfc_nvme.h
++++ b/drivers/scsi/lpfc/lpfc_nvme.h
+@@ -27,6 +27,8 @@
+ 
+ #define LPFC_NVME_WAIT_TMO              10
+ #define LPFC_NVME_EXPEDITE_XRICNT	8
++#define LPFC_NVME_FB_SHIFT		9
++#define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */
+ 
+ struct lpfc_nvme_qhandle {
+ 	uint32_t index;		/* WQ index to use */
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 5f5528a12308..149f21f53b13 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -129,6 +129,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+ 	/* set consumption flag every once in a while */
+ 	if (!((q->host_index + 1) % q->entry_repost))
+ 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
++	else
++		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
+ 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+ 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
+index 7de5d8d75480..eb5471bc7263 100644
+--- a/drivers/scsi/mvsas/mv_94xx.c
++++ b/drivers/scsi/mvsas/mv_94xx.c
+@@ -1080,16 +1080,16 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ 			void __iomem *regs = mvi->regs_ex - 0x10200;
+ 
+ 			int drive = (i/3) & (4-1); /* drive number on host */
+-			u32 block = mr32(MVS_SGPIO_DCTRL +
++			int driveshift = drive * 8; /* bit offset of drive */
++			u32 block = ioread32be(regs + MVS_SGPIO_DCTRL +
+ 				MVS_SGPIO_HOST_OFFSET * mvi->id);
+ 
+-
+ 			/*
+ 			* if bit is set then create a mask with the first
+ 			* bit of the drive set in the mask ...
+ 			*/
+-			u32 bit = (write_data[i/8] & (1 << (i&(8-1)))) ?
+-				1<<(24-drive*8) : 0;
++			u32 bit = get_unaligned_be32(write_data) & (1 << i) ?
++				1 << driveshift : 0;
+ 
+ 			/*
+ 			* ... and then shift it to the right position based
+@@ -1098,26 +1098,27 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ 			switch (i%3) {
+ 			case 0: /* activity */
+ 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT)
+-					<< (24-drive*8));
++					<< driveshift);
+ 					/* hardwire activity bit to SOF */
+ 				block |= LED_BLINKA_SOF << (
+ 					MVS_SGPIO_DCTRL_ACT_SHIFT +
+-					(24-drive*8));
++					driveshift);
+ 				break;
+ 			case 1: /* id */
+ 				block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT)
+-					<< (24-drive*8));
++					<< driveshift);
+ 				block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT;
+ 				break;
+ 			case 2: /* fail */
+ 				block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT)
+-					<< (24-drive*8));
++					<< driveshift);
+ 				block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT;
+ 				break;
+ 			}
+ 
+-			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
+-				block);
++			iowrite32be(block,
++				regs + MVS_SGPIO_DCTRL +
++				MVS_SGPIO_HOST_OFFSET * mvi->id);
+ 
+ 		}
+ 
+@@ -1132,7 +1133,7 @@ static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv,
+ 			void __iomem *regs = mvi->regs_ex - 0x10200;
+ 
+ 			mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
+-				be32_to_cpu(((u32 *) write_data)[i]));
++				((u32 *) write_data)[i]);
+ 		}
+ 		return reg_count;
+ 	}
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index f3b117246d47..4a2d276c42eb 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -189,6 +189,7 @@ static struct {
+ 	{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
+ 	{"HP", "DF400", "*", BLIST_REPORTLUN2},
+ 	{"HP", "DF500", "*", BLIST_REPORTLUN2},
++	{"HP", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2},
+ 	{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 912eacdc2d83..e93e9178978c 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -856,6 +856,17 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 		/* for passthrough error may be set */
+ 		error = BLK_STS_OK;
+ 	}
++	/*
++	 * Another corner case: the SCSI status byte is non-zero but 'good'.
++	 * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when
++	 * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD
++	 * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related
++	 * intermediate statuses (both obsolete in SAM-4) as good.
++	 */
++	if (status_byte(result) && scsi_status_is_good(result)) {
++		result = 0;
++		error = BLK_STS_OK;
++	}
+ 
+ 	/*
+ 	 * special case: failed zero length commands always need to
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index c198b96368dd..5c40d809830f 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1894,7 +1894,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
+ 		num = (rem_sz > scatter_elem_sz_prev) ?
+ 			scatter_elem_sz_prev : rem_sz;
+ 
+-		schp->pages[k] = alloc_pages(gfp_mask, order);
++		schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
+ 		if (!schp->pages[k])
+ 			goto out;
+ 
+diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+index 2817e67df3d5..98a51521d853 100644
+--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
++++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+@@ -324,7 +324,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch)
+ 		}
+ 
+ 		fd = dpaa2_dq_fd(dq);
+-		fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
++		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
+ 		fq->stats.frames++;
+ 
+ 		fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+@@ -374,12 +374,14 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ 	/* Prepare the HW SGT structure */
+ 	sgt_buf_size = priv->tx_data_offset +
+ 		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
+-	sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
++	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ 	if (unlikely(!sgt_buf)) {
+ 		err = -ENOMEM;
+ 		goto sgt_buf_alloc_failed;
+ 	}
+ 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
++	memset(sgt_buf, 0, sgt_buf_size);
++
+ 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ 
+ 	/* Fill in the HW SGT structure.
+@@ -421,7 +423,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
+ 	return 0;
+ 
+ dma_map_single_failed:
+-	kfree(sgt_buf);
++	skb_free_frag(sgt_buf);
+ sgt_buf_alloc_failed:
+ 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
+ dma_map_sg_failed:
+@@ -525,9 +527,9 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
+ 		return;
+ 	}
+ 
+-	/* Free SGT buffer kmalloc'ed on tx */
++	/* Free SGT buffer allocated on tx */
+ 	if (fd_format != dpaa2_fd_single)
+-		kfree(skbh);
++		skb_free_frag(skbh);
+ 
+ 	/* Move on with skb release */
+ 	dev_kfree_skb(skb);
+@@ -1906,7 +1908,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
+ 	queue.destination.id = fq->channel->dpcon_id;
+ 	queue.destination.type = DPNI_DEST_DPCON;
+ 	queue.destination.priority = 1;
+-	queue.user_context = (u64)fq;
++	queue.user_context = (u64)(uintptr_t)fq;
+ 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ 			     DPNI_QUEUE_RX, 0, fq->flowid,
+ 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+@@ -1958,7 +1960,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
+ 	queue.destination.id = fq->channel->dpcon_id;
+ 	queue.destination.type = DPNI_DEST_DPCON;
+ 	queue.destination.priority = 0;
+-	queue.user_context = (u64)fq;
++	queue.user_context = (u64)(uintptr_t)fq;
+ 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
+ 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
+diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
+index 975dbbb3abd0..7da3eb4ca4be 100644
+--- a/drivers/staging/ks7010/ks_hostif.c
++++ b/drivers/staging/ks7010/ks_hostif.c
+@@ -242,9 +242,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ 	offset = 0;
+ 
+ 	while (bsize > offset) {
+-		/* DPRINTK(4, "Element ID=%d\n",*bp); */
+-		switch (*bp) {
+-		case 0:	/* ssid */
++		switch (*bp) { /* Information Element ID */
++		case WLAN_EID_SSID:
+ 			if (*(bp + 1) <= SSID_MAX_SIZE) {
+ 				ap->ssid.size = *(bp + 1);
+ 			} else {
+@@ -254,8 +253,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ 			}
+ 			memcpy(ap->ssid.body, bp + 2, ap->ssid.size);
+ 			break;
+-		case 1:	/* rate */
+-		case 50:	/* ext rate */
++		case WLAN_EID_SUPP_RATES:
++		case WLAN_EID_EXT_SUPP_RATES:
+ 			if ((*(bp + 1) + ap->rate_set.size) <=
+ 			    RATE_SET_MAX_SIZE) {
+ 				memcpy(&ap->rate_set.body[ap->rate_set.size],
+@@ -271,9 +270,9 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ 				    (RATE_SET_MAX_SIZE - ap->rate_set.size);
+ 			}
+ 			break;
+-		case 3:	/* DS parameter */
++		case WLAN_EID_DS_PARAMS:
+ 			break;
+-		case 48:	/* RSN(WPA2) */
++		case WLAN_EID_RSN:
+ 			ap->rsn_ie.id = *bp;
+ 			if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ 				ap->rsn_ie.size = *(bp + 1);
+@@ -284,8 +283,8 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ 			}
+ 			memcpy(ap->rsn_ie.body, bp + 2, ap->rsn_ie.size);
+ 			break;
+-		case 221:	/* WPA */
+-			if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) {	/* WPA OUI check */
++		case WLAN_EID_VENDOR_SPECIFIC: /* WPA */
++			if (memcmp(bp + 2, "\x00\x50\xf2\x01", 4) == 0) { /* WPA OUI check */
+ 				ap->wpa_ie.id = *bp;
+ 				if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+ 					ap->wpa_ie.size = *(bp + 1);
+@@ -300,18 +299,18 @@ int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+ 			}
+ 			break;
+ 
+-		case 2:	/* FH parameter */
+-		case 4:	/* CF parameter */
+-		case 5:	/* TIM */
+-		case 6:	/* IBSS parameter */
+-		case 7:	/* Country */
+-		case 42:	/* ERP information */
+-		case 47:	/* Reserve ID 47 Broadcom AP */
++		case WLAN_EID_FH_PARAMS:
++		case WLAN_EID_CF_PARAMS:
++		case WLAN_EID_TIM:
++		case WLAN_EID_IBSS_PARAMS:
++		case WLAN_EID_COUNTRY:
++		case WLAN_EID_ERP_INFO:
+ 			break;
+ 		default:
+ 			DPRINTK(4, "unknown Element ID=%d\n", *bp);
+ 			break;
+ 		}
++
+ 		offset += 2;	/* id & size field */
+ 		offset += *(bp + 1);	/* +size offset */
+ 		bp += (*(bp + 1) + 2);	/* pointer update */
+diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
+index 5bae8d468e23..9ac317e4b507 100644
+--- a/drivers/staging/ks7010/ks_hostif.h
++++ b/drivers/staging/ks7010/ks_hostif.h
+@@ -13,6 +13,7 @@
+ #define _KS_HOSTIF_H_
+ 
+ #include <linux/compiler.h>
++#include <linux/ieee80211.h>
+ 
+ /*
+  * HOST-MAC I/F events
+diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
+index 4368f4e9f208..f1233ca7d337 100644
+--- a/drivers/staging/lustre/lustre/include/obd.h
++++ b/drivers/staging/lustre/lustre/include/obd.h
+@@ -191,7 +191,7 @@ struct client_obd {
+ 	struct sptlrpc_flavor    cl_flvr_mgc;   /* fixed flavor of mgc->mgs */
+ 
+ 	/* the grant values are protected by loi_list_lock below */
+-	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pahges */
++	unsigned long		 cl_dirty_pages;	/* all _dirty_ in pages */
+ 	unsigned long		 cl_dirty_max_pages;	/* allowed w/o rpc */
+ 	unsigned long		 cl_dirty_transit;	/* dirty synchronous */
+ 	unsigned long		 cl_avail_grant;	/* bytes of credit for ost */
+diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+index c2c57f65431e..ff9c2f96bada 100644
+--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
++++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+@@ -2695,7 +2695,7 @@ static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
+ 	if (lsm && !lmm) {
+ 		int i;
+ 
+-		for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
++		for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
+ 			/*
+ 			 * For migrating inode, the master stripe and master
+ 			 * object will be the same, so do not need iput, see
+diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
+index 5767ac2a7d16..a907d956443f 100644
+--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
++++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
+@@ -1530,7 +1530,7 @@ static int osc_enter_cache_try(struct client_obd *cli,
+ 	if (rc < 0)
+ 		return 0;
+ 
+-	if (cli->cl_dirty_pages <= cli->cl_dirty_max_pages &&
++	if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
+ 	    atomic_long_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ 		osc_consume_write_grant(cli, &oap->oap_brw_page);
+ 		if (transient) {
+diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
+index 3c300f7b6a62..d607c59761cf 100644
+--- a/drivers/staging/rtl8192u/r8192U_core.c
++++ b/drivers/staging/rtl8192u/r8192U_core.c
+@@ -1706,6 +1706,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
+ 
+ 		priv->rx_urb[16] = usb_alloc_urb(0, GFP_KERNEL);
+ 		priv->oldaddr = kmalloc(16, GFP_KERNEL);
++		if (!priv->oldaddr)
++			return -ENOMEM;
+ 		oldaddr = priv->oldaddr;
+ 		align = ((long)oldaddr) & 3;
+ 		if (align) {
+diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+index 045d577fe4f8..0ed21dd08170 100644
+--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
++++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+@@ -25,6 +25,10 @@ MODULE_PARM_DESC(enable_compat_alsa,
+ static void snd_devm_unregister_child(struct device *dev, void *res)
+ {
+ 	struct device *childdev = *(struct device **)res;
++	struct bcm2835_chip *chip = dev_get_drvdata(childdev);
++	struct snd_card *card = chip->card;
++
++	snd_card_free(card);
+ 
+ 	device_unregister(childdev);
+ }
+@@ -50,6 +54,13 @@ static int snd_devm_add_child(struct device *dev, struct device *child)
+ 	return 0;
+ }
+ 
++static void snd_bcm2835_release(struct device *dev)
++{
++	struct bcm2835_chip *chip = dev_get_drvdata(dev);
++
++	kfree(chip);
++}
++
+ static struct device *
+ snd_create_device(struct device *parent,
+ 		  struct device_driver *driver,
+@@ -65,6 +76,7 @@ snd_create_device(struct device *parent,
+ 	device_initialize(device);
+ 	device->parent = parent;
+ 	device->driver = driver;
++	device->release = snd_bcm2835_release;
+ 
+ 	dev_set_name(device, "%s", name);
+ 
+@@ -75,18 +87,19 @@ snd_create_device(struct device *parent,
+ 	return device;
+ }
+ 
+-static int snd_bcm2835_free(struct bcm2835_chip *chip)
+-{
+-	kfree(chip);
+-	return 0;
+-}
+-
+ /* component-destructor
+  * (see "Management of Cards and Components")
+  */
+ static int snd_bcm2835_dev_free(struct snd_device *device)
+ {
+-	return snd_bcm2835_free(device->device_data);
++	struct bcm2835_chip *chip = device->device_data;
++	struct snd_card *card = chip->card;
++
++	/* TODO: free pcm, ctl */
++
++	snd_device_free(card, chip);
++
++	return 0;
+ }
+ 
+ /* chip-specific constructor
+@@ -111,7 +124,7 @@ static int snd_bcm2835_create(struct snd_card *card,
+ 
+ 	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ 	if (err) {
+-		snd_bcm2835_free(chip);
++		kfree(chip);
+ 		return err;
+ 	}
+ 
+@@ -119,31 +132,14 @@ static int snd_bcm2835_create(struct snd_card *card,
+ 	return 0;
+ }
+ 
+-static void snd_devm_card_free(struct device *dev, void *res)
++static struct snd_card *snd_bcm2835_card_new(struct device *dev)
+ {
+-	struct snd_card *snd_card = *(struct snd_card **)res;
+-
+-	snd_card_free(snd_card);
+-}
+-
+-static struct snd_card *snd_devm_card_new(struct device *dev)
+-{
+-	struct snd_card **dr;
+ 	struct snd_card *card;
+ 	int ret;
+ 
+-	dr = devres_alloc(snd_devm_card_free, sizeof(*dr), GFP_KERNEL);
+-	if (!dr)
+-		return ERR_PTR(-ENOMEM);
+-
+ 	ret = snd_card_new(dev, -1, NULL, THIS_MODULE, 0, &card);
+-	if (ret) {
+-		devres_free(dr);
++	if (ret)
+ 		return ERR_PTR(ret);
+-	}
+-
+-	*dr = card;
+-	devres_add(dev, dr);
+ 
+ 	return card;
+ }
+@@ -260,7 +256,7 @@ static int snd_add_child_device(struct device *device,
+ 		return PTR_ERR(child);
+ 	}
+ 
+-	card = snd_devm_card_new(child);
++	card = snd_bcm2835_card_new(child);
+ 	if (IS_ERR(card)) {
+ 		dev_err(child, "Failed to create card");
+ 		return PTR_ERR(card);
+@@ -302,7 +298,7 @@ static int snd_add_child_device(struct device *device,
+ 		return err;
+ 	}
+ 
+-	dev_set_drvdata(child, card);
++	dev_set_drvdata(child, chip);
+ 	dev_info(child, "card created with %d channels\n", numchans);
+ 
+ 	return 0;
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 804c1af6fd33..95833cbc4338 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1867,7 +1867,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ 
+ 	status = serial_port_in(port, UART_LSR);
+ 
+-	if (status & (UART_LSR_DR | UART_LSR_BI)) {
++	if (status & (UART_LSR_DR | UART_LSR_BI) &&
++	    iir & UART_IIR_RDI) {
+ 		if (!up->dma || handle_rx_dma(up, iir))
+ 			status = serial8250_rx_chars(up, status);
+ 	}
+diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
+index b88b05f8e81e..ae30398fcf56 100644
+--- a/drivers/tty/serial/altera_uart.c
++++ b/drivers/tty/serial/altera_uart.c
+@@ -327,7 +327,7 @@ static int altera_uart_startup(struct uart_port *port)
+ 
+ 	/* Enable RX interrupts now */
+ 	pp->imr = ALTERA_UART_CONTROL_RRDY_MSK;
+-	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
++	altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ 
+@@ -343,7 +343,7 @@ static void altera_uart_shutdown(struct uart_port *port)
+ 
+ 	/* Disable all interrupts now */
+ 	pp->imr = 0;
+-	writel(pp->imr, port->membase + ALTERA_UART_CONTROL_REG);
++	altera_uart_writel(port, pp->imr, ALTERA_UART_CONTROL_REG);
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+ 
+@@ -432,7 +432,7 @@ static void altera_uart_console_putc(struct uart_port *port, int c)
+ 		 ALTERA_UART_STATUS_TRDY_MSK))
+ 		cpu_relax();
+ 
+-	writel(c, port->membase + ALTERA_UART_TXDATA_REG);
++	altera_uart_writel(port, c, ALTERA_UART_TXDATA_REG);
+ }
+ 
+ static void altera_uart_console_write(struct console *co, const char *s,
+@@ -502,13 +502,13 @@ static int __init altera_uart_earlycon_setup(struct earlycon_device *dev,
+ 		return -ENODEV;
+ 
+ 	/* Enable RX interrupts now */
+-	writel(ALTERA_UART_CONTROL_RRDY_MSK,
+-	       port->membase + ALTERA_UART_CONTROL_REG);
++	altera_uart_writel(port, ALTERA_UART_CONTROL_RRDY_MSK,
++			   ALTERA_UART_CONTROL_REG);
+ 
+ 	if (dev->baud) {
+ 		unsigned int baudclk = port->uartclk / dev->baud;
+ 
+-		writel(baudclk, port->membase + ALTERA_UART_DIVISOR_REG);
++		altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
+ 	}
+ 
+ 	dev->con->write = altera_uart_earlycon_write;
+diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
+index 2599f9ecccfe..d904a3a345e7 100644
+--- a/drivers/tty/serial/arc_uart.c
++++ b/drivers/tty/serial/arc_uart.c
+@@ -593,6 +593,11 @@ static int arc_serial_probe(struct platform_device *pdev)
+ 	if (dev_id < 0)
+ 		dev_id = 0;
+ 
++	if (dev_id >= ARRAY_SIZE(arc_uart_ports)) {
++		dev_err(&pdev->dev, "serial%d out of range\n", dev_id);
++		return -EINVAL;
++	}
++
+ 	uart = &arc_uart_ports[dev_id];
+ 	port = &uart->port;
+ 
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 8cf112f2efc3..51e47a63d61a 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2145,6 +2145,10 @@ static int lpuart_probe(struct platform_device *pdev)
+ 		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+ 		return ret;
+ 	}
++	if (ret >= ARRAY_SIZE(lpuart_ports)) {
++		dev_err(&pdev->dev, "serial%d out of range\n", ret);
++		return -EINVAL;
++	}
+ 	sport->port.line = ret;
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index a33c685af990..961ab7d2add5 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -2042,6 +2042,12 @@ static int serial_imx_probe(struct platform_device *pdev)
+ 	else if (ret < 0)
+ 		return ret;
+ 
++	if (sport->port.line >= ARRAY_SIZE(imx_ports)) {
++		dev_err(&pdev->dev, "serial%d out of range\n",
++			sport->port.line);
++		return -EINVAL;
++	}
++
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	base = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(base))
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index 03d26aabb0c4..2581461f92bf 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -617,7 +617,7 @@ static void wait_for_xmitr(struct uart_port *port)
+ 	u32 val;
+ 
+ 	readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+-				  (val & STAT_TX_EMP), 1, 10000);
++				  (val & STAT_TX_RDY(port)), 1, 10000);
+ }
+ 
+ static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
+diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
+index 079dc47aa142..caa8a41b6e71 100644
+--- a/drivers/tty/serial/mxs-auart.c
++++ b/drivers/tty/serial/mxs-auart.c
+@@ -1663,6 +1663,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
+ 		s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ 	else if (ret < 0)
+ 		return ret;
++	if (s->port.line >= ARRAY_SIZE(auart_port)) {
++		dev_err(&pdev->dev, "serial%d out of range\n", s->port.line);
++		return -EINVAL;
++	}
+ 
+ 	if (of_id) {
+ 		pdev->id_entry = of_id->data;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index f9fecc5ed0ce..3f2f8c118ce0 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1818,6 +1818,10 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
+ 
+ 	dbg("s3c24xx_serial_probe(%p) %d\n", pdev, index);
+ 
++	if (index >= ARRAY_SIZE(s3c24xx_serial_ports)) {
++		dev_err(&pdev->dev, "serial%d out of range\n", index);
++		return -EINVAL;
++	}
+ 	ourport = &s3c24xx_serial_ports[index];
+ 
+ 	ourport->drv_data = s3c24xx_get_driver_data(pdev);
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 44adf9db38f8..ab757546c6db 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -3098,6 +3098,10 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+ 		dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
+ 		return NULL;
+ 	}
++	if (id >= ARRAY_SIZE(sci_ports)) {
++		dev_err(&pdev->dev, "serial%d out of range\n", id);
++		return NULL;
++	}
+ 
+ 	sp = &sci_ports[id];
+ 	*dev_id = id;
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index b9b2bc76bcac..abcb4d09a2d8 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1110,7 +1110,7 @@ static struct uart_port *cdns_uart_get_port(int id)
+ 	struct uart_port *port;
+ 
+ 	/* Try the given port id if failed use default method */
+-	if (cdns_uart_port[id].mapbase != 0) {
++	if (id < CDNS_UART_NR_PORTS && cdns_uart_port[id].mapbase != 0) {
+ 		/* Find the next unused port */
+ 		for (id = 0; id < CDNS_UART_NR_PORTS; id++)
+ 			if (cdns_uart_port[id].mapbase == 0)
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index cd77af3b1565..d939b24ae92a 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -217,7 +217,7 @@ struct dwc2_hsotg_ep {
+ 	unsigned char           dir_in;
+ 	unsigned char           index;
+ 	unsigned char           mc;
+-	unsigned char           interval;
++	u16                     interval;
+ 
+ 	unsigned int            halted:1;
+ 	unsigned int            periodic:1;
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index a5d72fcd1603..7ee7320d3c24 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -989,6 +989,24 @@ void dwc2_hc_halt(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
+ 
+ 	if (dbg_hc(chan))
+ 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
++
++	/*
++	 * In buffer DMA or external DMA mode channel can't be halted
++	 * for non-split periodic channels. At the end of the next
++	 * uframe/frame (in the worst case), the core generates a channel
++	 * halted and disables the channel automatically.
++	 */
++	if ((hsotg->params.g_dma && !hsotg->params.g_dma_desc) ||
++	    hsotg->hw_params.arch == GHWCFG2_EXT_DMA_ARCH) {
++		if (!chan->do_split &&
++		    (chan->ep_type == USB_ENDPOINT_XFER_ISOC ||
++		     chan->ep_type == USB_ENDPOINT_XFER_INT)) {
++			dev_err(hsotg->dev, "%s() Channel can't be halted\n",
++				__func__);
++			return;
++		}
++	}
++
+ 	if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS)
+ 		dev_err(hsotg->dev, "!!! halt_status = %d !!!\n", halt_status);
+ 
+@@ -2322,10 +2340,22 @@ static int dwc2_core_init(struct dwc2_hsotg *hsotg, bool initial_setup)
+  */
+ static void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
+ {
+-	u32 hcfg, hfir, otgctl;
++	u32 hcfg, hfir, otgctl, usbcfg;
+ 
+ 	dev_dbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
+ 
++	/* Set HS/FS Timeout Calibration to 7 (max available value).
++	 * The number of PHY clocks that the application programs in
++	 * this field is added to the high/full speed interpacket timeout
++	 * duration in the core to account for any additional delays
++	 * introduced by the PHY. This can be required, because the delay
++	 * introduced by the PHY in generating the linestate condition
++	 * can vary from one PHY to another.
++	 */
++	usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
++	usbcfg |= GUSBCFG_TOUTCAL(7);
++	dwc2_writel(usbcfg, hsotg->regs + GUSBCFG);
++
+ 	/* Restart the Phy Clock */
+ 	dwc2_writel(0, hsotg->regs + PCGCTL);
+ 
+diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
+index 7ac725038f8d..025bc68094fc 100644
+--- a/drivers/usb/dwc3/Makefile
++++ b/drivers/usb/dwc3/Makefile
+@@ -6,7 +6,7 @@ obj-$(CONFIG_USB_DWC3)			+= dwc3.o
+ 
+ dwc3-y					:= core.o
+ 
+-ifneq ($(CONFIG_FTRACE),)
++ifneq ($(CONFIG_TRACING),)
+ 	dwc3-y				+= trace.o
+ endif
+ 
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index df4569df7eaf..ddef1ae0c708 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -232,7 +232,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	do {
+ 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ 		if (!(reg & DWC3_DCTL_CSFTRST))
+-			return 0;
++			goto done;
+ 
+ 		udelay(1);
+ 	} while (--retries);
+@@ -241,6 +241,17 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ 	phy_exit(dwc->usb2_generic_phy);
+ 
+ 	return -ETIMEDOUT;
++
++done:
++	/*
++	 * For DWC_usb31 controller, once DWC3_DCTL_CSFTRST bit is cleared,
++	 * we must wait at least 50ms before accessing the PHY domain
++	 * (synchronization delay). DWC_usb31 programming guide section 1.3.2.
++	 */
++	if (dwc3_is_usb31(dwc))
++		msleep(50);
++
++	return 0;
+ }
+ 
+ /*
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 860d2bc184d1..cdd609930443 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -241,6 +241,8 @@
+ #define DWC3_GUSB3PIPECTL_TX_DEEPH(n)	((n) << 1)
+ 
+ /* Global TX Fifo Size Register */
++#define DWC31_GTXFIFOSIZ_TXFRAMNUM	BIT(15)		/* DWC_usb31 only */
++#define DWC31_GTXFIFOSIZ_TXFDEF(n)	((n) & 0x7fff)	/* DWC_usb31 only */
+ #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
+ #define DWC3_GTXFIFOSIZ_TXFSTADDR(n)	((n) & 0xffff0000)
+ 
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 77c7ecca816a..b8b629c615d3 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1422,7 +1422,7 @@ static int count_ext_compat(struct usb_configuration *c)
+ 	return res;
+ }
+ 
+-static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
++static int fill_ext_compat(struct usb_configuration *c, u8 *buf)
+ {
+ 	int i, count;
+ 
+@@ -1449,10 +1449,12 @@ static void fill_ext_compat(struct usb_configuration *c, u8 *buf)
+ 				buf += 23;
+ 			}
+ 			count += 24;
+-			if (count >= 4096)
+-				return;
++			if (count + 24 >= USB_COMP_EP0_OS_DESC_BUFSIZ)
++				return count;
+ 		}
+ 	}
++
++	return count;
+ }
+ 
+ static int count_ext_prop(struct usb_configuration *c, int interface)
+@@ -1497,25 +1499,20 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
+ 	struct usb_os_desc *d;
+ 	struct usb_os_desc_ext_prop *ext_prop;
+ 	int j, count, n, ret;
+-	u8 *start = buf;
+ 
+ 	f = c->interface[interface];
++	count = 10; /* header length */
+ 	for (j = 0; j < f->os_desc_n; ++j) {
+ 		if (interface != f->os_desc_table[j].if_id)
+ 			continue;
+ 		d = f->os_desc_table[j].os_desc;
+ 		if (d)
+ 			list_for_each_entry(ext_prop, &d->ext_prop, entry) {
+-				/* 4kB minus header length */
+-				n = buf - start;
+-				if (n >= 4086)
+-					return 0;
+-
+-				count = ext_prop->data_len +
++				n = ext_prop->data_len +
+ 					ext_prop->name_len + 14;
+-				if (count > 4086 - n)
+-					return -EINVAL;
+-				usb_ext_prop_put_size(buf, count);
++				if (count + n >= USB_COMP_EP0_OS_DESC_BUFSIZ)
++					return count;
++				usb_ext_prop_put_size(buf, n);
+ 				usb_ext_prop_put_type(buf, ext_prop->type);
+ 				ret = usb_ext_prop_put_name(buf, ext_prop->name,
+ 							    ext_prop->name_len);
+@@ -1541,11 +1538,12 @@ static int fill_ext_prop(struct usb_configuration *c, int interface, u8 *buf)
+ 				default:
+ 					return -EINVAL;
+ 				}
+-				buf += count;
++				buf += n;
++				count += n;
+ 			}
+ 	}
+ 
+-	return 0;
++	return count;
+ }
+ 
+ /*
+@@ -1827,6 +1825,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 			req->complete = composite_setup_complete;
+ 			buf = req->buf;
+ 			os_desc_cfg = cdev->os_desc_config;
++			w_length = min_t(u16, w_length, USB_COMP_EP0_OS_DESC_BUFSIZ);
+ 			memset(buf, 0, w_length);
+ 			buf[5] = 0x01;
+ 			switch (ctrl->bRequestType & USB_RECIP_MASK) {
+@@ -1850,8 +1849,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 					count += 16; /* header */
+ 					put_unaligned_le32(count, buf);
+ 					buf += 16;
+-					fill_ext_compat(os_desc_cfg, buf);
+-					value = w_length;
++					value = fill_ext_compat(os_desc_cfg, buf);
++					value = min_t(u16, w_length, value);
+ 				}
+ 				break;
+ 			case USB_RECIP_INTERFACE:
+@@ -1880,8 +1879,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 							      interface, buf);
+ 					if (value < 0)
+ 						return value;
+-
+-					value = w_length;
++					value = min_t(u16, w_length, value);
+ 				}
+ 				break;
+ 			}
+@@ -2156,8 +2154,8 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
+ 		goto end;
+ 	}
+ 
+-	/* OS feature descriptor length <= 4kB */
+-	cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
++	cdev->os_desc_req->buf = kmalloc(USB_COMP_EP0_OS_DESC_BUFSIZ,
++					 GFP_KERNEL);
+ 	if (!cdev->os_desc_req->buf) {
+ 		ret = -ENOMEM;
+ 		usb_ep_free_request(ep0, cdev->os_desc_req);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index d2428a9e8900..0294e4f18873 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -758,9 +758,13 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ 
+ 	if (io_data->read && ret > 0) {
++		mm_segment_t oldfs = get_fs();
++
++		set_fs(USER_DS);
+ 		use_mm(io_data->mm);
+ 		ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
+ 		unuse_mm(io_data->mm);
++		set_fs(oldfs);
+ 	}
+ 
+ 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+@@ -3238,7 +3242,7 @@ static int ffs_func_setup(struct usb_function *f,
+ 	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
+ 	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+ 
+-	return 0;
++	return USB_GADGET_DELAYED_STATUS;
+ }
+ 
+ static bool ffs_func_req_match(struct usb_function *f,
+diff --git a/drivers/usb/gadget/udc/goku_udc.h b/drivers/usb/gadget/udc/goku_udc.h
+index 26601bf4e7a9..70023d401079 100644
+--- a/drivers/usb/gadget/udc/goku_udc.h
++++ b/drivers/usb/gadget/udc/goku_udc.h
+@@ -25,7 +25,7 @@ struct goku_udc_regs {
+ #	define INT_EP1DATASET		0x00040
+ #	define INT_EP2DATASET		0x00080
+ #	define INT_EP3DATASET		0x00100
+-#define INT_EPnNAK(n)		(0x00100 < (n))		/* 0 < n < 4 */
++#define INT_EPnNAK(n)		(0x00100 << (n))	/* 0 < n < 4 */
+ #	define INT_EP1NAK		0x00200
+ #	define INT_EP2NAK		0x00400
+ #	define INT_EP3NAK		0x00800
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 332420d10be9..e5ace8995b3b 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -913,6 +913,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ 	if (dev->out_ctx)
+ 		xhci_free_container_ctx(xhci, dev->out_ctx);
+ 
++	if (dev->udev && dev->udev->slot_id)
++		dev->udev->slot_id = 0;
+ 	kfree(xhci->devs[slot_id]);
+ 	xhci->devs[slot_id] = NULL;
+ }
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index b60a02c50b89..bd281a96485c 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4769,6 +4769,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 	 * quirks
+ 	 */
+ 	struct device		*dev = hcd->self.sysdev;
++	unsigned int		minor_rev;
+ 	int			retval;
+ 
+ 	/* Accept arbitrarily long scatter-gather lists */
+@@ -4796,12 +4797,19 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		 */
+ 		hcd->has_tt = 1;
+ 	} else {
+-		/* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+-		if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
+-			xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
++		/*
++		 * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
++		 * minor revision instead of sbrn
++		 */
++		minor_rev = xhci->usb3_rhub.min_rev;
++		if (minor_rev) {
+ 			hcd->speed = HCD_USB31;
+ 			hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
+ 		}
++		xhci_info(xhci, "Host supports USB 3.%x %s SuperSpeed\n",
++			  minor_rev,
++			  minor_rev ? "Enhanced" : "");
++
+ 		/* xHCI private pointer was set in xhci_pci_probe for the second
+ 		 * registered roothub.
+ 		 */
+diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
+index eeefa29f8aa2..a20b65cb6678 100644
+--- a/drivers/usb/usbip/Kconfig
++++ b/drivers/usb/usbip/Kconfig
+@@ -27,7 +27,7 @@ config USBIP_VHCI_HCD
+ 
+ config USBIP_VHCI_HC_PORTS
+ 	int "Number of ports per USB/IP virtual host controller"
+-	range 1 31
++	range 1 15
+ 	default 8
+ 	depends on USBIP_VHCI_HCD
+ 	---help---
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index 9b2ac55ac34f..8cf2aa973b50 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -1261,21 +1261,11 @@ static void __ext2_truncate_blocks(struct inode *inode, loff_t offset)
+ 
+ static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
+ {
+-	/*
+-	 * XXX: it seems like a bug here that we don't allow
+-	 * IS_APPEND inode to have blocks-past-i_size trimmed off.
+-	 * review and fix this.
+-	 *
+-	 * Also would be nice to be able to handle IO errors and such,
+-	 * but that's probably too much to ask.
+-	 */
+ 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ 	    S_ISLNK(inode->i_mode)))
+ 		return;
+ 	if (ext2_inode_is_fast_symlink(inode))
+ 		return;
+-	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+-		return;
+ 
+ 	dax_sem_down_write(EXT2_I(inode));
+ 	__ext2_truncate_blocks(inode, offset);
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index 513c357c734b..a6c0f54c48c3 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -588,6 +588,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+ 	return 0;
+ 
+ out_put_hidden_dir:
++	cancel_delayed_work_sync(&sbi->sync_work);
+ 	iput(sbi->hidden_dir);
+ out_put_root:
+ 	dput(sb->s_root);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 1352b1b990a7..3ebb2f6ace79 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1271,17 +1271,7 @@ enum {
+ static inline const struct cpumask *
+ mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+ {
+-	struct irq_desc *desc;
+-	unsigned int irq;
+-	int eqn;
+-	int err;
+-
+-	err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+-	if (err)
+-		return NULL;
+-
+-	desc = irq_to_desc(irq);
+-	return desc->affinity_hint;
++	return dev->priv.irq_info[vector].mask;
+ }
+ 
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
+index cef0e44601f8..4b6b9283fa7b 100644
+--- a/include/linux/usb/composite.h
++++ b/include/linux/usb/composite.h
+@@ -54,6 +54,9 @@
+ /* big enough to hold our biggest descriptor */
+ #define USB_COMP_EP0_BUFSIZ	1024
+ 
++/* OS feature descriptor length <= 4kB */
++#define USB_COMP_EP0_OS_DESC_BUFSIZ	4096
++
+ #define USB_MS_TO_HS_INTERVAL(x)	(ilog2((x * 1000 / 125)) + 1)
+ struct usb_configuration;
+ 
+diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
+index cb85eddb47ea..eb7853c1a23b 100644
+--- a/include/scsi/scsi.h
++++ b/include/scsi/scsi.h
+@@ -47,6 +47,8 @@ static inline int scsi_status_is_good(int status)
+ 	 */
+ 	status &= 0xfe;
+ 	return ((status == SAM_STAT_GOOD) ||
++		(status == SAM_STAT_CONDITION_MET) ||
++		/* Next two "intermediate" statuses are obsolete in SAM-4 */
+ 		(status == SAM_STAT_INTERMEDIATE) ||
+ 		(status == SAM_STAT_INTERMEDIATE_CONDITION_MET) ||
+ 		/* FIXME: this is obsolete in SAM-3 */
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index c587a61c32bf..2e08c6f3ac3e 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2618,6 +2618,8 @@ enum nl80211_attrs {
+ #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
+ #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+ 
++#define NL80211_WIPHY_NAME_MAXLEN		128
++
+ #define NL80211_MAX_SUPP_RATES			32
+ #define NL80211_MAX_SUPP_HT_RATES		77
+ #define NL80211_MAX_SUPP_REG_RULES		64
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 3e550507e9f0..ace13bea3e50 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2097,7 +2097,7 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
+ 		int i, j;
+ 
+ 		for (i = count, j = offset; i--; j++) {
+-			if (!remove_xps_queue(dev_maps, cpu, j))
++			if (!remove_xps_queue(dev_maps, tci, j))
+ 				break;
+ 		}
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 85b0b64e7f9d..81c2df84f953 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1603,7 +1603,7 @@ static void __sk_free(struct sock *sk)
+ 	if (likely(sk->sk_net_refcnt))
+ 		sock_inuse_add(sock_net(sk), -1);
+ 
+-	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
++	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
+ 		sock_diag_broadcast_destroy(sk);
+ 	else
+ 		sk_destruct(sk);
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index adf50fbc4c13..47725250b4ca 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -258,11 +258,13 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
+ static int dsa_port_setup(struct dsa_port *dp)
+ {
+ 	struct dsa_switch *ds = dp->ds;
+-	int err;
++	int err = 0;
+ 
+ 	memset(&dp->devlink_port, 0, sizeof(dp->devlink_port));
+ 
+-	err = devlink_port_register(ds->devlink, &dp->devlink_port, dp->index);
++	if (dp->type != DSA_PORT_TYPE_UNUSED)
++		err = devlink_port_register(ds->devlink, &dp->devlink_port,
++					    dp->index);
+ 	if (err)
+ 		return err;
+ 
+@@ -293,7 +295,8 @@ static int dsa_port_setup(struct dsa_port *dp)
+ 
+ static void dsa_port_teardown(struct dsa_port *dp)
+ {
+-	devlink_port_unregister(&dp->devlink_port);
++	if (dp->type != DSA_PORT_TYPE_UNUSED)
++		devlink_port_unregister(&dp->devlink_port);
+ 
+ 	switch (dp->type) {
+ 	case DSA_PORT_TYPE_UNUSED:
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 66340ab750e6..e7daec7c7421 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1040,7 +1040,8 @@ static int __ip_append_data(struct sock *sk,
+ 		if (copy > length)
+ 			copy = length;
+ 
+-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
++		if (!(rt->dst.dev->features&NETIF_F_SG) &&
++		    skb_tailroom(skb) >= copy) {
+ 			unsigned int off;
+ 
+ 			off = skb->len;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 6818042cd8a9..3a0211692c28 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2860,8 +2860,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
+ 		return -EBUSY;
+ 
+ 	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
+-		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+-			BUG();
++		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
++			WARN_ON_ONCE(1);
++			return -EINVAL;
++		}
+ 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
+ 			return -ENOMEM;
+ 	}
+@@ -3369,6 +3371,7 @@ static void tcp_connect_init(struct sock *sk)
+ 	sock_reset_flag(sk, SOCK_DONE);
+ 	tp->snd_wnd = 0;
+ 	tcp_init_wl(tp, 0);
++	tcp_write_queue_purge(sk);
+ 	tp->snd_una = tp->write_seq;
+ 	tp->snd_sml = tp->write_seq;
+ 	tp->snd_up = tp->write_seq;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 197fcae855ca..9539bdb15edb 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -71,6 +71,7 @@ struct ip6gre_net {
+ 	struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
+ 
+ 	struct ip6_tnl __rcu *collect_md_tun;
++	struct ip6_tnl __rcu *collect_md_tun_erspan;
+ 	struct net_device *fb_tunnel_dev;
+ };
+ 
+@@ -81,6 +82,7 @@ static int ip6gre_tunnel_init(struct net_device *dev);
+ static void ip6gre_tunnel_setup(struct net_device *dev);
+ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
+ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
++static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
+ 
+ /* Tunnel hash table */
+ 
+@@ -232,7 +234,12 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
+ 	if (cand)
+ 		return cand;
+ 
+-	t = rcu_dereference(ign->collect_md_tun);
++	if (gre_proto == htons(ETH_P_ERSPAN) ||
++	    gre_proto == htons(ETH_P_ERSPAN2))
++		t = rcu_dereference(ign->collect_md_tun_erspan);
++	else
++		t = rcu_dereference(ign->collect_md_tun);
++
+ 	if (t && t->dev->flags & IFF_UP)
+ 		return t;
+ 
+@@ -261,6 +268,31 @@ static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
+ 	return &ign->tunnels[prio][h];
+ }
+ 
++static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++	if (t->parms.collect_md)
++		rcu_assign_pointer(ign->collect_md_tun, t);
++}
++
++static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++	if (t->parms.collect_md)
++		rcu_assign_pointer(ign->collect_md_tun_erspan, t);
++}
++
++static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
++{
++	if (t->parms.collect_md)
++		rcu_assign_pointer(ign->collect_md_tun, NULL);
++}
++
++static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
++				       struct ip6_tnl *t)
++{
++	if (t->parms.collect_md)
++		rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
++}
++
+ static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
+ 		const struct ip6_tnl *t)
+ {
+@@ -271,9 +303,6 @@ static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
+ {
+ 	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
+ 
+-	if (t->parms.collect_md)
+-		rcu_assign_pointer(ign->collect_md_tun, t);
+-
+ 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+ 	rcu_assign_pointer(*tp, t);
+ }
+@@ -283,9 +312,6 @@ static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
+ 	struct ip6_tnl __rcu **tp;
+ 	struct ip6_tnl *iter;
+ 
+-	if (t->parms.collect_md)
+-		rcu_assign_pointer(ign->collect_md_tun, NULL);
+-
+ 	for (tp = ip6gre_bucket(ign, t);
+ 	     (iter = rtnl_dereference(*tp)) != NULL;
+ 	     tp = &iter->next) {
+@@ -374,11 +400,23 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 	return NULL;
+ }
+ 
++static void ip6erspan_tunnel_uninit(struct net_device *dev)
++{
++	struct ip6_tnl *t = netdev_priv(dev);
++	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
++
++	ip6erspan_tunnel_unlink_md(ign, t);
++	ip6gre_tunnel_unlink(ign, t);
++	dst_cache_reset(&t->dst_cache);
++	dev_put(dev);
++}
++
+ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ {
+ 	struct ip6_tnl *t = netdev_priv(dev);
+ 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+ 
++	ip6gre_tunnel_unlink_md(ign, t);
+ 	ip6gre_tunnel_unlink(ign, t);
+ 	dst_cache_reset(&t->dst_cache);
+ 	dev_put(dev);
+@@ -701,6 +739,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
+ 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
+ 		tunnel->o_seqno++;
+ 
++	if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
++		return -ENOMEM;
++
+ 	/* Push GRE header. */
+ 	protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
+ 
+@@ -905,7 +946,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 		truncate = true;
+ 	}
+ 
+-	if (skb_cow_head(skb, dev->needed_headroom))
++	if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
+ 		goto tx_err;
+ 
+ 	t->parms.o_flags &= ~TUNNEL_KEY;
+@@ -1016,12 +1057,11 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ 	return NETDEV_TX_OK;
+ }
+ 
+-static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
+ {
+ 	struct net_device *dev = t->dev;
+ 	struct __ip6_tnl_parm *p = &t->parms;
+ 	struct flowi6 *fl6 = &t->fl.u.ip6;
+-	int t_hlen;
+ 
+ 	if (dev->type != ARPHRD_ETHER) {
+ 		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+@@ -1048,12 +1088,13 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+ 		dev->flags |= IFF_POINTOPOINT;
+ 	else
+ 		dev->flags &= ~IFF_POINTOPOINT;
++}
+ 
+-	t->tun_hlen = gre_calc_hlen(t->parms.o_flags);
+-
+-	t->hlen = t->encap_hlen + t->tun_hlen;
+-
+-	t_hlen = t->hlen + sizeof(struct ipv6hdr);
++static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
++					 int t_hlen)
++{
++	const struct __ip6_tnl_parm *p = &t->parms;
++	struct net_device *dev = t->dev;
+ 
+ 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
+ 		int strict = (ipv6_addr_type(&p->raddr) &
+@@ -1085,8 +1126,26 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
+ 	}
+ }
+ 
+-static int ip6gre_tnl_change(struct ip6_tnl *t,
+-	const struct __ip6_tnl_parm *p, int set_mtu)
++static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
++{
++	int t_hlen;
++
++	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
++	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
++
++	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
++	tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++	return t_hlen;
++}
++
++static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++{
++	ip6gre_tnl_link_config_common(t);
++	ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
++}
++
++static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
++				     const struct __ip6_tnl_parm *p)
+ {
+ 	t->parms.laddr = p->laddr;
+ 	t->parms.raddr = p->raddr;
+@@ -1102,6 +1161,12 @@ static int ip6gre_tnl_change(struct ip6_tnl *t,
+ 	t->parms.o_flags = p->o_flags;
+ 	t->parms.fwmark = p->fwmark;
+ 	dst_cache_reset(&t->dst_cache);
++}
++
++static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
++			     int set_mtu)
++{
++	ip6gre_tnl_copy_tnl_parm(t, p);
+ 	ip6gre_tnl_link_config(t, set_mtu);
+ 	return 0;
+ }
+@@ -1378,11 +1443,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
+ 		return ret;
+ 	}
+ 
+-	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
+-	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+-	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+-
+-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++	t_hlen = ip6gre_calc_hlen(tunnel);
+ 	dev->mtu = ETH_DATA_LEN - t_hlen;
+ 	if (dev->type == ARPHRD_ETHER)
+ 		dev->mtu -= ETH_HLEN;
+@@ -1723,6 +1784,19 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
+ 	.ndo_get_iflink = ip6_tnl_get_iflink,
+ };
+ 
++static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
++{
++	int t_hlen;
++
++	tunnel->tun_hlen = 8;
++	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
++		       erspan_hdr_len(tunnel->parms.erspan_ver);
++
++	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
++	tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++	return t_hlen;
++}
++
+ static int ip6erspan_tap_init(struct net_device *dev)
+ {
+ 	struct ip6_tnl *tunnel;
+@@ -1746,12 +1820,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ 		return ret;
+ 	}
+ 
+-	tunnel->tun_hlen = 8;
+-	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
+-		       erspan_hdr_len(tunnel->parms.erspan_ver);
+-	t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
+-
+-	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
++	t_hlen = ip6erspan_calc_hlen(tunnel);
+ 	dev->mtu = ETH_DATA_LEN - t_hlen;
+ 	if (dev->type == ARPHRD_ETHER)
+ 		dev->mtu -= ETH_HLEN;
+@@ -1760,14 +1829,14 @@ static int ip6erspan_tap_init(struct net_device *dev)
+ 
+ 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 	tunnel = netdev_priv(dev);
+-	ip6gre_tnl_link_config(tunnel, 1);
++	ip6erspan_tnl_link_config(tunnel, 1);
+ 
+ 	return 0;
+ }
+ 
+ static const struct net_device_ops ip6erspan_netdev_ops = {
+ 	.ndo_init =		ip6erspan_tap_init,
+-	.ndo_uninit =		ip6gre_tunnel_uninit,
++	.ndo_uninit =		ip6erspan_tunnel_uninit,
+ 	.ndo_start_xmit =	ip6erspan_tunnel_xmit,
+ 	.ndo_set_mac_address =	eth_mac_addr,
+ 	.ndo_validate_addr =	eth_validate_addr,
+@@ -1825,13 +1894,11 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
+ 	return ret;
+ }
+ 
+-static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+-			  struct nlattr *tb[], struct nlattr *data[],
+-			  struct netlink_ext_ack *extack)
++static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev,
++				 struct nlattr *tb[], struct nlattr *data[],
++				 struct netlink_ext_ack *extack)
+ {
+ 	struct ip6_tnl *nt;
+-	struct net *net = dev_net(dev);
+-	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ 	struct ip_tunnel_encap ipencap;
+ 	int err;
+ 
+@@ -1844,16 +1911,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+ 			return err;
+ 	}
+ 
+-	ip6gre_netlink_parms(data, &nt->parms);
+-
+-	if (nt->parms.collect_md) {
+-		if (rtnl_dereference(ign->collect_md_tun))
+-			return -EEXIST;
+-	} else {
+-		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
+-			return -EEXIST;
+-	}
+-
+ 	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
+ 		eth_hw_addr_random(dev);
+ 
+@@ -1864,51 +1921,94 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
+ 	if (err)
+ 		goto out;
+ 
+-	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
+-
+ 	if (tb[IFLA_MTU])
+ 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ 
+ 	dev_hold(dev);
+-	ip6gre_tunnel_link(ign, nt);
+ 
+ out:
+ 	return err;
+ }
+ 
+-static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+-			     struct nlattr *data[],
+-			     struct netlink_ext_ack *extack)
++static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
++			  struct nlattr *tb[], struct nlattr *data[],
++			  struct netlink_ext_ack *extack)
++{
++	struct ip6_tnl *nt = netdev_priv(dev);
++	struct net *net = dev_net(dev);
++	struct ip6gre_net *ign;
++	int err;
++
++	ip6gre_netlink_parms(data, &nt->parms);
++	ign = net_generic(net, ip6gre_net_id);
++
++	if (nt->parms.collect_md) {
++		if (rtnl_dereference(ign->collect_md_tun))
++			return -EEXIST;
++	} else {
++		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
++			return -EEXIST;
++	}
++
++	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
++	if (!err) {
++		ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
++		ip6gre_tunnel_link_md(ign, nt);
++		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
++	}
++	return err;
++}
++
++static struct ip6_tnl *
++ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
++			 struct nlattr *data[], struct __ip6_tnl_parm *p_p,
++			 struct netlink_ext_ack *extack)
+ {
+ 	struct ip6_tnl *t, *nt = netdev_priv(dev);
+ 	struct net *net = nt->net;
+ 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+-	struct __ip6_tnl_parm p;
+ 	struct ip_tunnel_encap ipencap;
+ 
+ 	if (dev == ign->fb_tunnel_dev)
+-		return -EINVAL;
++		return ERR_PTR(-EINVAL);
+ 
+ 	if (ip6gre_netlink_encap_parms(data, &ipencap)) {
+ 		int err = ip6_tnl_encap_setup(nt, &ipencap);
+ 
+ 		if (err < 0)
+-			return err;
++			return ERR_PTR(err);
+ 	}
+ 
+-	ip6gre_netlink_parms(data, &p);
++	ip6gre_netlink_parms(data, p_p);
+ 
+-	t = ip6gre_tunnel_locate(net, &p, 0);
++	t = ip6gre_tunnel_locate(net, p_p, 0);
+ 
+ 	if (t) {
+ 		if (t->dev != dev)
+-			return -EEXIST;
++			return ERR_PTR(-EEXIST);
+ 	} else {
+ 		t = nt;
+ 	}
+ 
++	return t;
++}
++
++static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
++			     struct nlattr *data[],
++			     struct netlink_ext_ack *extack)
++{
++	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
++	struct __ip6_tnl_parm p;
++	struct ip6_tnl *t;
++
++	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
++	if (IS_ERR(t))
++		return PTR_ERR(t);
++
++	ip6gre_tunnel_unlink_md(ign, t);
+ 	ip6gre_tunnel_unlink(ign, t);
+ 	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
++	ip6gre_tunnel_link_md(ign, t);
+ 	ip6gre_tunnel_link(ign, t);
+ 	return 0;
+ }
+@@ -2058,6 +2158,69 @@ static void ip6erspan_tap_setup(struct net_device *dev)
+ 	netif_keep_dst(dev);
+ }
+ 
++static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
++			     struct nlattr *tb[], struct nlattr *data[],
++			     struct netlink_ext_ack *extack)
++{
++	struct ip6_tnl *nt = netdev_priv(dev);
++	struct net *net = dev_net(dev);
++	struct ip6gre_net *ign;
++	int err;
++
++	ip6gre_netlink_parms(data, &nt->parms);
++	ign = net_generic(net, ip6gre_net_id);
++
++	if (nt->parms.collect_md) {
++		if (rtnl_dereference(ign->collect_md_tun_erspan))
++			return -EEXIST;
++	} else {
++		if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
++			return -EEXIST;
++	}
++
++	err = ip6gre_newlink_common(src_net, dev, tb, data, extack);
++	if (!err) {
++		ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
++		ip6erspan_tunnel_link_md(ign, nt);
++		ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
++	}
++	return err;
++}
++
++static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
++{
++	ip6gre_tnl_link_config_common(t);
++	ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
++}
++
++static int ip6erspan_tnl_change(struct ip6_tnl *t,
++				const struct __ip6_tnl_parm *p, int set_mtu)
++{
++	ip6gre_tnl_copy_tnl_parm(t, p);
++	ip6erspan_tnl_link_config(t, set_mtu);
++	return 0;
++}
++
++static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
++				struct nlattr *data[],
++				struct netlink_ext_ack *extack)
++{
++	struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
++	struct __ip6_tnl_parm p;
++	struct ip6_tnl *t;
++
++	t = ip6gre_changelink_common(dev, tb, data, &p, extack);
++	if (IS_ERR(t))
++		return PTR_ERR(t);
++
++	ip6gre_tunnel_unlink_md(ign, t);
++	ip6gre_tunnel_unlink(ign, t);
++	ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
++	ip6erspan_tunnel_link_md(ign, t);
++	ip6gre_tunnel_link(ign, t);
++	return 0;
++}
++
+ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
+ 	.kind		= "ip6gre",
+ 	.maxtype	= IFLA_GRE_MAX,
+@@ -2094,8 +2257,8 @@ static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
+ 	.priv_size	= sizeof(struct ip6_tnl),
+ 	.setup		= ip6erspan_tap_setup,
+ 	.validate	= ip6erspan_tap_validate,
+-	.newlink	= ip6gre_newlink,
+-	.changelink	= ip6gre_changelink,
++	.newlink	= ip6erspan_newlink,
++	.changelink	= ip6erspan_changelink,
+ 	.get_size	= ip6gre_get_size,
+ 	.fill_info	= ip6gre_fill_info,
+ 	.get_link_net	= ip6_tnl_get_link_net,
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 4065ae0c32a0..072333760a52 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1489,7 +1489,8 @@ static int __ip6_append_data(struct sock *sk,
+ 		if (copy > length)
+ 			copy = length;
+ 
+-		if (!(rt->dst.dev->features&NETIF_F_SG)) {
++		if (!(rt->dst.dev->features&NETIF_F_SG) &&
++		    skb_tailroom(skb) >= copy) {
+ 			unsigned int off;
+ 
+ 			off = skb->len;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 3b43b1fcd618..c6a2dd890de3 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2903,13 +2903,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (skb == NULL)
+ 		goto out_unlock;
+ 
+-	skb_set_network_header(skb, reserve);
++	skb_reset_network_header(skb);
+ 
+ 	err = -EINVAL;
+ 	if (sock->type == SOCK_DGRAM) {
+ 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ 		if (unlikely(offset < 0))
+ 			goto out_free;
++	} else if (reserve) {
++		skb_push(skb, reserve);
+ 	}
+ 
+ 	/* Returns -EFAULT on error */
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index c49cb61adedf..64ca017f2e00 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -161,6 +161,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ 			case htons(ETH_P_8021AD):
+ 				break;
+ 			default:
++				if (exists)
++					tcf_idr_release(*a, bind);
+ 				return -EPROTONOSUPPORT;
+ 			}
+ 		} else {
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 16644b3d2362..56c181c3feeb 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -222,10 +222,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
+ 					 extack);
+ 		if (IS_ERR(child))
+ 			return PTR_ERR(child);
+-	}
+ 
+-	if (child != &noop_qdisc)
++		/* child is fifo, no need to check for noop_qdisc */
+ 		qdisc_hash_add(child, true);
++	}
++
+ 	sch_tree_lock(sch);
+ 	q->flags = ctl->flags;
+ 	q->limit = ctl->limit;
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 03225a8df973..6f74a426f159 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -383,6 +383,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ 			err = PTR_ERR(child);
+ 			goto done;
+ 		}
++
++		/* child is fifo, no need to check for noop_qdisc */
++		qdisc_hash_add(child, true);
+ 	}
+ 
+ 	sch_tree_lock(sch);
+@@ -391,8 +394,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
+ 					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+-		if (child != &noop_qdisc)
+-			qdisc_hash_add(child, true);
+ 	}
+ 	q->limit = qopt->limit;
+ 	if (tb[TCA_TBF_PBURST])
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 74568cdbca70..d7b88b2d1b22 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -245,40 +245,45 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
+ static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
+ 			       struct nlattr *tb[])
+ {
+-	char *string, *ibname = NULL;
+-	int rc = 0;
++	char *string, *ibname;
++	int rc;
+ 
+ 	memset(pnetelem, 0, sizeof(*pnetelem));
+ 	INIT_LIST_HEAD(&pnetelem->list);
+-	if (tb[SMC_PNETID_NAME]) {
+-		string = (char *)nla_data(tb[SMC_PNETID_NAME]);
+-		if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
+-			rc = -EINVAL;
+-			goto error;
+-		}
+-	}
+-	if (tb[SMC_PNETID_ETHNAME]) {
+-		string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
+-		pnetelem->ndev = dev_get_by_name(net, string);
+-		if (!pnetelem->ndev)
+-			return -ENOENT;
+-	}
+-	if (tb[SMC_PNETID_IBNAME]) {
+-		ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
+-		ibname = strim(ibname);
+-		pnetelem->smcibdev = smc_pnet_find_ib(ibname);
+-		if (!pnetelem->smcibdev) {
+-			rc = -ENOENT;
+-			goto error;
+-		}
+-	}
+-	if (tb[SMC_PNETID_IBPORT]) {
+-		pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
+-		if (pnetelem->ib_port > SMC_MAX_PORTS) {
+-			rc = -EINVAL;
+-			goto error;
+-		}
+-	}
++
++	rc = -EINVAL;
++	if (!tb[SMC_PNETID_NAME])
++		goto error;
++	string = (char *)nla_data(tb[SMC_PNETID_NAME]);
++	if (!smc_pnetid_valid(string, pnetelem->pnet_name))
++		goto error;
++
++	rc = -EINVAL;
++	if (!tb[SMC_PNETID_ETHNAME])
++		goto error;
++	rc = -ENOENT;
++	string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
++	pnetelem->ndev = dev_get_by_name(net, string);
++	if (!pnetelem->ndev)
++		goto error;
++
++	rc = -EINVAL;
++	if (!tb[SMC_PNETID_IBNAME])
++		goto error;
++	rc = -ENOENT;
++	ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
++	ibname = strim(ibname);
++	pnetelem->smcibdev = smc_pnet_find_ib(ibname);
++	if (!pnetelem->smcibdev)
++		goto error;
++
++	rc = -EINVAL;
++	if (!tb[SMC_PNETID_IBPORT])
++		goto error;
++	pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
++	if (pnetelem->ib_port < 1 || pnetelem->ib_port > SMC_MAX_PORTS)
++		goto error;
++
+ 	return 0;
+ 
+ error:
+@@ -307,6 +312,8 @@ static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
+ 	void *hdr;
+ 	int rc;
+ 
++	if (!info->attrs[SMC_PNETID_NAME])
++		return -EINVAL;
+ 	pnetelem = smc_pnet_find_pnetid(
+ 				(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+ 	if (!pnetelem)
+@@ -359,6 +366,8 @@ static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
+ 
+ static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
+ {
++	if (!info->attrs[SMC_PNETID_NAME])
++		return -EINVAL;
+ 	return smc_pnet_remove_by_pnetid(
+ 				(char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+ }
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index a6f3cac8c640..c0fd8a85e7f7 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
+ 
+ 	ASSERT_RTNL();
+ 
++	if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
++		return -EINVAL;
++
+ 	/* prohibit calling the thing phy%d when %d is not its number */
+ 	sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
+ 	if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
+diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
+index b0825370d262..957046ac6c8c 100644
+--- a/sound/soc/rockchip/Kconfig
++++ b/sound/soc/rockchip/Kconfig
+@@ -56,6 +56,9 @@ config SND_SOC_RK3288_HDMI_ANALOG
+ 	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB && CLKDEV_LOOKUP
+ 	select SND_SOC_ROCKCHIP_I2S
+ 	select SND_SOC_HDMI_CODEC
++	select SND_SOC_ES8328_I2C
++	select SND_SOC_ES8328_SPI if SPI_MASTER
++	select DRM_DW_HDMI_I2S_AUDIO if DRM_DW_HDMI
+ 	help
+ 	  Say Y or M here if you want to add support for SoC audio on Rockchip
+ 	  RK3288 boards using an analog output and the built-in HDMI audio.
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index 233f1c9a4b6c..aeba0ae890ea 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -656,8 +656,12 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 		tmp |= mod_slave;
+ 		break;
+ 	case SND_SOC_DAIFMT_CBS_CFS:
+-		/* Set default source clock in Master mode */
+-		if (i2s->rclk_srcrate == 0)
++		/*
++		 * Set default source clock in Master mode, only when the
++		 * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any
++		 * clock configuration assigned in DT is not overwritten.
++		 */
++		if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL)
+ 			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
+ 							0, SND_SOC_CLOCK_IN);
+ 		break;
+@@ -881,6 +885,11 @@ static int config_setup(struct i2s_dai *i2s)
+ 		return 0;
+ 
+ 	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
++		struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC];
++
++		if (i2s->rclk_srcrate == 0 && rclksrc && !IS_ERR(rclksrc))
++			i2s->rclk_srcrate = clk_get_rate(rclksrc);
++
+ 		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
+ 		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+ 		dev_dbg(&i2s->pdev->dev,
+diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
+index 44b6de5a331a..06a31a9585a0 100644
+--- a/sound/soc/samsung/odroid.c
++++ b/sound/soc/samsung/odroid.c
+@@ -36,23 +36,26 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ 	struct odroid_priv *priv = snd_soc_card_get_drvdata(rtd->card);
+-	unsigned int pll_freq, rclk_freq;
++	unsigned int pll_freq, rclk_freq, rfs;
+ 	int ret;
+ 
+ 	switch (params_rate(params)) {
+-	case 32000:
+ 	case 64000:
+-		pll_freq = 131072006U;
++		pll_freq = 196608001U;
++		rfs = 384;
+ 		break;
+ 	case 44100:
+ 	case 88200:
+ 	case 176400:
+ 		pll_freq = 180633609U;
++		rfs = 512;
+ 		break;
++	case 32000:
+ 	case 48000:
+ 	case 96000:
+ 	case 192000:
+ 		pll_freq = 196608001U;
++		rfs = 512;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -67,7 +70,7 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
+ 	 *  frequency values due to the EPLL output frequency not being exact
+ 	 *  multiple of the audio sampling rate.
+ 	 */
+-	rclk_freq = params_rate(params) * 256 + 1;
++	rclk_freq = params_rate(params) * rfs + 1;
+ 
+ 	ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
+ 	if (ret < 0)
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 782c580b7aa3..e5049fbfc4f1 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -1276,6 +1276,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ 			kfree(sm);
+ 			continue;
+ 		}
++
++		/* create any TLV data */
++		soc_tplg_create_tlv(tplg, &kc[i], &mc->hdr);
+ 	}
+ 	return kc;
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 794224e1d6df..006da37ad0d9 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1149,24 +1149,27 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	return false;
+ }
+ 
+-/* Marantz/Denon USB DACs need a vendor cmd to switch
++/* ITF-USB DSD based DACs need a vendor cmd to switch
+  * between PCM and native DSD mode
++ * (2 altsets version)
+  */
+-static bool is_marantz_denon_dac(unsigned int id)
++static bool is_itf_usb_dsd_2alts_dac(unsigned int id)
+ {
+ 	switch (id) {
+ 	case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
+ 	case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
+ 	case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
++	case USB_ID(0x1852, 0x5065): /* Luxman DA-06 */
+ 		return true;
+ 	}
+ 	return false;
+ }
+ 
+-/* TEAC UD-501/UD-503/NT-503 USB DACs need a vendor cmd to switch
+- * between PCM/DOP and native DSD mode
++/* ITF-USB DSD based DACs need a vendor cmd to switch
++ * between PCM and native DSD mode
++ * (3 altsets version)
+  */
+-static bool is_teac_dsd_dac(unsigned int id)
++static bool is_itf_usb_dsd_3alts_dac(unsigned int id)
+ {
+ 	switch (id) {
+ 	case USB_ID(0x0644, 0x8043): /* TEAC UD-501/UD-503/NT-503 */
+@@ -1183,7 +1186,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ 	struct usb_device *dev = subs->dev;
+ 	int err;
+ 
+-	if (is_marantz_denon_dac(subs->stream->chip->usb_id)) {
++	if (is_itf_usb_dsd_2alts_dac(subs->stream->chip->usb_id)) {
+ 		/* First switch to alt set 0, otherwise the mode switch cmd
+ 		 * will not be accepted by the DAC
+ 		 */
+@@ -1204,7 +1207,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
+ 			break;
+ 		}
+ 		mdelay(20);
+-	} else if (is_teac_dsd_dac(subs->stream->chip->usb_id)) {
++	} else if (is_itf_usb_dsd_3alts_dac(subs->stream->chip->usb_id)) {
+ 		/* Vendor mode switch cmd is required. */
+ 		switch (fmt->altsetting) {
+ 		case 3: /* DSD mode (DSD_U32) requested */
+@@ -1300,10 +1303,10 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		mdelay(20);
+ 
+-	/* Marantz/Denon devices with USB DAC functionality need a delay
++	/* ITF-USB DSD based DACs functionality need a delay
+ 	 * after each class compliant request
+ 	 */
+-	if (is_marantz_denon_dac(chip->usb_id)
++	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)
+ 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		mdelay(20);
+ 
+@@ -1390,14 +1393,14 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 		break;
+ 	}
+ 
+-	/* Denon/Marantz devices with USB DAC functionality */
+-	if (is_marantz_denon_dac(chip->usb_id)) {
++	/* ITF-USB DSD based DACs (2 altsets version) */
++	if (is_itf_usb_dsd_2alts_dac(chip->usb_id)) {
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 	}
+ 
+-	/* TEAC devices with USB DAC functionality */
+-	if (is_teac_dsd_dac(chip->usb_id)) {
++	/* ITF-USB DSD based DACs (3 altsets version) */
++	if (is_itf_usb_dsd_3alts_dac(chip->usb_id)) {
+ 		if (fp->altsetting == 3)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 	}


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-22 19:13 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-22 19:13 UTC (permalink / raw
  To: gentoo-commits

commit:     28809242df5420fd00f36a9bab63be9b45ecfb2b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 22 19:13:24 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 22 19:13:24 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=28809242

Linux patch 4.16.11

 0000_README              |    4 +
 1010_linux-4.16.11.patch | 4480 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4484 insertions(+)

diff --git a/0000_README b/0000_README
index 89eb684..a5237c6 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-4.16.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.10
 
+Patch:  1010_linux-4.16.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.16.11.patch b/1010_linux-4.16.11.patch
new file mode 100644
index 0000000..03e8496
--- /dev/null
+++ b/1010_linux-4.16.11.patch
@@ -0,0 +1,4480 @@
+diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
+index 4ed63b6cfb15..2e5e0232b8fa 100644
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -453,6 +453,7 @@ What:		/sys/devices/system/cpu/vulnerabilities
+ 		/sys/devices/system/cpu/vulnerabilities/meltdown
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
+ 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
++		/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+ Date:		January 2018
+ Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description:	Information about CPU vulnerabilities
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 1d1d53f85ddd..9824d049367e 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2647,6 +2647,9 @@
+ 			allow data leaks with this option, which is equivalent
+ 			to spectre_v2=off.
+ 
++	nospec_store_bypass_disable
++			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
++
+ 	noxsave		[BUGS=X86] Disables x86 extended register state save
+ 			and restore using xsave. The kernel will fallback to
+ 			enabling legacy floating-point and sse state.
+@@ -3997,6 +4000,48 @@
+ 			Not specifying this option is equivalent to
+ 			spectre_v2=auto.
+ 
++	spec_store_bypass_disable=
++			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
++			(Speculative Store Bypass vulnerability)
++
++			Certain CPUs are vulnerable to an exploit against a
++			a common industry wide performance optimization known
++			as "Speculative Store Bypass" in which recent stores
++			to the same memory location may not be observed by
++			later loads during speculative execution. The idea
++			is that such stores are unlikely and that they can
++			be detected prior to instruction retirement at the
++			end of a particular speculation execution window.
++
++			In vulnerable processors, the speculatively forwarded
++			store can be used in a cache side channel attack, for
++			example to read memory to which the attacker does not
++			directly have access (e.g. inside sandboxed code).
++
++			This parameter controls whether the Speculative Store
++			Bypass optimization is used.
++
++			on      - Unconditionally disable Speculative Store Bypass
++			off     - Unconditionally enable Speculative Store Bypass
++			auto    - Kernel detects whether the CPU model contains an
++				  implementation of Speculative Store Bypass and
++				  picks the most appropriate mitigation. If the
++				  CPU is not vulnerable, "off" is selected. If the
++				  CPU is vulnerable the default mitigation is
++				  architecture and Kconfig dependent. See below.
++			prctl   - Control Speculative Store Bypass per thread
++				  via prctl. Speculative Store Bypass is enabled
++				  for a process by default. The state of the control
++				  is inherited on fork.
++			seccomp - Same as "prctl" above, but all seccomp threads
++				  will disable SSB unless they explicitly opt out.
++
++			Not specifying this option is equivalent to
++			spec_store_bypass_disable=auto.
++
++			Default mitigations:
++			X86:	If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
++
+ 	spia_io_base=	[HW,MTD]
+ 	spia_fio_base=
+ 	spia_pedr=
+diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
+index 1814fa13f6ab..fc019df0d863 100644
+--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
++++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
+@@ -21,9 +21,10 @@ Required properties:
+ 	- main controller clock (for both armada-375-pp2 and armada-7k-pp2)
+ 	- GOP clock (for both armada-375-pp2 and armada-7k-pp2)
+ 	- MG clock (only for armada-7k-pp2)
++	- MG Core clock (only for armada-7k-pp2)
+ 	- AXI clock (only for armada-7k-pp2)
+-- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
+-  and "axi_clk" (the 2 latter only for armada-7k-pp2).
++- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk",
++  "mg_core_clk" and "axi_clk" (the 3 latter only for armada-7k-pp2).
+ 
+ The ethernet ports are represented by subnodes. At least one port is
+ required.
+@@ -80,8 +81,8 @@ cpm_ethernet: ethernet@0 {
+ 	compatible = "marvell,armada-7k-pp22";
+ 	reg = <0x0 0x100000>, <0x129000 0xb000>;
+ 	clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
+-		 <&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
+-	clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";
++		 <&cpm_syscon0 1 5>, <&cpm_syscon0 1 6>, <&cpm_syscon0 1 18>;
++	clock-names = "pp_clk", "gop_clk", "mg_clk", "mg_core_clk", "axi_clk";
+ 
+ 	eth0: eth0 {
+ 		interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
+index 7b2eb1b7d4ca..a3233da7fa88 100644
+--- a/Documentation/userspace-api/index.rst
++++ b/Documentation/userspace-api/index.rst
+@@ -19,6 +19,7 @@ place where this information is gathered.
+    no_new_privs
+    seccomp_filter
+    unshare
++   spec_ctrl
+ 
+ .. only::  subproject and html
+ 
+diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
+new file mode 100644
+index 000000000000..32f3d55c54b7
+--- /dev/null
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -0,0 +1,94 @@
++===================
++Speculation Control
++===================
++
++Quite some CPUs have speculation-related misfeatures which are in
++fact vulnerabilities causing data leaks in various forms even across
++privilege domains.
++
++The kernel provides mitigation for such vulnerabilities in various
++forms. Some of these mitigations are compile-time configurable and some
++can be supplied on the kernel command line.
++
++There is also a class of mitigations which are very expensive, but they can
++be restricted to a certain set of processes or tasks in controlled
++environments. The mechanism to control these mitigations is via
++:manpage:`prctl(2)`.
++
++There are two prctl options which are related to this:
++
++ * PR_GET_SPECULATION_CTRL
++
++ * PR_SET_SPECULATION_CTRL
++
++PR_GET_SPECULATION_CTRL
++-----------------------
++
++PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
++which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
++the following meaning:
++
++==== ===================== ===================================================
++Bit  Define                Description
++==== ===================== ===================================================
++0    PR_SPEC_PRCTL         Mitigation can be controlled per task by
++                           PR_SET_SPECULATION_CTRL.
++1    PR_SPEC_ENABLE        The speculation feature is enabled, mitigation is
++                           disabled.
++2    PR_SPEC_DISABLE       The speculation feature is disabled, mitigation is
++                           enabled.
++3    PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
++                           subsequent prctl(..., PR_SPEC_ENABLE) will fail.
++==== ===================== ===================================================
++
++If all bits are 0 the CPU is not affected by the speculation misfeature.
++
++If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
++available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
++misfeature will fail.
++
++PR_SET_SPECULATION_CTRL
++-----------------------
++
++PR_SET_SPECULATION_CTRL allows to control the speculation misfeature, which
++is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
++in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
++PR_SPEC_FORCE_DISABLE.
++
++Common error codes
++------------------
++======= =================================================================
++Value   Meaning
++======= =================================================================
++EINVAL  The prctl is not implemented by the architecture or unused
++        prctl(2) arguments are not 0.
++
++ENODEV  arg2 is selecting a not supported speculation misfeature.
++======= =================================================================
++
++PR_SET_SPECULATION_CTRL error codes
++-----------------------------------
++======= =================================================================
++Value   Meaning
++======= =================================================================
++0       Success
++
++ERANGE  arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
++        PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
++
++ENXIO   Control of the selected speculation misfeature is not possible.
++        See PR_GET_SPECULATION_CTRL.
++
++EPERM   Speculation was disabled with PR_SPEC_FORCE_DISABLE and caller
++        tried to enable it again.
++======= =================================================================
++
++Speculation misfeature controls
++-------------------------------
++- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
++
++  Invocations:
++   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
++   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
++   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
++   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+diff --git a/Makefile b/Makefile
+index 33f3c94f02ca..79c191442771 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
+index bc8d4bbd82e2..9342904cccca 100644
+--- a/arch/arm/include/asm/assembler.h
++++ b/arch/arm/include/asm/assembler.h
+@@ -536,4 +536,14 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+ #endif
+ 	.endm
+ 
++#ifdef CONFIG_KPROBES
++#define _ASM_NOKPROBE(entry)				\
++	.pushsection "_kprobe_blacklist", "aw" ;	\
++	.balign 4 ;					\
++	.long entry;					\
++	.popsection
++#else
++#define _ASM_NOKPROBE(entry)
++#endif
++
+ #endif /* __ASM_ASSEMBLER_H__ */
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index de1b919404e4..1f0b07aef85b 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -295,6 +295,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
+ 	return 8;
+ }
+ 
++/*
++ * We are not in the kvm->srcu critical section most of the time, so we take
++ * the SRCU read lock here. Since we copy the data from the user page, we
++ * can immediately drop the lock again.
++ */
++static inline int kvm_read_guest_lock(struct kvm *kvm,
++				      gpa_t gpa, void *data, unsigned long len)
++{
++	int srcu_idx = srcu_read_lock(&kvm->srcu);
++	int ret = kvm_read_guest(kvm, gpa, data, len);
++
++	srcu_read_unlock(&kvm->srcu, srcu_idx);
++
++	return ret;
++}
++
+ static inline void *kvm_get_hyp_vector(void)
+ {
+ 	return kvm_ksym_ref(__kvm_hyp_vector);
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 5e3633c24e63..2fe87109ae46 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -19,6 +19,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
+ #include <linux/kdebug.h>
++#include <linux/kprobes.h>
+ #include <linux/module.h>
+ #include <linux/kexec.h>
+ #include <linux/bug.h>
+@@ -417,7 +418,8 @@ void unregister_undef_hook(struct undef_hook *hook)
+ 	raw_spin_unlock_irqrestore(&undef_lock, flags);
+ }
+ 
+-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
++static nokprobe_inline
++int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+ {
+ 	struct undef_hook *hook;
+ 	unsigned long flags;
+@@ -490,6 +492,7 @@ asmlinkage void do_undefinstr(struct pt_regs *regs)
+ 
+ 	arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+ }
++NOKPROBE_SYMBOL(do_undefinstr)
+ 
+ /*
+  * Handle FIQ similarly to NMI on x86 systems.
+diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
+index df73914e81c8..746e7801dcdf 100644
+--- a/arch/arm/lib/getuser.S
++++ b/arch/arm/lib/getuser.S
+@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_1)
++_ASM_NOKPROBE(__get_user_1)
+ 
+ ENTRY(__get_user_2)
+ 	check_uaccess r0, 2, r1, r2, __get_user_bad
+@@ -58,6 +59,7 @@ rb	.req	r0
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_2)
++_ASM_NOKPROBE(__get_user_2)
+ 
+ ENTRY(__get_user_4)
+ 	check_uaccess r0, 4, r1, r2, __get_user_bad
+@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_4)
++_ASM_NOKPROBE(__get_user_4)
+ 
+ ENTRY(__get_user_8)
+ 	check_uaccess r0, 8, r1, r2, __get_user_bad8
+@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_8)
++_ASM_NOKPROBE(__get_user_8)
+ 
+ #ifdef __ARMEB__
+ ENTRY(__get_user_32t_8)
+@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_32t_8)
++_ASM_NOKPROBE(__get_user_32t_8)
+ 
+ ENTRY(__get_user_64t_1)
+ 	check_uaccess r0, 1, r1, r2, __get_user_bad8
+@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_1)
++_ASM_NOKPROBE(__get_user_64t_1)
+ 
+ ENTRY(__get_user_64t_2)
+ 	check_uaccess r0, 2, r1, r2, __get_user_bad8
+@@ -114,6 +120,7 @@ rb	.req	r0
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_2)
++_ASM_NOKPROBE(__get_user_64t_2)
+ 
+ ENTRY(__get_user_64t_4)
+ 	check_uaccess r0, 4, r1, r2, __get_user_bad8
+@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_4)
++_ASM_NOKPROBE(__get_user_64t_4)
+ #endif
+ 
+ __get_user_bad8:
+@@ -131,6 +139,8 @@ __get_user_bad:
+ 	ret	lr
+ ENDPROC(__get_user_bad)
+ ENDPROC(__get_user_bad8)
++_ASM_NOKPROBE(__get_user_bad)
++_ASM_NOKPROBE(__get_user_bad8)
+ 
+ .pushsection __ex_table, "a"
+ 	.long	1b, __get_user_bad
+diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
+index bcdecc25461b..b2aa9b32bff2 100644
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+ {
+ 	unsigned long flags;
+ 	struct kprobe *p = &op->kp;
+-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
++	struct kprobe_ctlblk *kcb;
+ 
+ 	/* Save skipped registers */
+ 	regs->ARM_pc = (unsigned long)op->kp.addr;
+ 	regs->ARM_ORIG_r0 = ~0UL;
+ 
+ 	local_irq_save(flags);
++	kcb = get_kprobe_ctlblk();
+ 
+ 	if (kprobe_running()) {
+ 		kprobes_inc_nmissed_count(&op->kp);
+@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+ 
+ 	local_irq_restore(flags);
+ }
++NOKPROBE_SYMBOL(optimized_callback)
+ 
+ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
+ {
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+index a8af4136dbe7..a97d9245e883 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+@@ -40,9 +40,10 @@
+ 			compatible = "marvell,armada-7k-pp22";
+ 			reg = <0x0 0x100000>, <0x129000 0xb000>;
+ 			clocks = <&CP110_LABEL(clk) 1 3>, <&CP110_LABEL(clk) 1 9>,
+-				 <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 18>;
++				 <&CP110_LABEL(clk) 1 5>, <&CP110_LABEL(clk) 1 6>,
++				 <&CP110_LABEL(clk) 1 18>;
+ 			clock-names = "pp_clk", "gop_clk",
+-				      "mg_clk", "axi_clk";
++				      "mg_clk", "mg_core_clk", "axi_clk";
+ 			marvell,system-controller = <&CP110_LABEL(syscon0)>;
+ 			status = "disabled";
+ 			dma-coherent;
+@@ -143,6 +144,8 @@
+ 			#size-cells = <0>;
+ 			compatible = "marvell,xmdio";
+ 			reg = <0x12a600 0x10>;
++			clocks = <&CP110_LABEL(clk) 1 5>,
++				 <&CP110_LABEL(clk) 1 6>, <&CP110_LABEL(clk) 1 18>;
+ 			status = "disabled";
+ 		};
+ 
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 7faed6e48b46..c2b7a77702e7 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -348,6 +348,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
+ 	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+ }
+ 
++/*
++ * We are not in the kvm->srcu critical section most of the time, so we take
++ * the SRCU read lock here. Since we copy the data from the user page, we
++ * can immediately drop the lock again.
++ */
++static inline int kvm_read_guest_lock(struct kvm *kvm,
++				      gpa_t gpa, void *data, unsigned long len)
++{
++	int srcu_idx = srcu_read_lock(&kvm->srcu);
++	int ret = kvm_read_guest(kvm, gpa, data, len);
++
++	srcu_read_unlock(&kvm->srcu, srcu_idx);
++
++	return ret;
++}
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ #include <asm/mmu.h>
+ 
+diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
+index 1bceb95f422d..5584247f5029 100644
+--- a/arch/powerpc/platforms/powernv/opal-nvram.c
++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
+@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
+ 	return count;
+ }
+ 
++/*
++ * This can be called in the panic path with interrupts off, so use
++ * mdelay in that case.
++ */
+ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+ {
+ 	s64 rc = OPAL_BUSY;
+@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_write_nvram(__pa(buf), count, off);
+ 		if (rc == OPAL_BUSY_EVENT) {
+-			msleep(OPAL_BUSY_DELAY_MS);
++			if (in_interrupt() || irqs_disabled())
++				mdelay(OPAL_BUSY_DELAY_MS);
++			else
++				msleep(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
+ 		} else if (rc == OPAL_BUSY) {
+-			msleep(OPAL_BUSY_DELAY_MS);
++			if (in_interrupt() || irqs_disabled())
++				mdelay(OPAL_BUSY_DELAY_MS);
++			else
++				msleep(OPAL_BUSY_DELAY_MS);
+ 		}
+ 	}
+ 
+diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
+index 94f2099bceb0..3d17c41074ca 100644
+--- a/arch/s390/kernel/irq.c
++++ b/arch/s390/kernel/irq.c
+@@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
+ 		new -= STACK_FRAME_OVERHEAD;
+ 		((struct stack_frame *) new)->back_chain = old;
+ 		asm volatile("   la    15,0(%0)\n"
+-			     "   basr  14,%2\n"
++			     "   brasl 14,__do_softirq\n"
+ 			     "   la    15,0(%1)\n"
+-			     : : "a" (new), "a" (old),
+-			         "a" (__do_softirq)
++			     : : "a" (new), "a" (old)
+ 			     : "0", "1", "2", "3", "4", "5", "14",
+ 			       "cc", "memory" );
+ 	} else {
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 1c9ddd7aa5ec..0292d68e7dde 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event)
+ 	 */
+ 	rate = 0;
+ 	if (attr->freq) {
++		if (!attr->sample_freq) {
++			err = -EINVAL;
++			goto out;
++		}
+ 		rate = freq_to_sample_rate(&si, attr->sample_freq);
+ 		rate = hw_limit_rate(&si, rate);
+ 		attr->freq = 0;
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index 886a9115af62..48db9732b684 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -163,7 +163,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
+ 	if (status != EFI_SUCCESS)
+ 		goto free_struct;
+ 
+-	memcpy(rom->romdata, pci->romimage, pci->romsize);
++	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
++	       pci->romsize);
+ 	return status;
+ 
+ free_struct:
+@@ -269,7 +270,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
+ 	if (status != EFI_SUCCESS)
+ 		goto free_struct;
+ 
+-	memcpy(rom->romdata, pci->romimage, pci->romsize);
++	memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
++	       pci->romsize);
+ 	return status;
+ 
+ free_struct:
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index d554c11e01ff..2464ad88287c 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -198,7 +198,6 @@
+ #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
+ #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
+ #define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
+-
+ #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
+ #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
+ #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
+@@ -207,13 +206,19 @@
+ #define X86_FEATURE_RETPOLINE_AMD	( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
+ #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
+-
++#define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
++#define X86_FEATURE_SSBD		( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA			( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW		( 7*32+19) /* "" Fill RSB on context switches */
+ #define X86_FEATURE_SEV			( 7*32+20) /* AMD Secure Encrypted Virtualization */
+-
+ #define X86_FEATURE_USE_IBPB		( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW		( 7*32+22) /* "" Use IBRS during runtime firmware calls */
++#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE	( 7*32+23) /* "" Disable Speculative Store Bypass. */
++#define X86_FEATURE_LS_CFG_SSBD		( 7*32+24)  /* "" AMD SSBD implementation via LS_CFG MSR */
++#define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
++#define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
++#define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+ 
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
+@@ -274,9 +279,10 @@
+ #define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */
+ #define X86_FEATURE_IRPERF		(13*32+ 1) /* Instructions Retired Count */
+ #define X86_FEATURE_XSAVEERPTR		(13*32+ 2) /* Always save/restore FP error pointers */
+-#define X86_FEATURE_IBPB		(13*32+12) /* Indirect Branch Prediction Barrier */
+-#define X86_FEATURE_IBRS		(13*32+14) /* Indirect Branch Restricted Speculation */
+-#define X86_FEATURE_STIBP		(13*32+15) /* Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
++#define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
++#define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
+ 
+ /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
+ #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
+@@ -333,6 +339,7 @@
+ #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
++#define X86_FEATURE_SPEC_CTRL_SSBD	(18*32+31) /* "" Speculative Store Bypass Disable */
+ 
+ /*
+  * BUG word(s)
+@@ -362,5 +369,6 @@
+ #define X86_BUG_CPU_MELTDOWN		X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+ #define X86_BUG_SPECTRE_V1		X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2		X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
++#define X86_BUG_SPEC_STORE_BYPASS	X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+ 
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index b605a5b6a30c..4b0539a52c4c 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -933,7 +933,7 @@ struct kvm_x86_ops {
+ 	int (*hardware_setup)(void);               /* __init */
+ 	void (*hardware_unsetup)(void);            /* __exit */
+ 	bool (*cpu_has_accelerated_tpr)(void);
+-	bool (*cpu_has_high_real_mode_segbase)(void);
++	bool (*has_emulated_msr)(int index);
+ 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
+ 
+ 	int (*vm_init)(struct kvm *kvm);
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 1de72ce514cd..ed97ef3b48a7 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -192,7 +192,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ 
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+ 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+-		/* pkey 0 is the default and always allocated */
++		/* pkey 0 is the default and allocated implicitly */
+ 		mm->context.pkey_allocation_map = 0x1;
+ 		/* -1 means unallocated or invalid */
+ 		mm->context.execute_only_pkey = -1;
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index c9084dedfcfa..1fce70c0f799 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -42,6 +42,8 @@
+ #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
+ #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
+ #define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
++#define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
++#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
+ 
+ #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
+ #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
+@@ -68,6 +70,11 @@
+ #define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
+ #define ARCH_CAP_RDCL_NO		(1 << 0)   /* Not susceptible to Meltdown */
+ #define ARCH_CAP_IBRS_ALL		(1 << 1)   /* Enhanced IBRS support */
++#define ARCH_CAP_SSB_NO			(1 << 4)   /*
++						    * Not susceptible to Speculative Store Bypass
++						    * attack, so no Speculative Store Bypass
++						    * control required.
++						    */
+ 
+ #define MSR_IA32_BBL_CR_CTL		0x00000119
+ #define MSR_IA32_BBL_CR_CTL3		0x0000011e
+@@ -340,6 +347,8 @@
+ #define MSR_AMD64_SEV_ENABLED_BIT	0
+ #define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+ 
++#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
++
+ /* Fam 17h MSRs */
+ #define MSR_F17H_IRPERF			0xc00000e9
+ 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index f928ad9b143f..8b38df98548e 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -217,6 +217,14 @@ enum spectre_v2_mitigation {
+ 	SPECTRE_V2_IBRS,
+ };
+ 
++/* The Speculative Store Bypass disable variants */
++enum ssb_mitigation {
++	SPEC_STORE_BYPASS_NONE,
++	SPEC_STORE_BYPASS_DISABLE,
++	SPEC_STORE_BYPASS_PRCTL,
++	SPEC_STORE_BYPASS_SECCOMP,
++};
++
+ extern char __indirect_thunk_start[];
+ extern char __indirect_thunk_end[];
+ 
+@@ -241,22 +249,27 @@ static inline void vmexit_fill_RSB(void)
+ #endif
+ }
+ 
+-#define alternative_msr_write(_msr, _val, _feature)		\
+-	asm volatile(ALTERNATIVE("",				\
+-				 "movl %[msr], %%ecx\n\t"	\
+-				 "movl %[val], %%eax\n\t"	\
+-				 "movl $0, %%edx\n\t"		\
+-				 "wrmsr",			\
+-				 _feature)			\
+-		     : : [msr] "i" (_msr), [val] "i" (_val)	\
+-		     : "eax", "ecx", "edx", "memory")
++static __always_inline
++void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
++{
++	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
++		: : "c" (msr),
++		    "a" ((u32)val),
++		    "d" ((u32)(val >> 32)),
++		    [feature] "i" (feature)
++		: "memory");
++}
+ 
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+-	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
+-			      X86_FEATURE_USE_IBPB);
++	u64 val = PRED_CMD_IBPB;
++
++	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+ }
+ 
++/* The Intel SPEC CTRL MSR base value cache */
++extern u64 x86_spec_ctrl_base;
++
+ /*
+  * With retpoline, we must use IBRS to restrict branch prediction
+  * before calling into firmware.
+@@ -265,14 +278,18 @@ static inline void indirect_branch_prediction_barrier(void)
+  */
+ #define firmware_restrict_branch_speculation_start()			\
+ do {									\
++	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
++									\
+ 	preempt_disable();						\
+-	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
++	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+ 			      X86_FEATURE_USE_IBRS_FW);			\
+ } while (0)
+ 
+ #define firmware_restrict_branch_speculation_end()			\
+ do {									\
+-	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
++	u64 val = x86_spec_ctrl_base;					\
++									\
++	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
+ 			      X86_FEATURE_USE_IBRS_FW);			\
+ 	preempt_enable();						\
+ } while (0)
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index a0ba1ffda0df..851c04b7a092 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_PKEYS_H
+ #define _ASM_X86_PKEYS_H
+ 
++#define ARCH_DEFAULT_PKEY	0
++
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+ 
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+@@ -15,7 +17,7 @@ extern int __execute_only_pkey(struct mm_struct *mm);
+ static inline int execute_only_pkey(struct mm_struct *mm)
+ {
+ 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+-		return 0;
++		return ARCH_DEFAULT_PKEY;
+ 
+ 	return __execute_only_pkey(mm);
+ }
+@@ -49,13 +51,21 @@ bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+ {
+ 	/*
+ 	 * "Allocated" pkeys are those that have been returned
+-	 * from pkey_alloc().  pkey 0 is special, and never
+-	 * returned from pkey_alloc().
++	 * from pkey_alloc() or pkey 0 which is allocated
++	 * implicitly when the mm is created.
+ 	 */
+-	if (pkey <= 0)
++	if (pkey < 0)
+ 		return false;
+ 	if (pkey >= arch_max_pkey())
+ 		return false;
++	/*
++	 * The exec-only pkey is set in the allocation map, but
++	 * is not available to any of the user interfaces like
++	 * mprotect_pkey().
++	 */
++	if (pkey == mm->context.execute_only_pkey)
++		return false;
++
+ 	return mm_pkey_allocation_map(mm) & (1U << pkey);
+ }
+ 
+diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
+new file mode 100644
+index 000000000000..ae7c2c5cd7f0
+--- /dev/null
++++ b/arch/x86/include/asm/spec-ctrl.h
+@@ -0,0 +1,80 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_X86_SPECCTRL_H_
++#define _ASM_X86_SPECCTRL_H_
++
++#include <linux/thread_info.h>
++#include <asm/nospec-branch.h>
++
++/*
++ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
++ * the guest has, while on VMEXIT we restore the host view. This
++ * would be easier if SPEC_CTRL were architecturally maskable or
++ * shadowable for guests but this is not (currently) the case.
++ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
++ * the guest's version of VIRT_SPEC_CTRL, if emulated.
++ */
++extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
++
++/**
++ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
++ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ *				(may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
++}
++
++/**
++ * x86_spec_ctrl_restore_host - Restore host speculation control registers
++ * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
++ * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
++ *				(may get translated to MSR_AMD64_LS_CFG bits)
++ *
++ * Avoids writing to the MSR if the content/bits are the same
++ */
++static inline
++void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
++{
++	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
++}
++
++/* AMD specific Speculative Store Bypass MSR data */
++extern u64 x86_amd_ls_cfg_base;
++extern u64 x86_amd_ls_cfg_ssbd_mask;
++
++static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
++{
++	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
++}
++
++static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
++{
++	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
++	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
++}
++
++static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
++{
++	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
++}
++
++#ifdef CONFIG_SMP
++extern void speculative_store_bypass_ht_init(void);
++#else
++static inline void speculative_store_bypass_ht_init(void) { }
++#endif
++
++extern void speculative_store_bypass_update(unsigned long tif);
++
++static inline void speculative_store_bypass_update_current(void)
++{
++	speculative_store_bypass_update(current_thread_info()->flags);
++}
++
++#endif
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index a5d9521bb2cb..2ff2a30a264f 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -79,6 +79,7 @@ struct thread_info {
+ #define TIF_SIGPENDING		2	/* signal pending */
+ #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+ #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
++#define TIF_SSBD			5	/* Reduced data speculation */
+ #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
+ #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
+ #define TIF_SECCOMP		8	/* secure computing */
+@@ -105,6 +106,7 @@ struct thread_info {
+ #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+ #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+ #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
++#define _TIF_SSBD		(1 << TIF_SSBD)
+ #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
+ #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+ #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+@@ -144,7 +146,7 @@ struct thread_info {
+ 
+ /* flags to check in __switch_to() */
+ #define _TIF_WORK_CTXSW							\
+-	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
++	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+ 
+ #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index c88e0b127810..b481b95bd8f6 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -14,8 +14,11 @@
+ #include <asm/amd_nb.h>
+ 
+ #define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
++#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
+ #define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
+ #define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
++#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
++#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
+ 
+ /* Protect the PCI config register pairs used for SMN and DF indirect access. */
+ static DEFINE_MUTEX(smn_mutex);
+@@ -24,6 +27,7 @@ static u32 *flush_words;
+ 
+ static const struct pci_device_id amd_root_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
+ 	{}
+ };
+ 
+@@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
+ 	{}
+ };
+@@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
++	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ 	{}
+ };
+diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
+index 8b04234e010b..7685444a106b 100644
+--- a/arch/x86/kernel/apic/x2apic_cluster.c
++++ b/arch/x86/kernel/apic/x2apic_cluster.c
+@@ -116,6 +116,7 @@ static void init_x2apic_ldr(void)
+ 			goto update;
+ 	}
+ 	cmsk = cluster_hotplug_mask;
++	cmsk->clusterid = cluster;
+ 	cluster_hotplug_mask = NULL;
+ update:
+ 	this_cpu_write(cluster_masks, cmsk);
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index f0e6456ca7d3..718fae428124 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/apic.h>
+ #include <asm/cpu.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/smp.h>
+ #include <asm/pci-direct.h>
+ #include <asm/delay.h>
+@@ -554,6 +555,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ 		rdmsrl(MSR_FAM10H_NODE_ID, value);
+ 		nodes_per_socket = ((value >> 3) & 7) + 1;
+ 	}
++
++	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
++		unsigned int bit;
++
++		switch (c->x86) {
++		case 0x15: bit = 54; break;
++		case 0x16: bit = 33; break;
++		case 0x17: bit = 10; break;
++		default: return;
++		}
++		/*
++		 * Try to cache the base value so further operations can
++		 * avoid RMW. If that faults, do not enable SSBD.
++		 */
++		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
++			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
++			setup_force_cpu_cap(X86_FEATURE_SSBD);
++			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
++		}
++	}
+ }
+ 
+ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+@@ -791,6 +812,7 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ 
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+ {
++	set_cpu_cap(c, X86_FEATURE_ZEN);
+ 	/*
+ 	 * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
+ 	 * all up to and including B1.
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index bfca937bdcc3..7416fc206b4a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -12,8 +12,10 @@
+ #include <linux/utsname.h>
+ #include <linux/cpu.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+ 
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ #include <asm/cmdline.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+@@ -27,6 +29,27 @@
+ #include <asm/intel-family.h>
+ 
+ static void __init spectre_v2_select_mitigation(void);
++static void __init ssb_select_mitigation(void);
++
++/*
++ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
++ * writes to SPEC_CTRL contain whatever reserved bits have been set.
++ */
++u64 __ro_after_init x86_spec_ctrl_base;
++EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
++
++/*
++ * The vendor and possibly platform specific bits which can be modified in
++ * x86_spec_ctrl_base.
++ */
++static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
++
++/*
++ * AMD specific MSR info for Speculative Store Bypass control.
++ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
++ */
++u64 __ro_after_init x86_amd_ls_cfg_base;
++u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+ 
+ void __init check_bugs(void)
+ {
+@@ -37,9 +60,27 @@ void __init check_bugs(void)
+ 		print_cpu_info(&boot_cpu_data);
+ 	}
+ 
++	/*
++	 * Read the SPEC_CTRL MSR to account for reserved bits which may
++	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
++	 * init code as it is not enumerated and depends on the family.
++	 */
++	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
++	/* Allow STIBP in MSR_SPEC_CTRL if supported */
++	if (boot_cpu_has(X86_FEATURE_STIBP))
++		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
++
+ 	/* Select the proper spectre mitigation before patching alternatives */
+ 	spectre_v2_select_mitigation();
+ 
++	/*
++	 * Select proper mitigation for any exposure to the Speculative Store
++	 * Bypass vulnerability.
++	 */
++	ssb_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ 	/*
+ 	 * Check whether we are able to run this kernel safely on SMP.
+@@ -93,7 +134,76 @@ static const char *spectre_v2_strings[] = {
+ #undef pr_fmt
+ #define pr_fmt(fmt)     "Spectre V2 : " fmt
+ 
+-static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
++static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
++	SPECTRE_V2_NONE;
++
++void
++x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
++{
++	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
++	struct thread_info *ti = current_thread_info();
++
++	/* Is MSR_SPEC_CTRL implemented ? */
++	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
++		/*
++		 * Restrict guest_spec_ctrl to supported values. Clear the
++		 * modifiable bits in the host base value and or the
++		 * modifiable bits from the guest value.
++		 */
++		guestval = hostval & ~x86_spec_ctrl_mask;
++		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
++
++		/* SSBD controlled in MSR_SPEC_CTRL */
++		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
++			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
++
++		if (hostval != guestval) {
++			msrval = setguest ? guestval : hostval;
++			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
++		}
++	}
++
++	/*
++	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
++	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
++	 */
++	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
++	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
++		return;
++
++	/*
++	 * If the host has SSBD mitigation enabled, force it in the host's
++	 * virtual MSR value. If its not permanently enabled, evaluate
++	 * current's TIF_SSBD thread flag.
++	 */
++	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
++		hostval = SPEC_CTRL_SSBD;
++	else
++		hostval = ssbd_tif_to_spec_ctrl(ti->flags);
++
++	/* Sanitize the guest value */
++	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
++
++	if (hostval != guestval) {
++		unsigned long tif;
++
++		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
++				 ssbd_spec_ctrl_to_tif(hostval);
++
++		speculative_store_bypass_update(tif);
++	}
++}
++EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
++
++static void x86_amd_ssb_disable(void)
++{
++	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
++
++	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
++	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++		wrmsrl(MSR_AMD64_LS_CFG, msrval);
++}
+ 
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+@@ -312,32 +422,289 @@ static void __init spectre_v2_select_mitigation(void)
+ }
+ 
+ #undef pr_fmt
++#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt
++
++static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
++
++/* The kernel command line selection */
++enum ssb_mitigation_cmd {
++	SPEC_STORE_BYPASS_CMD_NONE,
++	SPEC_STORE_BYPASS_CMD_AUTO,
++	SPEC_STORE_BYPASS_CMD_ON,
++	SPEC_STORE_BYPASS_CMD_PRCTL,
++	SPEC_STORE_BYPASS_CMD_SECCOMP,
++};
++
++static const char *ssb_strings[] = {
++	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
++	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
++	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
++	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
++};
++
++static const struct {
++	const char *option;
++	enum ssb_mitigation_cmd cmd;
++} ssb_mitigation_options[] = {
++	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
++	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
++	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
++	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
++	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
++};
++
++static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
++{
++	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
++	char arg[20];
++	int ret, i;
++
++	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
++		return SPEC_STORE_BYPASS_CMD_NONE;
++	} else {
++		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
++					  arg, sizeof(arg));
++		if (ret < 0)
++			return SPEC_STORE_BYPASS_CMD_AUTO;
++
++		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
++			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
++				continue;
++
++			cmd = ssb_mitigation_options[i].cmd;
++			break;
++		}
++
++		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
++			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
++			return SPEC_STORE_BYPASS_CMD_AUTO;
++		}
++	}
++
++	return cmd;
++}
++
++static enum ssb_mitigation __init __ssb_select_mitigation(void)
++{
++	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
++	enum ssb_mitigation_cmd cmd;
++
++	if (!boot_cpu_has(X86_FEATURE_SSBD))
++		return mode;
++
++	cmd = ssb_parse_cmdline();
++	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
++	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
++	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
++		return mode;
++
++	switch (cmd) {
++	case SPEC_STORE_BYPASS_CMD_AUTO:
++	case SPEC_STORE_BYPASS_CMD_SECCOMP:
++		/*
++		 * Choose prctl+seccomp as the default mode if seccomp is
++		 * enabled.
++		 */
++		if (IS_ENABLED(CONFIG_SECCOMP))
++			mode = SPEC_STORE_BYPASS_SECCOMP;
++		else
++			mode = SPEC_STORE_BYPASS_PRCTL;
++		break;
++	case SPEC_STORE_BYPASS_CMD_ON:
++		mode = SPEC_STORE_BYPASS_DISABLE;
++		break;
++	case SPEC_STORE_BYPASS_CMD_PRCTL:
++		mode = SPEC_STORE_BYPASS_PRCTL;
++		break;
++	case SPEC_STORE_BYPASS_CMD_NONE:
++		break;
++	}
++
++	/*
++	 * We have three CPU feature flags that are in play here:
++	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
++	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
++	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
++	 */
++	if (mode == SPEC_STORE_BYPASS_DISABLE) {
++		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
++		/*
++		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
++		 * a completely different MSR and bit dependent on family.
++		 */
++		switch (boot_cpu_data.x86_vendor) {
++		case X86_VENDOR_INTEL:
++			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
++			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
++			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++			break;
++		case X86_VENDOR_AMD:
++			x86_amd_ssb_disable();
++			break;
++		}
++	}
++
++	return mode;
++}
++
++static void ssb_select_mitigation(void)
++{
++	ssb_mode = __ssb_select_mitigation();
++
++	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++		pr_info("%s\n", ssb_strings[ssb_mode]);
++}
++
++#undef pr_fmt
++#define pr_fmt(fmt)     "Speculation prctl: " fmt
++
++static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
++{
++	bool update;
++
++	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
++	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
++		return -ENXIO;
++
++	switch (ctrl) {
++	case PR_SPEC_ENABLE:
++		/* If speculation is force disabled, enable is not allowed */
++		if (task_spec_ssb_force_disable(task))
++			return -EPERM;
++		task_clear_spec_ssb_disable(task);
++		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
++		break;
++	case PR_SPEC_DISABLE:
++		task_set_spec_ssb_disable(task);
++		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
++		break;
++	case PR_SPEC_FORCE_DISABLE:
++		task_set_spec_ssb_disable(task);
++		task_set_spec_ssb_force_disable(task);
++		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
++		break;
++	default:
++		return -ERANGE;
++	}
++
++	/*
++	 * If being set on non-current task, delay setting the CPU
++	 * mitigation until it is next scheduled.
++	 */
++	if (task == current && update)
++		speculative_store_bypass_update_current();
++
++	return 0;
++}
++
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++			     unsigned long ctrl)
++{
++	switch (which) {
++	case PR_SPEC_STORE_BYPASS:
++		return ssb_prctl_set(task, ctrl);
++	default:
++		return -ENODEV;
++	}
++}
++
++#ifdef CONFIG_SECCOMP
++void arch_seccomp_spec_mitigate(struct task_struct *task)
++{
++	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
++		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
++}
++#endif
++
++static int ssb_prctl_get(struct task_struct *task)
++{
++	switch (ssb_mode) {
++	case SPEC_STORE_BYPASS_DISABLE:
++		return PR_SPEC_DISABLE;
++	case SPEC_STORE_BYPASS_SECCOMP:
++	case SPEC_STORE_BYPASS_PRCTL:
++		if (task_spec_ssb_force_disable(task))
++			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
++		if (task_spec_ssb_disable(task))
++			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
++		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
++	default:
++		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
++			return PR_SPEC_ENABLE;
++		return PR_SPEC_NOT_AFFECTED;
++	}
++}
++
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
++{
++	switch (which) {
++	case PR_SPEC_STORE_BYPASS:
++		return ssb_prctl_get(task);
++	default:
++		return -ENODEV;
++	}
++}
++
++void x86_spec_ctrl_setup_ap(void)
++{
++	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
++		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
++
++	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
++		x86_amd_ssb_disable();
++}
+ 
+ #ifdef CONFIG_SYSFS
+-ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++
++static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
++			       char *buf, unsigned int bug)
+ {
+-	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++	if (!boot_cpu_has_bug(bug))
+ 		return sprintf(buf, "Not affected\n");
+-	if (boot_cpu_has(X86_FEATURE_PTI))
+-		return sprintf(buf, "Mitigation: PTI\n");
++
++	switch (bug) {
++	case X86_BUG_CPU_MELTDOWN:
++		if (boot_cpu_has(X86_FEATURE_PTI))
++			return sprintf(buf, "Mitigation: PTI\n");
++
++		break;
++
++	case X86_BUG_SPECTRE_V1:
++		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++
++	case X86_BUG_SPECTRE_V2:
++		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
++			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
++			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
++			       spectre_v2_module_string());
++
++	case X86_BUG_SPEC_STORE_BYPASS:
++		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
++
++	default:
++		break;
++	}
++
+ 	return sprintf(buf, "Vulnerable\n");
+ }
+ 
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
++}
++
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
+-		return sprintf(buf, "Not affected\n");
+-	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
+ }
+ 
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+-		return sprintf(buf, "Not affected\n");
++	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
++}
+ 
+-	return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+-		       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+-		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+-		       spectre_v2_module_string());
++ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 348cf4821240..5f74f94244e1 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -757,17 +757,32 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ 	 * and they also have a different bit for STIBP support. Also,
+ 	 * a hypervisor might have set the individual AMD bits even on
+ 	 * Intel CPUs, for finer-grained selection of what's available.
+-	 *
+-	 * We use the AMD bits in 0x8000_0008 EBX as the generic hardware
+-	 * features, which are visible in /proc/cpuinfo and used by the
+-	 * kernel. So set those accordingly from the Intel bits.
+ 	 */
+ 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ 		set_cpu_cap(c, X86_FEATURE_IBRS);
+ 		set_cpu_cap(c, X86_FEATURE_IBPB);
++		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ 	}
++
+ 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ 		set_cpu_cap(c, X86_FEATURE_STIBP);
++
++	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
++	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
++		set_cpu_cap(c, X86_FEATURE_SSBD);
++
++	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
++		set_cpu_cap(c, X86_FEATURE_IBRS);
++		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++	}
++
++	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
++		set_cpu_cap(c, X86_FEATURE_IBPB);
++
++	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
++		set_cpu_cap(c, X86_FEATURE_STIBP);
++		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
++	}
+ }
+ 
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+@@ -918,21 +933,55 @@ static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ 	{}
+ };
+ 
+-static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
++static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PINEVIEW	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_LINCROFT	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_PENWELL		},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CLOVERVIEW	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_CEDARVIEW	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
++	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
++	{ X86_VENDOR_CENTAUR,	5,					},
++	{ X86_VENDOR_INTEL,	5,					},
++	{ X86_VENDOR_NSC,	5,					},
++	{ X86_VENDOR_AMD,	0x12,					},
++	{ X86_VENDOR_AMD,	0x11,					},
++	{ X86_VENDOR_AMD,	0x10,					},
++	{ X86_VENDOR_AMD,	0xf,					},
++	{ X86_VENDOR_ANY,	4,					},
++	{}
++};
++
++static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ 	u64 ia32_cap = 0;
+ 
+-	if (x86_match_cpu(cpu_no_meltdown))
+-		return false;
+-
+ 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+ 
++	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
++	   !(ia32_cap & ARCH_CAP_SSB_NO))
++		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
++
++	if (x86_match_cpu(cpu_no_speculation))
++		return;
++
++	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
++	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
++
++	if (x86_match_cpu(cpu_no_meltdown))
++		return;
++
+ 	/* Rogue Data Cache Load? No! */
+ 	if (ia32_cap & ARCH_CAP_RDCL_NO)
+-		return false;
++		return;
+ 
+-	return true;
++	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ }
+ 
+ /*
+@@ -982,12 +1031,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+ 
+ 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+ 
+-	if (!x86_match_cpu(cpu_no_speculation)) {
+-		if (cpu_vulnerable_to_meltdown(c))
+-			setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+-		setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+-		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+-	}
++	cpu_set_bug_bits(c);
+ 
+ 	fpu__init_system(c);
+ 
+@@ -1347,6 +1391,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
+ #endif
+ 	mtrr_ap_init();
+ 	validate_apic_and_package_id(c);
++	x86_spec_ctrl_setup_ap();
+ }
+ 
+ static __init int setup_noclflush(char *arg)
+diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
+index e806b11a99af..37672d299e35 100644
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -50,4 +50,6 @@ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ 
+ unsigned int aperfmperf_get_khz(int cpu);
+ 
++extern void x86_spec_ctrl_setup_ap(void);
++
+ #endif /* ARCH_X86_CPU_H */
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index c3af167d0a70..c895f38a7a5e 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -188,7 +188,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
+ 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
+ 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
++		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
++		setup_clear_cpu_cap(X86_FEATURE_SSBD);
++		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 03408b942adb..30ca2d1a9231 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -38,6 +38,7 @@
+ #include <asm/switch_to.h>
+ #include <asm/desc.h>
+ #include <asm/prctl.h>
++#include <asm/spec-ctrl.h>
+ 
+ /*
+  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+@@ -278,6 +279,148 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
+ 	}
+ }
+ 
++#ifdef CONFIG_SMP
++
++struct ssb_state {
++	struct ssb_state	*shared_state;
++	raw_spinlock_t		lock;
++	unsigned int		disable_state;
++	unsigned long		local_state;
++};
++
++#define LSTATE_SSB	0
++
++static DEFINE_PER_CPU(struct ssb_state, ssb_state);
++
++void speculative_store_bypass_ht_init(void)
++{
++	struct ssb_state *st = this_cpu_ptr(&ssb_state);
++	unsigned int this_cpu = smp_processor_id();
++	unsigned int cpu;
++
++	st->local_state = 0;
++
++	/*
++	 * Shared state setup happens once on the first bringup
++	 * of the CPU. It's not destroyed on CPU hotunplug.
++	 */
++	if (st->shared_state)
++		return;
++
++	raw_spin_lock_init(&st->lock);
++
++	/*
++	 * Go over HT siblings and check whether one of them has set up the
++	 * shared state pointer already.
++	 */
++	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
++		if (cpu == this_cpu)
++			continue;
++
++		if (!per_cpu(ssb_state, cpu).shared_state)
++			continue;
++
++		/* Link it to the state of the sibling: */
++		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
++		return;
++	}
++
++	/*
++	 * First HT sibling to come up on the core.  Link shared state of
++	 * the first HT sibling to itself. The siblings on the same core
++	 * which come up later will see the shared state pointer and link
++	 * themself to the state of this CPU.
++	 */
++	st->shared_state = st;
++}
++
++/*
++ * Logic is: First HT sibling enables SSBD for both siblings in the core
++ * and last sibling to disable it, disables it for the whole core. This how
++ * MSR_SPEC_CTRL works in "hardware":
++ *
++ *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
++ */
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
++{
++	struct ssb_state *st = this_cpu_ptr(&ssb_state);
++	u64 msr = x86_amd_ls_cfg_base;
++
++	if (!static_cpu_has(X86_FEATURE_ZEN)) {
++		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
++		wrmsrl(MSR_AMD64_LS_CFG, msr);
++		return;
++	}
++
++	if (tifn & _TIF_SSBD) {
++		/*
++		 * Since this can race with prctl(), block reentry on the
++		 * same CPU.
++		 */
++		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
++			return;
++
++		msr |= x86_amd_ls_cfg_ssbd_mask;
++
++		raw_spin_lock(&st->shared_state->lock);
++		/* First sibling enables SSBD: */
++		if (!st->shared_state->disable_state)
++			wrmsrl(MSR_AMD64_LS_CFG, msr);
++		st->shared_state->disable_state++;
++		raw_spin_unlock(&st->shared_state->lock);
++	} else {
++		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
++			return;
++
++		raw_spin_lock(&st->shared_state->lock);
++		st->shared_state->disable_state--;
++		if (!st->shared_state->disable_state)
++			wrmsrl(MSR_AMD64_LS_CFG, msr);
++		raw_spin_unlock(&st->shared_state->lock);
++	}
++}
++#else
++static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
++{
++	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
++
++	wrmsrl(MSR_AMD64_LS_CFG, msr);
++}
++#endif
++
++static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
++{
++	/*
++	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
++	 * so ssbd_tif_to_spec_ctrl() just works.
++	 */
++	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
++}
++
++static __always_inline void intel_set_ssb_state(unsigned long tifn)
++{
++	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
++
++	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
++}
++
++static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
++{
++	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
++		amd_set_ssb_virt_state(tifn);
++	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++		amd_set_core_ssb_state(tifn);
++	else
++		intel_set_ssb_state(tifn);
++}
++
++void speculative_store_bypass_update(unsigned long tif)
++{
++	preempt_disable();
++	__speculative_store_bypass_update(tif);
++	preempt_enable();
++}
++
+ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ 		      struct tss_struct *tss)
+ {
+@@ -309,6 +452,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ 
+ 	if ((tifp ^ tifn) & _TIF_NOCPUID)
+ 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
++
++	if ((tifp ^ tifn) & _TIF_SSBD)
++		__speculative_store_bypass_update(tifn);
+ }
+ 
+ /*
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 9eb448c7859d..fa093b77689f 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -528,6 +528,7 @@ void set_personality_64bit(void)
+ 	clear_thread_flag(TIF_X32);
+ 	/* Pretend that this comes from a 64bit execve */
+ 	task_pt_regs(current)->orig_ax = __NR_execve;
++	current_thread_info()->status &= ~TS_COMPAT;
+ 
+ 	/* Ensure the corresponding mm is not marked. */
+ 	if (current->mm)
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 12599e55e040..d50bc80f5172 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -77,6 +77,7 @@
+ #include <asm/i8259.h>
+ #include <asm/misc.h>
+ #include <asm/qspinlock.h>
++#include <asm/spec-ctrl.h>
+ 
+ /* Number of siblings per CPU package */
+ int smp_num_siblings = 1;
+@@ -242,6 +243,8 @@ static void notrace start_secondary(void *unused)
+ 	 */
+ 	check_tsc_sync_target();
+ 
++	speculative_store_bypass_ht_init();
++
+ 	/*
+ 	 * Lock vector_lock, set CPU online and bring the vector
+ 	 * allocator online. Online must be set with vector_lock held
+@@ -1257,6 +1260,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
+ 	set_mtrr_aps_delayed_init();
+ 
+ 	smp_quirk_init_udelay();
++
++	speculative_store_bypass_ht_init();
+ }
+ 
+ void arch_enable_nonboot_cpus_begin(void)
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index b671fc2d0422..3f400004f602 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -374,7 +374,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 
+ 	/* cpuid 0x80000008.ebx */
+ 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+-		F(IBPB) | F(IBRS);
++		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+ 
+ 	/* cpuid 0xC0000001.edx */
+ 	const u32 kvm_cpuid_C000_0001_edx_x86_features =
+@@ -402,7 +402,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 
+ 	/* cpuid 7.0.edx*/
+ 	const u32 kvm_cpuid_7_0_edx_x86_features =
+-		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
++		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | F(SSBD) |
+ 		F(ARCH_CAPABILITIES);
+ 
+ 	/* all calls to cpuid_count() should be made on the same cpu */
+@@ -642,13 +642,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 			g_phys_as = phys_as;
+ 		entry->eax = g_phys_as | (virt_as << 8);
+ 		entry->edx = 0;
+-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
+-		if (boot_cpu_has(X86_FEATURE_IBPB))
+-			entry->ebx |= F(IBPB);
+-		if (boot_cpu_has(X86_FEATURE_IBRS))
+-			entry->ebx |= F(IBRS);
++		/*
++		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
++		 * hardware cpuid
++		 */
++		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
++			entry->ebx |= F(AMD_IBPB);
++		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
++			entry->ebx |= F(AMD_IBRS);
++		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
++			entry->ebx |= F(VIRT_SSBD);
+ 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
++		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
++			entry->ebx |= F(VIRT_SSBD);
+ 		break;
+ 	}
+ 	case 0x80000019:
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index be9c839e2c89..dbbd762359a9 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -50,7 +50,7 @@
+ #include <asm/kvm_para.h>
+ #include <asm/irq_remapping.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ 
+ #include <asm/virtext.h>
+ #include "trace.h"
+@@ -192,6 +192,12 @@ struct vcpu_svm {
+ 	} host;
+ 
+ 	u64 spec_ctrl;
++	/*
++	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
++	 * translated into the appropriate L2_CFG bits on the host to
++	 * perform speculative control.
++	 */
++	u64 virt_spec_ctrl;
+ 
+ 	u32 *msrpm;
+ 
+@@ -1910,6 +1916,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+ 
+ 	vcpu->arch.microcode_version = 0x01000065;
+ 	svm->spec_ctrl = 0;
++	svm->virt_spec_ctrl = 0;
+ 
+ 	if (!init_event) {
+ 		svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+@@ -3959,11 +3966,18 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+ 			return 1;
+ 
+ 		msr_info->data = svm->spec_ctrl;
+ 		break;
++	case MSR_AMD64_VIRT_SPEC_CTRL:
++		if (!msr_info->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
++			return 1;
++
++		msr_info->data = svm->virt_spec_ctrl;
++		break;
+ 	case MSR_F15H_IC_CFG: {
+ 
+ 		int family, model;
+@@ -4057,7 +4071,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+ 			return 1;
+ 
+ 		/* The STIBP bit doesn't fault even if it's not advertised */
+@@ -4084,7 +4098,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 		break;
+ 	case MSR_IA32_PRED_CMD:
+ 		if (!msr->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
++		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
+ 			return 1;
+ 
+ 		if (data & ~PRED_CMD_IBPB)
+@@ -4098,6 +4112,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 			break;
+ 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ 		break;
++	case MSR_AMD64_VIRT_SPEC_CTRL:
++		if (!msr->host_initiated &&
++		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
++			return 1;
++
++		if (data & ~SPEC_CTRL_SSBD)
++			return 1;
++
++		svm->virt_spec_ctrl = data;
++		break;
+ 	case MSR_STAR:
+ 		svm->vmcb->save.star = data;
+ 		break;
+@@ -5401,8 +5425,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * is no need to worry about the conditional branch over the wrmsr
+ 	 * being speculatively taken.
+ 	 */
+-	if (svm->spec_ctrl)
+-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
++	x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+ 
+ 	asm volatile (
+ 		"push %%" _ASM_BP "; \n\t"
+@@ -5496,6 +5519,18 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ #endif
+ 		);
+ 
++	/* Eliminate branch target predictions from guest mode */
++	vmexit_fill_RSB();
++
++#ifdef CONFIG_X86_64
++	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
++#else
++	loadsegment(fs, svm->host.fs);
++#ifndef CONFIG_X86_32_LAZY_GS
++	loadsegment(gs, svm->host.gs);
++#endif
++#endif
++
+ 	/*
+ 	 * We do not use IBRS in the kernel. If this vCPU has used the
+ 	 * SPEC_CTRL MSR it may have left it on; save the value and
+@@ -5514,20 +5549,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+ 
+-	if (svm->spec_ctrl)
+-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+-
+-	/* Eliminate branch target predictions from guest mode */
+-	vmexit_fill_RSB();
+-
+-#ifdef CONFIG_X86_64
+-	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+-#else
+-	loadsegment(fs, svm->host.fs);
+-#ifndef CONFIG_X86_32_LAZY_GS
+-	loadsegment(gs, svm->host.gs);
+-#endif
+-#endif
++	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+ 
+ 	reload_tss(vcpu);
+ 
+@@ -5630,7 +5652,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
+ 	return false;
+ }
+ 
+-static bool svm_has_high_real_mode_segbase(void)
++static bool svm_has_emulated_msr(int index)
+ {
+ 	return true;
+ }
+@@ -6854,7 +6876,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ 	.hardware_enable = svm_hardware_enable,
+ 	.hardware_disable = svm_hardware_disable,
+ 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
+-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
++	.has_emulated_msr = svm_has_emulated_msr,
+ 
+ 	.vcpu_create = svm_create_vcpu,
+ 	.vcpu_free = svm_free_vcpu,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 657c93409042..3deb153bf9d9 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -52,7 +52,7 @@
+ #include <asm/irq_remapping.h>
+ #include <asm/mmu_context.h>
+ #include <asm/microcode.h>
+-#include <asm/nospec-branch.h>
++#include <asm/spec-ctrl.h>
+ 
+ #include "trace.h"
+ #include "pmu.h"
+@@ -1314,6 +1314,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
+ 		SECONDARY_EXEC_ENABLE_VMFUNC;
+ }
+ 
++static bool vmx_umip_emulated(void)
++{
++	return vmcs_config.cpu_based_2nd_exec_ctrl &
++		SECONDARY_EXEC_DESC;
++}
++
+ static inline bool report_flexpriority(void)
+ {
+ 	return flexpriority_enabled;
+@@ -3269,7 +3275,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ 			return 1;
+ 
+@@ -3390,12 +3395,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_IA32_SPEC_CTRL:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ 			return 1;
+ 
+ 		/* The STIBP bit doesn't fault even if it's not advertised */
+-		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
++		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ 			return 1;
+ 
+ 		vmx->spec_ctrl = data;
+@@ -3421,7 +3425,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		break;
+ 	case MSR_IA32_PRED_CMD:
+ 		if (!msr_info->host_initiated &&
+-		    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB) &&
+ 		    !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ 			return 1;
+ 
+@@ -4494,14 +4497,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 		(to_vmx(vcpu)->rmode.vm86_active ?
+ 		 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ 
+-	if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
+-		vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+-			      SECONDARY_EXEC_DESC);
+-		hw_cr4 &= ~X86_CR4_UMIP;
+-	} else if (!is_guest_mode(vcpu) ||
+-	           !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+-		vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
++	if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
++		if (cr4 & X86_CR4_UMIP) {
++			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ 				SECONDARY_EXEC_DESC);
++			hw_cr4 &= ~X86_CR4_UMIP;
++		} else if (!is_guest_mode(vcpu) ||
++			!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
++			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
++					SECONDARY_EXEC_DESC);
++	}
+ 
+ 	if (cr4 & X86_CR4_VMXE) {
+ 		/*
+@@ -9226,9 +9231,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+ }
+ STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+ 
+-static bool vmx_has_high_real_mode_segbase(void)
++static bool vmx_has_emulated_msr(int index)
+ {
+-	return enable_unrestricted_guest || emulate_invalid_guest_state;
++	switch (index) {
++	case MSR_IA32_SMBASE:
++		/*
++		 * We cannot do SMM unless we can run the guest in big
++		 * real mode.
++		 */
++		return enable_unrestricted_guest || emulate_invalid_guest_state;
++	case MSR_AMD64_VIRT_SPEC_CTRL:
++		/* This is AMD only.  */
++		return false;
++	default:
++		return true;
++	}
+ }
+ 
+ static bool vmx_mpx_supported(void)
+@@ -9243,12 +9260,6 @@ static bool vmx_xsaves_supported(void)
+ 		SECONDARY_EXEC_XSAVES;
+ }
+ 
+-static bool vmx_umip_emulated(void)
+-{
+-	return vmcs_config.cpu_based_2nd_exec_ctrl &
+-		SECONDARY_EXEC_DESC;
+-}
+-
+ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
+ {
+ 	u32 exit_intr_info;
+@@ -9466,10 +9477,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	 * is no need to worry about the conditional branch over the wrmsr
+ 	 * being speculatively taken.
+ 	 */
+-	if (vmx->spec_ctrl)
+-		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
++	x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+ 
+ 	vmx->__launched = vmx->loaded_vmcs->launched;
++
+ 	asm(
+ 		/* Store host registers */
+ 		"push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -9605,8 +9616,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+ 
+-	if (vmx->spec_ctrl)
+-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
++	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+ 
+ 	/* Eliminate branch target predictions from guest mode */
+ 	vmexit_fill_RSB();
+@@ -12299,7 +12309,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ 	.hardware_enable = hardware_enable,
+ 	.hardware_disable = hardware_disable,
+ 	.cpu_has_accelerated_tpr = report_flexpriority,
+-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
++	.has_emulated_msr = vmx_has_emulated_msr,
+ 
+ 	.vcpu_create = vmx_create_vcpu,
+ 	.vcpu_free = vmx_free_vcpu,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 18b5ca7a3197..f3df3a934733 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1045,6 +1045,7 @@ static u32 emulated_msrs[] = {
+ 	MSR_SMI_COUNT,
+ 	MSR_PLATFORM_INFO,
+ 	MSR_MISC_FEATURES_ENABLES,
++	MSR_AMD64_VIRT_SPEC_CTRL,
+ };
+ 
+ static unsigned num_emulated_msrs;
+@@ -2843,7 +2844,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ 		 * fringe case that is not enabled except via specific settings
+ 		 * of the module parameters.
+ 		 */
+-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
++		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
+ 		break;
+ 	case KVM_CAP_VAPIC:
+ 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
+@@ -4522,14 +4523,8 @@ static void kvm_init_msr_list(void)
+ 	num_msrs_to_save = j;
+ 
+ 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
+-		switch (emulated_msrs[i]) {
+-		case MSR_IA32_SMBASE:
+-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
+-				continue;
+-			break;
+-		default:
+-			break;
+-		}
++		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
++			continue;
+ 
+ 		if (j < i)
+ 			emulated_msrs[j] = emulated_msrs[i];
+diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
+index d7bc0eea20a5..6e98e0a7c923 100644
+--- a/arch/x86/mm/pkeys.c
++++ b/arch/x86/mm/pkeys.c
+@@ -94,26 +94,27 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
+ 	 */
+ 	if (pkey != -1)
+ 		return pkey;
+-	/*
+-	 * Look for a protection-key-drive execute-only mapping
+-	 * which is now being given permissions that are not
+-	 * execute-only.  Move it back to the default pkey.
+-	 */
+-	if (vma_is_pkey_exec_only(vma) &&
+-	    (prot & (PROT_READ|PROT_WRITE))) {
+-		return 0;
+-	}
++
+ 	/*
+ 	 * The mapping is execute-only.  Go try to get the
+ 	 * execute-only protection key.  If we fail to do that,
+ 	 * fall through as if we do not have execute-only
+-	 * support.
++	 * support in this mm.
+ 	 */
+ 	if (prot == PROT_EXEC) {
+ 		pkey = execute_only_pkey(vma->vm_mm);
+ 		if (pkey > 0)
+ 			return pkey;
++	} else if (vma_is_pkey_exec_only(vma)) {
++		/*
++		 * Protections are *not* PROT_EXEC, but the mapping
++		 * is using the exec-only pkey.  This mapping was
++		 * PROT_EXEC and will no longer be.  Move back to
++		 * the default pkey.
++		 */
++		return ARCH_DEFAULT_PKEY;
+ 	}
++
+ 	/*
+ 	 * This is a vanilla, non-pkey mprotect (or we failed to
+ 	 * setup execute-only), inherit the pkey from the VMA we
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index d33e7dbe3129..2d76106788a3 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+ }
+ EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
+ 
+-static void xen_flush_tlb_all(void)
++static noinline void xen_flush_tlb_all(void)
+ {
+ 	struct mmuext_op *op;
+ 	struct multicall_space mcs;
+ 
+-	trace_xen_mmu_flush_tlb_all(0);
+-
+ 	preempt_disable();
+ 
+ 	mcs = xen_mc_entry(sizeof(*op));
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index aae88fec9941..ac17c206afd1 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -1280,13 +1280,11 @@ unsigned long xen_read_cr2_direct(void)
+ 	return this_cpu_read(xen_vcpu_info.arch.cr2);
+ }
+ 
+-static void xen_flush_tlb(void)
++static noinline void xen_flush_tlb(void)
+ {
+ 	struct mmuext_op *op;
+ 	struct multicall_space mcs;
+ 
+-	trace_xen_mmu_flush_tlb(0);
+-
+ 	preempt_disable();
+ 
+ 	mcs = xen_mc_entry(sizeof(*op));
+diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
+index d21a2d913107..827905794b48 100644
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -532,14 +532,22 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ 	return sprintf(buf, "Not affected\n");
+ }
+ 
++ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
++					  struct device_attribute *attr, char *buf)
++{
++	return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
++static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+ 
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ 	&dev_attr_meltdown.attr,
+ 	&dev_attr_spectre_v1.attr,
+ 	&dev_attr_spectre_v2.attr,
++	&dev_attr_spec_store_bypass.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
+index fb586e09682d..a8bec064d14a 100644
+--- a/drivers/cpufreq/Kconfig.arm
++++ b/drivers/cpufreq/Kconfig.arm
+@@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
+ 
+ config ARM_ARMADA_37XX_CPUFREQ
+ 	tristate "Armada 37xx CPUFreq support"
+-	depends on ARCH_MVEBU
++	depends on ARCH_MVEBU && CPUFREQ_DT
+ 	help
+ 	  This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ 	  The Armada 37xx PMU supports 4 frequency and VDD levels.
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 9acc1e157813..6d3e01b2bde9 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -716,7 +716,7 @@ static void remove_compat_control_link(struct drm_device *dev)
+ 	if (!minor)
+ 		return;
+ 
+-	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index);
++	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
+ 	if (!name)
+ 		return;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 175d552c8bae..a2917dfd45cd 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -7139,6 +7139,9 @@ enum {
+ #define SLICE_ECO_CHICKEN0			_MMIO(0x7308)
+ #define   PIXEL_MASK_CAMMING_DISABLE		(1 << 14)
+ 
++#define GEN9_WM_CHICKEN3			_MMIO(0x5588)
++#define   GEN9_FACTOR_IN_CLR_VAL_HIZ		(1 << 9)
++
+ /* WaCatErrorRejectionIssue */
+ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG		_MMIO(0x9030)
+ #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB	(1<<11)
+diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
+index fa960cfd2764..02ec078b0979 100644
+--- a/drivers/gpu/drm/i915/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/intel_engine_cs.c
+@@ -1098,6 +1098,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
+ 	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
+ 
++	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
++	if (IS_GEN9_LP(dev_priv))
++		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
++
+ 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+ 	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ 	if (ret)
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index ef23553ff5cb..001a80dfad31 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -272,7 +272,7 @@ config SENSORS_K8TEMP
+ 
+ config SENSORS_K10TEMP
+ 	tristate "AMD Family 10h+ temperature sensor"
+-	depends on X86 && PCI
++	depends on X86 && PCI && AMD_NB
+ 	help
+ 	  If you say yes here you get support for the temperature
+ 	  sensor(s) inside your CPU. Supported are later revisions of
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index d2cc55e21374..3b73dee6fdc6 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -23,6 +23,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
++#include <asm/amd_nb.h>
+ #include <asm/processor.h>
+ 
+ MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
+@@ -40,8 +41,8 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ #define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
+ #endif
+ 
+-#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
+-#define PCI_DEVICE_ID_AMD_17H_RR_NB	0x15d0
++#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
++#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3	0x15eb
+ #endif
+ 
+ /* CPUID function 0x80000001, ebx */
+@@ -63,10 +64,12 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ #define  NB_CAP_HTC			0x00000400
+ 
+ /*
+- * For F15h M60h, functionality of REG_REPORTED_TEMPERATURE
+- * has been moved to D0F0xBC_xD820_0CA4 [Reported Temperature
+- * Control]
++ * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
++ * and REG_REPORTED_TEMPERATURE have been moved to
++ * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
++ * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
+  */
++#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET	0xd8200c64
+ #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET	0xd8200ca4
+ 
+ /* F17h M01h Access througn SMN */
+@@ -74,6 +77,7 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ 
+ struct k10temp_data {
+ 	struct pci_dev *pdev;
++	void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
+ 	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ 	int temp_offset;
+ 	u32 temp_adjust_mask;
+@@ -98,6 +102,11 @@ static const struct tctl_offset tctl_offset_table[] = {
+ 	{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
+ };
+ 
++static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
++{
++	pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
++}
++
+ static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
+ {
+ 	pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
+@@ -114,6 +123,12 @@ static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
+ 	mutex_unlock(&nb_smu_ind_mutex);
+ }
+ 
++static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
++{
++	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
++			  F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
++}
++
+ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+ {
+ 	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
+@@ -122,8 +137,8 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
+ 
+ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
+ {
+-	amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60,
+-			  F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
++	amd_smn_read(amd_pci_dev_to_node_id(pdev),
++		     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
+ }
+ 
+ static ssize_t temp1_input_show(struct device *dev,
+@@ -160,8 +175,7 @@ static ssize_t show_temp_crit(struct device *dev,
+ 	u32 regval;
+ 	int value;
+ 
+-	pci_read_config_dword(data->pdev,
+-			      REG_HARDWARE_THERMAL_CONTROL, &regval);
++	data->read_htcreg(data->pdev, &regval);
+ 	value = ((regval >> 16) & 0x7f) * 500 + 52000;
+ 	if (show_hyst)
+ 		value -= ((regval >> 24) & 0xf) * 500;
+@@ -181,13 +195,18 @@ static umode_t k10temp_is_visible(struct kobject *kobj,
+ 	struct pci_dev *pdev = data->pdev;
+ 
+ 	if (index >= 2) {
+-		u32 reg_caps, reg_htc;
++		u32 reg;
++
++		if (!data->read_htcreg)
++			return 0;
+ 
+ 		pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
+-				      &reg_caps);
+-		pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL,
+-				      &reg_htc);
+-		if (!(reg_caps & NB_CAP_HTC) || !(reg_htc & HTC_ENABLE))
++				      &reg);
++		if (!(reg & NB_CAP_HTC))
++			return 0;
++
++		data->read_htcreg(data->pdev, &reg);
++		if (!(reg & HTC_ENABLE))
+ 			return 0;
+ 	}
+ 	return attr->mode;
+@@ -268,11 +287,13 @@ static int k10temp_probe(struct pci_dev *pdev,
+ 
+ 	if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+ 					  boot_cpu_data.x86_model == 0x70)) {
++		data->read_htcreg = read_htcreg_nb_f15;
+ 		data->read_tempreg = read_tempreg_nb_f15;
+ 	} else if (boot_cpu_data.x86 == 0x17) {
+ 		data->temp_adjust_mask = 0x80000;
+ 		data->read_tempreg = read_tempreg_nb_f17;
+ 	} else {
++		data->read_htcreg = read_htcreg_pci;
+ 		data->read_tempreg = read_tempreg_pci;
+ 	}
+ 
+@@ -302,7 +323,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
++	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(pci, k10temp_id_table);
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 05732531829f..d64d6ee1de01 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
+ 	i2c_dw_disable_int(dev);
+ 
+ 	/* Enable the adapter */
+-	__i2c_dw_enable_and_wait(dev, true);
++	__i2c_dw_enable(dev, true);
++
++	/* Dummy read to avoid the register getting stuck on Bay Trail */
++	dw_readl(dev, DW_IC_ENABLE_STATUS);
+ 
+ 	/* Clear and enable interrupts */
+ 	dw_readl(dev, DW_IC_CLR_INTR);
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index af89408befe8..b218426a6493 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -251,6 +251,9 @@ void bch_debug_exit(void)
+ 
+ int __init bch_debug_init(struct kobject *kobj)
+ {
++	if (!IS_ENABLED(CONFIG_DEBUG_FS))
++		return 0;
++
+ 	debug = debugfs_create_dir("bcache", NULL);
+ 
+ 	return IS_ERR_OR_NULL(debug);
+diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
+index f978c06fbd7d..e81ad33ab849 100644
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -1190,11 +1190,13 @@ static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ 				  NDCB0_CMD2(NAND_CMD_READSTART);
+ 
+ 	/*
+-	 * Trigger the naked read operation only on the last chunk.
+-	 * Otherwise, use monolithic read.
++	 * Trigger the monolithic read on the first chunk, then naked read on
++	 * intermediate chunks and finally a last naked read on the last chunk.
+ 	 */
+-	if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
++	if (chunk == 0)
+ 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
++	else if (chunk < lt->nchunks - 1)
++		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ 	else
+ 		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ 
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 98f3cfdc0d02..5a116db5a65f 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+ 	int i;
+ 
+ 	for (i = 0; i < nr_queues; i++) {
+-		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
++		q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
+ 		if (!q)
+ 			return -ENOMEM;
+ 
+@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
+ {
+ 	struct ciw *ciw;
+ 	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
+-	int rc;
+ 
+ 	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+ 	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
+ 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
+ 	if (!ciw) {
+ 		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
+-		rc = -EINVAL;
+-		goto out_err;
++		return -EINVAL;
+ 	}
+ 	irq_ptr->equeue = *ciw;
+ 
+ 	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
+ 	if (!ciw) {
+ 		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
+-		rc = -EINVAL;
+-		goto out_err;
++		return -EINVAL;
+ 	}
+ 	irq_ptr->aqueue = *ciw;
+ 
+@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
+ 	irq_ptr->orig_handler = init_data->cdev->handler;
+ 	init_data->cdev->handler = qdio_int_handler;
+ 	return 0;
+-out_err:
+-	qdio_release_memory(irq_ptr);
+-	return rc;
+ }
+ 
+ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 2c7550797ec2..dce92b2a895d 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
+  * and stores the result to ccwchain list. @cp must have been
+  * initialized by a previous call with cp_init(). Otherwise, undefined
+  * behavior occurs.
++ * For each chain composing the channel program:
++ * - On entry ch_len holds the count of CCWs to be translated.
++ * - On exit ch_len is adjusted to the count of successfully translated CCWs.
++ * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
+  *
+  * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
+  * as helpers to do ccw chain translation inside the kernel. Basically
+@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
+ 		for (idx = 0; idx < len; idx++) {
+ 			ret = ccwchain_fetch_one(chain, idx, cp);
+ 			if (ret)
+-				return ret;
++				goto out_err;
+ 		}
+ 	}
+ 
+ 	return 0;
++out_err:
++	/* Only cleanup the chain elements that were actually translated. */
++	chain->ch_len = idx;
++	list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
++		chain->ch_len = 0;
++	}
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index a172ab299e80..ff01f865a173 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -490,7 +490,7 @@ static int bcm_qspi_bspi_set_mode(struct bcm_qspi *qspi,
+ 
+ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
+ {
+-	if (!has_bspi(qspi) || (qspi->bspi_enabled))
++	if (!has_bspi(qspi))
+ 		return;
+ 
+ 	qspi->bspi_enabled = 1;
+@@ -505,7 +505,7 @@ static void bcm_qspi_enable_bspi(struct bcm_qspi *qspi)
+ 
+ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
+ {
+-	if (!has_bspi(qspi) || (!qspi->bspi_enabled))
++	if (!has_bspi(qspi))
+ 		return;
+ 
+ 	qspi->bspi_enabled = 0;
+@@ -519,16 +519,19 @@ static void bcm_qspi_disable_bspi(struct bcm_qspi *qspi)
+ 
+ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
+ {
+-	u32 data = 0;
++	u32 rd = 0;
++	u32 wr = 0;
+ 
+-	if (qspi->curr_cs == cs)
+-		return;
+ 	if (qspi->base[CHIP_SELECT]) {
+-		data = bcm_qspi_read(qspi, CHIP_SELECT, 0);
+-		data = (data & ~0xff) | (1 << cs);
+-		bcm_qspi_write(qspi, CHIP_SELECT, 0, data);
++		rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
++		wr = (rd & ~0xff) | (1 << cs);
++		if (rd == wr)
++			return;
++		bcm_qspi_write(qspi, CHIP_SELECT, 0, wr);
+ 		usleep_range(10, 20);
+ 	}
++
++	dev_dbg(&qspi->pdev->dev, "using cs:%d\n", cs);
+ 	qspi->curr_cs = cs;
+ }
+ 
+@@ -755,8 +758,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
+ 			dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
+ 		}
+ 		mspi_cdram = MSPI_CDRAM_CONT_BIT;
+-		mspi_cdram |= (~(1 << spi->chip_select) &
+-			       MSPI_CDRAM_PCS);
++
++		if (has_bspi(qspi))
++			mspi_cdram &= ~1;
++		else
++			mspi_cdram |= (~(1 << spi->chip_select) &
++				       MSPI_CDRAM_PCS);
++
+ 		mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
+ 				MSPI_CDRAM_BITSE_BIT);
+ 
+diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
+index 94f7b0713281..02a8012a318a 100644
+--- a/drivers/spi/spi-pxa2xx.h
++++ b/drivers/spi/spi-pxa2xx.h
+@@ -38,7 +38,7 @@ struct driver_data {
+ 
+ 	/* SSP register addresses */
+ 	void __iomem *ioaddr;
+-	u32 ssdr_physical;
++	phys_addr_t ssdr_physical;
+ 
+ 	/* SSP masks*/
+ 	u32 dma_cr1;
+diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
+index 556960a1bab3..07d3be6f0780 100644
+--- a/drivers/tee/tee_shm.c
++++ b/drivers/tee/tee_shm.c
+@@ -360,9 +360,10 @@ int tee_shm_get_fd(struct tee_shm *shm)
+ 	if (!(shm->flags & TEE_SHM_DMA_BUF))
+ 		return -EINVAL;
+ 
++	get_dma_buf(shm->dmabuf);
+ 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+-	if (fd >= 0)
+-		get_dma_buf(shm->dmabuf);
++	if (fd < 0)
++		dma_buf_put(shm->dmabuf);
+ 	return fd;
+ }
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 72ebbc908e19..32cd52ca8318 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -354,7 +354,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
+ 
+ 	slot_id = 0;
+ 	for (i = 0; i < MAX_HC_SLOTS; i++) {
+-		if (!xhci->devs[i])
++		if (!xhci->devs[i] || !xhci->devs[i]->udev)
+ 			continue;
+ 		speed = xhci->devs[i]->udev->speed;
+ 		if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
+diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
+index 14a72357800a..35618ceb2791 100644
+--- a/drivers/usb/usbip/stub.h
++++ b/drivers/usb/usbip/stub.h
+@@ -73,6 +73,7 @@ struct bus_id_priv {
+ 	struct stub_device *sdev;
+ 	struct usb_device *udev;
+ 	char shutdown_busid;
++	spinlock_t busid_lock;
+ };
+ 
+ /* stub_priv is allocated from stub_priv_cache */
+@@ -83,6 +84,7 @@ extern struct usb_device_driver stub_driver;
+ 
+ /* stub_main.c */
+ struct bus_id_priv *get_busid_priv(const char *busid);
++void put_busid_priv(struct bus_id_priv *bid);
+ int del_match_busid(char *busid);
+ void stub_device_cleanup_urbs(struct stub_device *sdev);
+ 
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index dd8ef36ab10e..c0d6ff1baa72 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -300,9 +300,9 @@ static int stub_probe(struct usb_device *udev)
+ 	struct stub_device *sdev = NULL;
+ 	const char *udev_busid = dev_name(&udev->dev);
+ 	struct bus_id_priv *busid_priv;
+-	int rc;
++	int rc = 0;
+ 
+-	dev_dbg(&udev->dev, "Enter\n");
++	dev_dbg(&udev->dev, "Enter probe\n");
+ 
+ 	/* check we should claim or not by busid_table */
+ 	busid_priv = get_busid_priv(udev_busid);
+@@ -317,13 +317,15 @@ static int stub_probe(struct usb_device *udev)
+ 		 * other matched drivers by the driver core.
+ 		 * See driver_probe_device() in driver/base/dd.c
+ 		 */
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto call_put_busid_priv;
+ 	}
+ 
+ 	if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
+ 		dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
+ 			 udev_busid);
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto call_put_busid_priv;
+ 	}
+ 
+ 	if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
+@@ -331,13 +333,16 @@ static int stub_probe(struct usb_device *udev)
+ 			"%s is attached on vhci_hcd... skip!\n",
+ 			udev_busid);
+ 
+-		return -ENODEV;
++		rc = -ENODEV;
++		goto call_put_busid_priv;
+ 	}
+ 
+ 	/* ok, this is my device */
+ 	sdev = stub_device_alloc(udev);
+-	if (!sdev)
+-		return -ENOMEM;
++	if (!sdev) {
++		rc = -ENOMEM;
++		goto call_put_busid_priv;
++	}
+ 
+ 	dev_info(&udev->dev,
+ 		"usbip-host: register new device (bus %u dev %u)\n",
+@@ -369,7 +374,9 @@ static int stub_probe(struct usb_device *udev)
+ 	}
+ 	busid_priv->status = STUB_BUSID_ALLOC;
+ 
+-	return 0;
++	rc = 0;
++	goto call_put_busid_priv;
++
+ err_files:
+ 	usb_hub_release_port(udev->parent, udev->portnum,
+ 			     (struct usb_dev_state *) udev);
+@@ -379,6 +386,9 @@ static int stub_probe(struct usb_device *udev)
+ 
+ 	busid_priv->sdev = NULL;
+ 	stub_device_free(sdev);
++
++call_put_busid_priv:
++	put_busid_priv(busid_priv);
+ 	return rc;
+ }
+ 
+@@ -404,7 +414,7 @@ static void stub_disconnect(struct usb_device *udev)
+ 	struct bus_id_priv *busid_priv;
+ 	int rc;
+ 
+-	dev_dbg(&udev->dev, "Enter\n");
++	dev_dbg(&udev->dev, "Enter disconnect\n");
+ 
+ 	busid_priv = get_busid_priv(udev_busid);
+ 	if (!busid_priv) {
+@@ -417,7 +427,7 @@ static void stub_disconnect(struct usb_device *udev)
+ 	/* get stub_device */
+ 	if (!sdev) {
+ 		dev_err(&udev->dev, "could not get device");
+-		return;
++		goto call_put_busid_priv;
+ 	}
+ 
+ 	dev_set_drvdata(&udev->dev, NULL);
+@@ -432,12 +442,12 @@ static void stub_disconnect(struct usb_device *udev)
+ 				  (struct usb_dev_state *) udev);
+ 	if (rc) {
+ 		dev_dbg(&udev->dev, "unable to release port\n");
+-		return;
++		goto call_put_busid_priv;
+ 	}
+ 
+ 	/* If usb reset is called from event handler */
+ 	if (usbip_in_eh(current))
+-		return;
++		goto call_put_busid_priv;
+ 
+ 	/* shutdown the current connection */
+ 	shutdown_busid(busid_priv);
+@@ -448,12 +458,11 @@ static void stub_disconnect(struct usb_device *udev)
+ 	busid_priv->sdev = NULL;
+ 	stub_device_free(sdev);
+ 
+-	if (busid_priv->status == STUB_BUSID_ALLOC) {
++	if (busid_priv->status == STUB_BUSID_ALLOC)
+ 		busid_priv->status = STUB_BUSID_ADDED;
+-	} else {
+-		busid_priv->status = STUB_BUSID_OTHER;
+-		del_match_busid((char *)udev_busid);
+-	}
++
++call_put_busid_priv:
++	put_busid_priv(busid_priv);
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index d41d0cdeec0f..bf8a5feb0ee9 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -14,6 +14,7 @@
+ #define DRIVER_DESC "USB/IP Host Driver"
+ 
+ struct kmem_cache *stub_priv_cache;
++
+ /*
+  * busid_tables defines matching busids that usbip can grab. A user can change
+  * dynamically what device is locally used and what device is exported to a
+@@ -25,6 +26,8 @@ static spinlock_t busid_table_lock;
+ 
+ static void init_busid_table(void)
+ {
++	int i;
++
+ 	/*
+ 	 * This also sets the bus_table[i].status to
+ 	 * STUB_BUSID_OTHER, which is 0.
+@@ -32,6 +35,9 @@ static void init_busid_table(void)
+ 	memset(busid_table, 0, sizeof(busid_table));
+ 
+ 	spin_lock_init(&busid_table_lock);
++
++	for (i = 0; i < MAX_BUSID; i++)
++		spin_lock_init(&busid_table[i].busid_lock);
+ }
+ 
+ /*
+@@ -43,15 +49,20 @@ static int get_busid_idx(const char *busid)
+ 	int i;
+ 	int idx = -1;
+ 
+-	for (i = 0; i < MAX_BUSID; i++)
++	for (i = 0; i < MAX_BUSID; i++) {
++		spin_lock(&busid_table[i].busid_lock);
+ 		if (busid_table[i].name[0])
+ 			if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
+ 				idx = i;
++				spin_unlock(&busid_table[i].busid_lock);
+ 				break;
+ 			}
++		spin_unlock(&busid_table[i].busid_lock);
++	}
+ 	return idx;
+ }
+ 
++/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
+ struct bus_id_priv *get_busid_priv(const char *busid)
+ {
+ 	int idx;
+@@ -59,13 +70,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
+ 
+ 	spin_lock(&busid_table_lock);
+ 	idx = get_busid_idx(busid);
+-	if (idx >= 0)
++	if (idx >= 0) {
+ 		bid = &(busid_table[idx]);
++		/* get busid_lock before returning */
++		spin_lock(&bid->busid_lock);
++	}
+ 	spin_unlock(&busid_table_lock);
+ 
+ 	return bid;
+ }
+ 
++void put_busid_priv(struct bus_id_priv *bid)
++{
++	if (bid)
++		spin_unlock(&bid->busid_lock);
++}
++
+ static int add_match_busid(char *busid)
+ {
+ 	int i;
+@@ -78,15 +98,19 @@ static int add_match_busid(char *busid)
+ 		goto out;
+ 	}
+ 
+-	for (i = 0; i < MAX_BUSID; i++)
++	for (i = 0; i < MAX_BUSID; i++) {
++		spin_lock(&busid_table[i].busid_lock);
+ 		if (!busid_table[i].name[0]) {
+ 			strlcpy(busid_table[i].name, busid, BUSID_SIZE);
+ 			if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
+ 			    (busid_table[i].status != STUB_BUSID_REMOV))
+ 				busid_table[i].status = STUB_BUSID_ADDED;
+ 			ret = 0;
++			spin_unlock(&busid_table[i].busid_lock);
+ 			break;
+ 		}
++		spin_unlock(&busid_table[i].busid_lock);
++	}
+ 
+ out:
+ 	spin_unlock(&busid_table_lock);
+@@ -107,6 +131,8 @@ int del_match_busid(char *busid)
+ 	/* found */
+ 	ret = 0;
+ 
++	spin_lock(&busid_table[idx].busid_lock);
++
+ 	if (busid_table[idx].status == STUB_BUSID_OTHER)
+ 		memset(busid_table[idx].name, 0, BUSID_SIZE);
+ 
+@@ -114,6 +140,7 @@ int del_match_busid(char *busid)
+ 	    (busid_table[idx].status != STUB_BUSID_ADDED))
+ 		busid_table[idx].status = STUB_BUSID_REMOV;
+ 
++	spin_unlock(&busid_table[idx].busid_lock);
+ out:
+ 	spin_unlock(&busid_table_lock);
+ 
+@@ -126,9 +153,12 @@ static ssize_t match_busid_show(struct device_driver *drv, char *buf)
+ 	char *out = buf;
+ 
+ 	spin_lock(&busid_table_lock);
+-	for (i = 0; i < MAX_BUSID; i++)
++	for (i = 0; i < MAX_BUSID; i++) {
++		spin_lock(&busid_table[i].busid_lock);
+ 		if (busid_table[i].name[0])
+ 			out += sprintf(out, "%s ", busid_table[i].name);
++		spin_unlock(&busid_table[i].busid_lock);
++	}
+ 	spin_unlock(&busid_table_lock);
+ 	out += sprintf(out, "\n");
+ 
+@@ -169,6 +199,51 @@ static ssize_t match_busid_store(struct device_driver *dev, const char *buf,
+ }
+ static DRIVER_ATTR_RW(match_busid);
+ 
++static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
++{
++	int ret;
++
++	/* device_attach() callers should hold parent lock for USB */
++	if (busid_priv->udev->dev.parent)
++		device_lock(busid_priv->udev->dev.parent);
++	ret = device_attach(&busid_priv->udev->dev);
++	if (busid_priv->udev->dev.parent)
++		device_unlock(busid_priv->udev->dev.parent);
++	if (ret < 0) {
++		dev_err(&busid_priv->udev->dev, "rebind failed\n");
++		return ret;
++	}
++	return 0;
++}
++
++static void stub_device_rebind(void)
++{
++#if IS_MODULE(CONFIG_USBIP_HOST)
++	struct bus_id_priv *busid_priv;
++	int i;
++
++	/* update status to STUB_BUSID_OTHER so probe ignores the device */
++	spin_lock(&busid_table_lock);
++	for (i = 0; i < MAX_BUSID; i++) {
++		if (busid_table[i].name[0] &&
++		    busid_table[i].shutdown_busid) {
++			busid_priv = &(busid_table[i]);
++			busid_priv->status = STUB_BUSID_OTHER;
++		}
++	}
++	spin_unlock(&busid_table_lock);
++
++	/* now run rebind - no need to hold locks. driver files are removed */
++	for (i = 0; i < MAX_BUSID; i++) {
++		if (busid_table[i].name[0] &&
++		    busid_table[i].shutdown_busid) {
++			busid_priv = &(busid_table[i]);
++			do_rebind(busid_table[i].name, busid_priv);
++		}
++	}
++#endif
++}
++
+ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ 				 size_t count)
+ {
+@@ -186,16 +261,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ 	if (!bid)
+ 		return -ENODEV;
+ 
+-	/* device_attach() callers should hold parent lock for USB */
+-	if (bid->udev->dev.parent)
+-		device_lock(bid->udev->dev.parent);
+-	ret = device_attach(&bid->udev->dev);
+-	if (bid->udev->dev.parent)
+-		device_unlock(bid->udev->dev.parent);
+-	if (ret < 0) {
+-		dev_err(&bid->udev->dev, "rebind failed\n");
++	/* mark the device for deletion so probe ignores it during rescan */
++	bid->status = STUB_BUSID_OTHER;
++	/* release the busid lock */
++	put_busid_priv(bid);
++
++	ret = do_rebind((char *) buf, bid);
++	if (ret < 0)
+ 		return ret;
+-	}
++
++	/* delete device from busid_table */
++	del_match_busid((char *) buf);
+ 
+ 	return count;
+ }
+@@ -317,6 +393,9 @@ static void __exit usbip_host_exit(void)
+ 	 */
+ 	usb_deregister_device_driver(&stub_driver);
+ 
++	/* initiate scan to attach devices */
++	stub_device_rebind();
++
+ 	kmem_cache_destroy(stub_priv_cache);
+ }
+ 
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index b88a79e69ddf..d3b59f14f9c2 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -2491,10 +2491,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
+ 	if (p->reada != READA_NONE)
+ 		reada_for_search(fs_info, p, level, slot, key->objectid);
+ 
+-	btrfs_release_path(p);
+-
+ 	ret = -EAGAIN;
+-	tmp = read_tree_block(fs_info, blocknr, 0);
++	tmp = read_tree_block(fs_info, blocknr, gen);
+ 	if (!IS_ERR(tmp)) {
+ 		/*
+ 		 * If the read above didn't mark this buffer up to date,
+@@ -2508,6 +2506,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
+ 	} else {
+ 		ret = PTR_ERR(tmp);
+ 	}
++
++	btrfs_release_path(p);
+ 	return ret;
+ }
+ 
+@@ -5460,12 +5460,24 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ 	down_read(&fs_info->commit_root_sem);
+ 	left_level = btrfs_header_level(left_root->commit_root);
+ 	left_root_level = left_level;
+-	left_path->nodes[left_level] = left_root->commit_root;
++	left_path->nodes[left_level] =
++			btrfs_clone_extent_buffer(left_root->commit_root);
++	if (!left_path->nodes[left_level]) {
++		up_read(&fs_info->commit_root_sem);
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	extent_buffer_get(left_path->nodes[left_level]);
+ 
+ 	right_level = btrfs_header_level(right_root->commit_root);
+ 	right_root_level = right_level;
+-	right_path->nodes[right_level] = right_root->commit_root;
++	right_path->nodes[right_level] =
++			btrfs_clone_extent_buffer(right_root->commit_root);
++	if (!right_path->nodes[right_level]) {
++		up_read(&fs_info->commit_root_sem);
++		ret = -ENOMEM;
++		goto out;
++	}
+ 	extent_buffer_get(right_path->nodes[right_level]);
+ 	up_read(&fs_info->commit_root_sem);
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index da308774b8a4..13b66ff1719c 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3153,6 +3153,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
+ 			      u64 *orig_start, u64 *orig_block_len,
+ 			      u64 *ram_bytes);
+ 
++void __btrfs_del_delalloc_inode(struct btrfs_root *root,
++				struct btrfs_inode *inode);
+ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
+ int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
+ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 21f34ad0d411..fea78d138073 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3744,6 +3744,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
+ 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
+ 
+ 	btrfs_free_qgroup_config(fs_info);
++	ASSERT(list_empty(&fs_info->delalloc_roots));
+ 
+ 	if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+ 		btrfs_info(fs_info, "at unmount delalloc count %lld",
+@@ -4049,15 +4050,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
+ 
+ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
+ {
++	/* cleanup FS via transaction */
++	btrfs_cleanup_transaction(fs_info);
++
+ 	mutex_lock(&fs_info->cleaner_mutex);
+ 	btrfs_run_delayed_iputs(fs_info);
+ 	mutex_unlock(&fs_info->cleaner_mutex);
+ 
+ 	down_write(&fs_info->cleanup_work_sem);
+ 	up_write(&fs_info->cleanup_work_sem);
+-
+-	/* cleanup FS via transaction */
+-	btrfs_cleanup_transaction(fs_info);
+ }
+ 
+ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
+@@ -4182,19 +4183,23 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
+ 	list_splice_init(&root->delalloc_inodes, &splice);
+ 
+ 	while (!list_empty(&splice)) {
++		struct inode *inode = NULL;
+ 		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
+ 					       delalloc_inodes);
+-
+-		list_del_init(&btrfs_inode->delalloc_inodes);
+-		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+-			  &btrfs_inode->runtime_flags);
++		__btrfs_del_delalloc_inode(root, btrfs_inode);
+ 		spin_unlock(&root->delalloc_lock);
+ 
+-		btrfs_invalidate_inodes(btrfs_inode->root);
+-
++		/*
++		 * Make sure we get a live inode and that it'll not disappear
++		 * meanwhile.
++		 */
++		inode = igrab(&btrfs_inode->vfs_inode);
++		if (inode) {
++			invalidate_inode_pages2(inode->i_mapping);
++			iput(inode);
++		}
+ 		spin_lock(&root->delalloc_lock);
+ 	}
+-
+ 	spin_unlock(&root->delalloc_lock);
+ }
+ 
+@@ -4210,7 +4215,6 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
+ 	while (!list_empty(&splice)) {
+ 		root = list_first_entry(&splice, struct btrfs_root,
+ 					 delalloc_root);
+-		list_del_init(&root->delalloc_root);
+ 		root = btrfs_grab_fs_root(root);
+ 		BUG_ON(!root);
+ 		spin_unlock(&fs_info->delalloc_root_lock);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ef1cf323832a..f370bdc126b8 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1762,12 +1762,12 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
+ 	spin_unlock(&root->delalloc_lock);
+ }
+ 
+-static void btrfs_del_delalloc_inode(struct btrfs_root *root,
+-				     struct btrfs_inode *inode)
++
++void __btrfs_del_delalloc_inode(struct btrfs_root *root,
++				struct btrfs_inode *inode)
+ {
+ 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ 
+-	spin_lock(&root->delalloc_lock);
+ 	if (!list_empty(&inode->delalloc_inodes)) {
+ 		list_del_init(&inode->delalloc_inodes);
+ 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+@@ -1780,6 +1780,13 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
+ 			spin_unlock(&fs_info->delalloc_root_lock);
+ 		}
+ 	}
++}
++
++static void btrfs_del_delalloc_inode(struct btrfs_root *root,
++				     struct btrfs_inode *inode)
++{
++	spin_lock(&root->delalloc_lock);
++	__btrfs_del_delalloc_inode(root, inode);
+ 	spin_unlock(&root->delalloc_lock);
+ }
+ 
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index b30a056963ab..29619496e358 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -393,6 +393,7 @@ static int prop_compression_apply(struct inode *inode,
+ 				  const char *value,
+ 				  size_t len)
+ {
++	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ 	int type;
+ 
+ 	if (len == 0) {
+@@ -403,14 +404,17 @@ static int prop_compression_apply(struct inode *inode,
+ 		return 0;
+ 	}
+ 
+-	if (!strncmp("lzo", value, 3))
++	if (!strncmp("lzo", value, 3)) {
+ 		type = BTRFS_COMPRESS_LZO;
+-	else if (!strncmp("zlib", value, 4))
++		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
++	} else if (!strncmp("zlib", value, 4)) {
+ 		type = BTRFS_COMPRESS_ZLIB;
+-	else if (!strncmp("zstd", value, len))
++	} else if (!strncmp("zstd", value, len)) {
+ 		type = BTRFS_COMPRESS_ZSTD;
+-	else
++		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
++	} else {
+ 		return -EINVAL;
++	}
+ 
+ 	BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
+ 	BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 434457794c27..ac6ea1503cd6 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4749,6 +4749,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 	struct extent_map_tree *em_tree = &inode->extent_tree;
+ 	u64 logged_isize = 0;
+ 	bool need_log_inode_item = true;
++	bool xattrs_logged = false;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -5050,6 +5051,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
+ 	if (err)
+ 		goto out_unlock;
++	xattrs_logged = true;
+ 	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
+ 		btrfs_release_path(path);
+ 		btrfs_release_path(dst_path);
+@@ -5062,6 +5064,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 	btrfs_release_path(dst_path);
+ 	if (need_log_inode_item) {
+ 		err = log_inode_item(trans, log, dst_path, inode);
++		if (!err && !xattrs_logged) {
++			err = btrfs_log_all_xattrs(trans, root, inode, path,
++						   dst_path);
++			btrfs_release_path(path);
++		}
+ 		if (err)
+ 			goto out_unlock;
+ 	}
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index b2d05c6b1c56..854cb4533f88 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4046,6 +4046,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * A ro->rw remount sequence should continue with the paused balance
++	 * regardless of who pauses it, system or the user as of now, so set
++	 * the resume flag.
++	 */
++	spin_lock(&fs_info->balance_lock);
++	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
++	spin_unlock(&fs_info->balance_lock);
++
+ 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+ 	return PTR_ERR_OR_ZERO(tsk);
+ }
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 598803576e4c..9a517c03ac78 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -85,6 +85,7 @@
+ #include <linux/delayacct.h>
+ #include <linux/seq_file.h>
+ #include <linux/pid_namespace.h>
++#include <linux/prctl.h>
+ #include <linux/ptrace.h>
+ #include <linux/tracehook.h>
+ #include <linux/string_helpers.h>
+@@ -347,6 +348,30 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
+ #ifdef CONFIG_SECCOMP
+ 	seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
+ #endif
++	seq_printf(m, "\nSpeculation_Store_Bypass:\t");
++	switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
++	case -EINVAL:
++		seq_printf(m, "unknown");
++		break;
++	case PR_SPEC_NOT_AFFECTED:
++		seq_printf(m, "not vulnerable");
++		break;
++	case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
++		seq_printf(m, "thread force mitigated");
++		break;
++	case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
++		seq_printf(m, "thread mitigated");
++		break;
++	case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
++		seq_printf(m, "thread vulnerable");
++		break;
++	case PR_SPEC_DISABLE:
++		seq_printf(m, "globally mitigated");
++		break;
++	default:
++		seq_printf(m, "vulnerable");
++		break;
++	}
+ 	seq_putc(m, '\n');
+ }
+ 
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 6b66cd1aa0b9..660a7d5e4702 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -146,6 +146,7 @@ struct bpf_insn_aux_data {
+ 		s32 call_imm;			/* saved imm field of call insn */
+ 	};
+ 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
++	int sanitize_stack_off; /* stack slot to be cleared */
+ 	bool seen; /* this insn was processed by the verifier */
+ };
+ 
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 7b01bc11c692..a97a63eef59f 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -53,6 +53,8 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
+ 				   struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spectre_v2(struct device *dev,
+ 				   struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
++					  struct device_attribute *attr, char *buf);
+ 
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index f5083aa72eae..c2520ca123aa 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -395,8 +395,8 @@ typedef struct {
+ 	u32 attributes;
+ 	u32 get_bar_attributes;
+ 	u32 set_bar_attributes;
+-	uint64_t romsize;
+-	void *romimage;
++	u64 romsize;
++	u32 romimage;
+ } efi_pci_io_protocol_32;
+ 
+ typedef struct {
+@@ -415,8 +415,8 @@ typedef struct {
+ 	u64 attributes;
+ 	u64 get_bar_attributes;
+ 	u64 set_bar_attributes;
+-	uint64_t romsize;
+-	void *romimage;
++	u64 romsize;
++	u64 romimage;
+ } efi_pci_io_protocol_64;
+ 
+ typedef struct {
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index e791ebc65c9c..0c5ef54fd416 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -7,6 +7,8 @@
+ #define _LINUX_NOSPEC_H
+ #include <asm/barrier.h>
+ 
++struct task_struct;
++
+ /**
+  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+  * @index: array element index
+@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ 									\
+ 	(typeof(_i)) (_i & _mask);					\
+ })
++
++/* Speculation control prctl */
++int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
++int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
++			     unsigned long ctrl);
++/* Speculation control for seccomp enforced mitigation */
++void arch_seccomp_spec_mitigate(struct task_struct *task);
++
+ #endif /* _LINUX_NOSPEC_H */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index b161ef8a902e..710508af02c8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1365,7 +1365,8 @@ static inline bool is_percpu_thread(void)
+ #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
+ #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
+ #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
+-
++#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
++#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
+ 
+ #define TASK_PFA_TEST(name, func)					\
+ 	static inline bool task_##func(struct task_struct *p)		\
+@@ -1390,6 +1391,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+ TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+ 
++TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
++TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
++
++TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
++
+ static inline void
+ current_restore_flags(unsigned long orig_flags, unsigned long flags)
+ {
+diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
+index c723a5c4e3ff..e5320f6c8654 100644
+--- a/include/linux/seccomp.h
++++ b/include/linux/seccomp.h
+@@ -4,8 +4,9 @@
+ 
+ #include <uapi/linux/seccomp.h>
+ 
+-#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC | \
+-					 SECCOMP_FILTER_FLAG_LOG)
++#define SECCOMP_FILTER_FLAG_MASK	(SECCOMP_FILTER_FLAG_TSYNC	| \
++					 SECCOMP_FILTER_FLAG_LOG	| \
++					 SECCOMP_FILTER_FLAG_SPEC_ALLOW)
+ 
+ #ifdef CONFIG_SECCOMP
+ 
+diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
+index 7dd8f34c37df..fdcf88bcf0ea 100644
+--- a/include/trace/events/xen.h
++++ b/include/trace/events/xen.h
+@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
+ DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
+ 
+-TRACE_EVENT(xen_mmu_flush_tlb_all,
+-	    TP_PROTO(int x),
+-	    TP_ARGS(x),
+-	    TP_STRUCT__entry(__array(char, x, 0)),
+-	    TP_fast_assign((void)x),
+-	    TP_printk("%s", "")
+-	);
+-
+-TRACE_EVENT(xen_mmu_flush_tlb,
+-	    TP_PROTO(int x),
+-	    TP_ARGS(x),
+-	    TP_STRUCT__entry(__array(char, x, 0)),
+-	    TP_fast_assign((void)x),
+-	    TP_printk("%s", "")
+-	);
+-
+ TRACE_EVENT(xen_mmu_flush_tlb_one_user,
+ 	    TP_PROTO(unsigned long addr),
+ 	    TP_ARGS(addr),
+diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
+index af5f8c2df87a..db9f15f5db04 100644
+--- a/include/uapi/linux/prctl.h
++++ b/include/uapi/linux/prctl.h
+@@ -207,4 +207,16 @@ struct prctl_mm_map {
+ # define PR_SVE_VL_LEN_MASK		0xffff
+ # define PR_SVE_VL_INHERIT		(1 << 17) /* inherit across exec */
+ 
++/* Per task speculation control */
++#define PR_GET_SPECULATION_CTRL		52
++#define PR_SET_SPECULATION_CTRL		53
++/* Speculation control variants */
++# define PR_SPEC_STORE_BYPASS		0
++/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
++# define PR_SPEC_NOT_AFFECTED		0
++# define PR_SPEC_PRCTL			(1UL << 0)
++# define PR_SPEC_ENABLE			(1UL << 1)
++# define PR_SPEC_DISABLE		(1UL << 2)
++# define PR_SPEC_FORCE_DISABLE		(1UL << 3)
++
+ #endif /* _LINUX_PRCTL_H */
+diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
+index 2a0bd9dd104d..9efc0e73d50b 100644
+--- a/include/uapi/linux/seccomp.h
++++ b/include/uapi/linux/seccomp.h
+@@ -17,8 +17,9 @@
+ #define SECCOMP_GET_ACTION_AVAIL	2
+ 
+ /* Valid flags for SECCOMP_SET_MODE_FILTER */
+-#define SECCOMP_FILTER_FLAG_TSYNC	1
+-#define SECCOMP_FILTER_FLAG_LOG		2
++#define SECCOMP_FILTER_FLAG_TSYNC	(1UL << 0)
++#define SECCOMP_FILTER_FLAG_LOG		(1UL << 1)
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW	(1UL << 2)
+ 
+ /*
+  * All BPF programs must return a 32-bit value.
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index c6eff108aa99..1a17e0d84347 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -970,7 +970,7 @@ static bool register_is_null(struct bpf_reg_state *reg)
+  */
+ static int check_stack_write(struct bpf_verifier_env *env,
+ 			     struct bpf_func_state *state, /* func where register points to */
+-			     int off, int size, int value_regno)
++			     int off, int size, int value_regno, int insn_idx)
+ {
+ 	struct bpf_func_state *cur; /* state of the current function */
+ 	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+@@ -1009,8 +1009,33 @@ static int check_stack_write(struct bpf_verifier_env *env,
+ 		state->stack[spi].spilled_ptr = cur->regs[value_regno];
+ 		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
+ 
+-		for (i = 0; i < BPF_REG_SIZE; i++)
++		for (i = 0; i < BPF_REG_SIZE; i++) {
++			if (state->stack[spi].slot_type[i] == STACK_MISC &&
++			    !env->allow_ptr_leaks) {
++				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
++				int soff = (-spi - 1) * BPF_REG_SIZE;
++
++				/* detected reuse of integer stack slot with a pointer
++				 * which means either llvm is reusing stack slot or
++				 * an attacker is trying to exploit CVE-2018-3639
++				 * (speculative store bypass)
++				 * Have to sanitize that slot with preemptive
++				 * store of zero.
++				 */
++				if (*poff && *poff != soff) {
++					/* disallow programs where single insn stores
++					 * into two different stack slots, since verifier
++					 * cannot sanitize them
++					 */
++					verbose(env,
++						"insn %d cannot access two stack slots fp%d and fp%d",
++						insn_idx, *poff, soff);
++					return -EINVAL;
++				}
++				*poff = soff;
++			}
+ 			state->stack[spi].slot_type[i] = STACK_SPILL;
++		}
+ 	} else {
+ 		u8 type = STACK_MISC;
+ 
+@@ -1685,7 +1710,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
+ 
+ 		if (t == BPF_WRITE)
+ 			err = check_stack_write(env, state, off, size,
+-						value_regno);
++						value_regno, insn_idx);
+ 		else
+ 			err = check_stack_read(env, state, off, size,
+ 					       value_regno);
+@@ -5156,6 +5181,34 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
+ 		else
+ 			continue;
+ 
++		if (type == BPF_WRITE &&
++		    env->insn_aux_data[i + delta].sanitize_stack_off) {
++			struct bpf_insn patch[] = {
++				/* Sanitize suspicious stack slot with zero.
++				 * There are no memory dependencies for this store,
++				 * since it's only using frame pointer and immediate
++				 * constant of zero
++				 */
++				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
++					   env->insn_aux_data[i + delta].sanitize_stack_off,
++					   0),
++				/* the original STX instruction will immediately
++				 * overwrite the same stack slot with appropriate value
++				 */
++				*insn,
++			};
++
++			cnt = ARRAY_SIZE(patch);
++			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
++			if (!new_prog)
++				return -ENOMEM;
++
++			delta    += cnt - 1;
++			env->prog = new_prog;
++			insn      = new_prog->insnsi + i + delta;
++			continue;
++		}
++
+ 		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
+ 			continue;
+ 
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index dc77548167ef..e691d9a6c58d 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -19,6 +19,8 @@
+ #include <linux/compat.h>
+ #include <linux/coredump.h>
+ #include <linux/kmemleak.h>
++#include <linux/nospec.h>
++#include <linux/prctl.h>
+ #include <linux/sched.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/seccomp.h>
+@@ -227,8 +229,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
+ 	return true;
+ }
+ 
++void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
++
+ static inline void seccomp_assign_mode(struct task_struct *task,
+-				       unsigned long seccomp_mode)
++				       unsigned long seccomp_mode,
++				       unsigned long flags)
+ {
+ 	assert_spin_locked(&task->sighand->siglock);
+ 
+@@ -238,6 +243,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
+ 	 * filter) is set.
+ 	 */
+ 	smp_mb__before_atomic();
++	/* Assume default seccomp processes want spec flaw mitigation. */
++	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
++		arch_seccomp_spec_mitigate(task);
+ 	set_tsk_thread_flag(task, TIF_SECCOMP);
+ }
+ 
+@@ -305,7 +313,7 @@ static inline pid_t seccomp_can_sync_threads(void)
+  * without dropping the locks.
+  *
+  */
+-static inline void seccomp_sync_threads(void)
++static inline void seccomp_sync_threads(unsigned long flags)
+ {
+ 	struct task_struct *thread, *caller;
+ 
+@@ -346,7 +354,8 @@ static inline void seccomp_sync_threads(void)
+ 		 * allow one thread to transition the other.
+ 		 */
+ 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+-			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
++			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
++					    flags);
+ 	}
+ }
+ 
+@@ -469,7 +478,7 @@ static long seccomp_attach_filter(unsigned int flags,
+ 
+ 	/* Now that the new filter is in place, synchronize to all threads. */
+ 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+-		seccomp_sync_threads();
++		seccomp_sync_threads(flags);
+ 
+ 	return 0;
+ }
+@@ -818,7 +827,7 @@ static long seccomp_set_mode_strict(void)
+ #ifdef TIF_NOTSC
+ 	disable_TSC();
+ #endif
+-	seccomp_assign_mode(current, seccomp_mode);
++	seccomp_assign_mode(current, seccomp_mode, 0);
+ 	ret = 0;
+ 
+ out:
+@@ -876,7 +885,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
+ 	/* Do not free the successfully attached filter. */
+ 	prepared = NULL;
+ 
+-	seccomp_assign_mode(current, seccomp_mode);
++	seccomp_assign_mode(current, seccomp_mode, flags);
+ out:
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
+diff --git a/kernel/sys.c b/kernel/sys.c
+index f2289de20e19..9afc4cb5acf5 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -61,6 +61,8 @@
+ #include <linux/uidgid.h>
+ #include <linux/cred.h>
+ 
++#include <linux/nospec.h>
++
+ #include <linux/kmsg_dump.h>
+ /* Move somewhere else to avoid recompiling? */
+ #include <generated/utsrelease.h>
+@@ -2190,6 +2192,17 @@ static int propagate_has_child_subreaper(struct task_struct *p, void *data)
+ 	return 1;
+ }
+ 
++int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
++{
++	return -EINVAL;
++}
++
++int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
++				    unsigned long ctrl)
++{
++	return -EINVAL;
++}
++
+ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ 		unsigned long, arg4, unsigned long, arg5)
+ {
+@@ -2398,6 +2411,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ 	case PR_SVE_GET_VL:
+ 		error = SVE_GET_VL();
+ 		break;
++	case PR_GET_SPECULATION_CTRL:
++		if (arg3 || arg4 || arg5)
++			return -EINVAL;
++		error = arch_prctl_spec_ctrl_get(me, arg2);
++		break;
++	case PR_SET_SPECULATION_CTRL:
++		if (arg4 || arg5)
++			return -EINVAL;
++		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
++		break;
+ 	default:
+ 		error = -EINVAL;
+ 		break;
+diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
+index b398c2ea69b2..aa2094d5dd27 100644
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -612,6 +612,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
+ 	now = ktime_get();
+ 	/* Find all expired events */
+ 	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
++		/*
++		 * Required for !SMP because for_each_cpu() reports
++		 * unconditionally CPU0 as set on UP kernels.
++		 */
++		if (!IS_ENABLED(CONFIG_SMP) &&
++		    cpumask_empty(tick_broadcast_oneshot_mask))
++			break;
++
+ 		td = &per_cpu(tick_cpu_device, cpu);
+ 		if (td->evtdev->next_event <= now) {
+ 			cpumask_set_cpu(cpu, tmpmask);
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 8e00138d593f..a7705b0f139c 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
+ static void __rcu **skip_siblings(struct radix_tree_node **nodep,
+ 			void __rcu **slot, struct radix_tree_iter *iter)
+ {
+-	void *sib = node_to_entry(slot - 1);
+-
+ 	while (iter->index < iter->next_index) {
+ 		*nodep = rcu_dereference_raw(*slot);
+-		if (*nodep && *nodep != sib)
++		if (*nodep && !is_sibling_entry(iter->node, *nodep))
+ 			return slot;
+ 		slot++;
+ 		iter->index = __radix_tree_iter_add(iter, 1);
+@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
+ 				struct radix_tree_iter *iter, unsigned flags)
+ {
+ 	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
+-	struct radix_tree_node *node = rcu_dereference_raw(*slot);
++	struct radix_tree_node *node;
+ 
+ 	slot = skip_siblings(&node, slot, iter);
+ 
+diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
+index 413367cf569e..d47a802220f5 100644
+--- a/lib/test_bitmap.c
++++ b/lib/test_bitmap.c
+@@ -329,23 +329,32 @@ static void noinline __init test_mem_optimisations(void)
+ 	unsigned int start, nbits;
+ 
+ 	for (start = 0; start < 1024; start += 8) {
+-		memset(bmap1, 0x5a, sizeof(bmap1));
+-		memset(bmap2, 0x5a, sizeof(bmap2));
+ 		for (nbits = 0; nbits < 1024 - start; nbits += 8) {
++			memset(bmap1, 0x5a, sizeof(bmap1));
++			memset(bmap2, 0x5a, sizeof(bmap2));
++
+ 			bitmap_set(bmap1, start, nbits);
+ 			__bitmap_set(bmap2, start, nbits);
+-			if (!bitmap_equal(bmap1, bmap2, 1024))
++			if (!bitmap_equal(bmap1, bmap2, 1024)) {
+ 				printk("set not equal %d %d\n", start, nbits);
+-			if (!__bitmap_equal(bmap1, bmap2, 1024))
++				failed_tests++;
++			}
++			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
+ 				printk("set not __equal %d %d\n", start, nbits);
++				failed_tests++;
++			}
+ 
+ 			bitmap_clear(bmap1, start, nbits);
+ 			__bitmap_clear(bmap2, start, nbits);
+-			if (!bitmap_equal(bmap1, bmap2, 1024))
++			if (!bitmap_equal(bmap1, bmap2, 1024)) {
+ 				printk("clear not equal %d %d\n", start, nbits);
+-			if (!__bitmap_equal(bmap1, bmap2, 1024))
++				failed_tests++;
++			}
++			if (!__bitmap_equal(bmap1, bmap2, 1024)) {
+ 				printk("clear not __equal %d %d\n", start,
+ 									nbits);
++				failed_tests++;
++			}
+ 		}
+ 	}
+ }
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 89f8a4a4b770..38b509cc6b46 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -1659,19 +1659,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
+ 	return number(buf, end, (unsigned long int)ptr, spec);
+ }
+ 
+-static bool have_filled_random_ptr_key __read_mostly;
++static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+ static siphash_key_t ptr_key __read_mostly;
+ 
+-static void fill_random_ptr_key(struct random_ready_callback *unused)
++static void enable_ptr_key_workfn(struct work_struct *work)
+ {
+ 	get_random_bytes(&ptr_key, sizeof(ptr_key));
+-	/*
+-	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+-	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
+-	 * after get_random_bytes() returns.
+-	 */
+-	smp_mb();
+-	WRITE_ONCE(have_filled_random_ptr_key, true);
++	/* Needs to run from preemptible context */
++	static_branch_disable(&not_filled_random_ptr_key);
++}
++
++static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
++
++static void fill_random_ptr_key(struct random_ready_callback *unused)
++{
++	/* This may be in an interrupt handler. */
++	queue_work(system_unbound_wq, &enable_ptr_key_work);
+ }
+ 
+ static struct random_ready_callback random_ready = {
+@@ -1685,7 +1688,8 @@ static int __init initialize_ptr_random(void)
+ 	if (!ret) {
+ 		return 0;
+ 	} else if (ret == -EALREADY) {
+-		fill_random_ptr_key(&random_ready);
++		/* This is in preemptible context */
++		enable_ptr_key_workfn(&enable_ptr_key_work);
+ 		return 0;
+ 	}
+ 
+@@ -1699,7 +1703,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+ 	unsigned long hashval;
+ 	const int default_width = 2 * sizeof(ptr);
+ 
+-	if (unlikely(!have_filled_random_ptr_key)) {
++	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ 		spec.field_width = default_width;
+ 		/* string length must be less than default_width */
+ 		return string(buf, end, "(ptrval)", spec);
+diff --git a/mm/Kconfig b/mm/Kconfig
+index c782e8fb7235..e07608f64d47 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -644,6 +644,7 @@ config DEFERRED_STRUCT_PAGE_INIT
+ 	default n
+ 	depends on NO_BOOTMEM
+ 	depends on !FLATMEM
++	depends on !NEED_PER_CPU_KM
+ 	help
+ 	  Ordinarily all struct pages are initialised during early boot in a
+ 	  single thread. On very large machines this can take a considerable
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 530e12ae52d7..c853386b86ff 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -2357,41 +2357,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
+ 	}
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+-		if (nft_is_active_next(net, old_rule)) {
+-			trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+-						   old_rule);
+-			if (trans == NULL) {
+-				err = -ENOMEM;
+-				goto err2;
+-			}
+-			nft_deactivate_next(net, old_rule);
+-			chain->use--;
+-			list_add_tail_rcu(&rule->list, &old_rule->list);
+-		} else {
++		if (!nft_is_active_next(net, old_rule)) {
+ 			err = -ENOENT;
+ 			goto err2;
+ 		}
+-	} else if (nlh->nlmsg_flags & NLM_F_APPEND)
+-		if (old_rule)
+-			list_add_rcu(&rule->list, &old_rule->list);
+-		else
+-			list_add_tail_rcu(&rule->list, &chain->rules);
+-	else {
+-		if (old_rule)
+-			list_add_tail_rcu(&rule->list, &old_rule->list);
+-		else
+-			list_add_rcu(&rule->list, &chain->rules);
+-	}
++		trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
++					   old_rule);
++		if (trans == NULL) {
++			err = -ENOMEM;
++			goto err2;
++		}
++		nft_deactivate_next(net, old_rule);
++		chain->use--;
+ 
+-	if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+-		err = -ENOMEM;
+-		goto err3;
++		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
++			err = -ENOMEM;
++			goto err2;
++		}
++
++		list_add_tail_rcu(&rule->list, &old_rule->list);
++	} else {
++		if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
++			err = -ENOMEM;
++			goto err2;
++		}
++
++		if (nlh->nlmsg_flags & NLM_F_APPEND) {
++			if (old_rule)
++				list_add_rcu(&rule->list, &old_rule->list);
++			else
++				list_add_tail_rcu(&rule->list, &chain->rules);
++		 } else {
++			if (old_rule)
++				list_add_tail_rcu(&rule->list, &old_rule->list);
++			else
++				list_add_rcu(&rule->list, &chain->rules);
++		}
+ 	}
+ 	chain->use++;
+ 	return 0;
+ 
+-err3:
+-	list_del_rcu(&rule->list);
+ err2:
+ 	nf_tables_rule_destroy(&ctx, rule);
+ err1:
+@@ -3203,18 +3208,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
+ 
+ 	err = ops->init(set, &desc, nla);
+ 	if (err < 0)
+-		goto err2;
++		goto err3;
+ 
+ 	err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
+ 	if (err < 0)
+-		goto err3;
++		goto err4;
+ 
+ 	list_add_tail_rcu(&set->list, &table->sets);
+ 	table->use++;
+ 	return 0;
+ 
+-err3:
++err4:
+ 	ops->destroy(set);
++err3:
++	kfree(set->name);
+ err2:
+ 	kvfree(set);
+ err1:
+@@ -4392,9 +4399,9 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
+ 
+-struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
+-						 const struct nlattr *nla,
+-						 u32 objtype, u8 genmask)
++static struct nft_object *nf_tables_obj_lookup_byhandle(const struct nft_table *table,
++							const struct nlattr *nla,
++							u32 objtype, u8 genmask)
+ {
+ 	struct nft_object *obj;
+ 
+@@ -4914,7 +4921,7 @@ struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table,
+ }
+ EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup);
+ 
+-struct nft_flowtable *
++static struct nft_flowtable *
+ nf_tables_flowtable_lookup_byhandle(const struct nft_table *table,
+ 				    const struct nlattr *nla, u8 genmask)
+ {
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index a848836a5de0..507fd5210c1c 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -396,8 +396,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
+ 	if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
+ 	    copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
+ 		goto error;
+-	if (get_user(data->owner, &data32->owner) ||
+-	    get_user(data->type, &data32->type))
++	if (get_user(data->owner, &data32->owner))
+ 		goto error;
+ 	switch (data->type) {
+ 	case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 738e1fe90312..62fbdbe74b93 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2208,6 +2208,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
+ 	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ 	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ 	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
++	/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
++	SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+ 	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+ 	{}
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2dd34dd77447..01a6643fc7d4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ 	SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
++	SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 06b22624ab7a..1a4c107b1940 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -915,6 +915,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 		}
+ 		break;
+ 
++	case USB_ID(0x0d8c, 0x0103):
++		if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
++			usb_audio_info(chip,
++				 "set volume quirk for CM102-A+/102S+\n");
++			cval->min = -256;
++		}
++		break;
++
+ 	case USB_ID(0x0471, 0x0101):
+ 	case USB_ID(0x0471, 0x0104):
+ 	case USB_ID(0x0471, 0x0105):
+diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
+index 5df609950a66..e350cf3d4f90 100644
+--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
++++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
+@@ -134,11 +134,15 @@ struct seccomp_data {
+ #endif
+ 
+ #ifndef SECCOMP_FILTER_FLAG_TSYNC
+-#define SECCOMP_FILTER_FLAG_TSYNC 1
++#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+ #endif
+ 
+ #ifndef SECCOMP_FILTER_FLAG_LOG
+-#define SECCOMP_FILTER_FLAG_LOG 2
++#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
++#endif
++
++#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
++#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+ #endif
+ 
+ #ifndef PTRACE_SECCOMP_GET_METADATA
+@@ -2072,14 +2076,26 @@ TEST(seccomp_syscall_mode_lock)
+ TEST(detect_seccomp_filter_flags)
+ {
+ 	unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
+-				 SECCOMP_FILTER_FLAG_LOG };
++				 SECCOMP_FILTER_FLAG_LOG,
++				 SECCOMP_FILTER_FLAG_SPEC_ALLOW };
+ 	unsigned int flag, all_flags;
+ 	int i;
+ 	long ret;
+ 
+ 	/* Test detection of known-good filter flags */
+ 	for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
++		int bits = 0;
++
+ 		flag = flags[i];
++		/* Make sure the flag is a single bit! */
++		while (flag) {
++			if (flag & 0x1)
++				bits ++;
++			flag >>= 1;
++		}
++		ASSERT_EQ(1, bits);
++		flag = flags[i];
++
+ 		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ 		ASSERT_NE(ENOSYS, errno) {
+ 			TH_LOG("Kernel does not support seccomp syscall!");
+diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
+index 10b38178cff2..4ffc0b5e6105 100644
+--- a/virt/kvm/arm/vgic/vgic-debug.c
++++ b/virt/kvm/arm/vgic/vgic-debug.c
+@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
+ 	struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+ 	struct vgic_irq *irq;
+ 	struct kvm_vcpu *vcpu = NULL;
++	unsigned long flags;
+ 
+ 	if (iter->dist_id == 0) {
+ 		print_dist_state(s, &kvm->arch.vgic);
+@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
+ 		irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
+ 	}
+ 
+-	spin_lock(&irq->irq_lock);
++	spin_lock_irqsave(&irq->irq_lock, flags);
+ 	print_irq_state(s, irq, vcpu);
+-	spin_unlock(&irq->irq_lock);
++	spin_unlock_irqrestore(&irq->irq_lock, flags);
+ 
+ 	return 0;
+ }
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index a8f07243aa9f..4ed79c939fb4 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
++	unsigned long flags;
+ 	int ret;
+ 
+ 	/* In this case there is no put, since we keep the reference. */
+@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
+ 	irq->intid = intid;
+ 	irq->target_vcpu = vcpu;
+ 
+-	spin_lock(&dist->lpi_list_lock);
++	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ 
+ 	/*
+ 	 * There could be a race with another vgic_add_lpi(), so we need to
+@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
+ 	dist->lpi_list_count++;
+ 
+ out_unlock:
+-	spin_unlock(&dist->lpi_list_lock);
++	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 
+ 	/*
+ 	 * We "cache" the configuration table entries in our struct vgic_irq's.
+@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
+ 	int ret;
+ 	unsigned long flags;
+ 
+-	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+-			     &prop, 1);
++	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
++				  &prop, 1);
+ 
+ 	if (ret)
+ 		return ret;
+@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
+ {
+ 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ 	struct vgic_irq *irq;
++	unsigned long flags;
+ 	u32 *intids;
+ 	int irq_count, i = 0;
+ 
+@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
+ 	if (!intids)
+ 		return -ENOMEM;
+ 
+-	spin_lock(&dist->lpi_list_lock);
++	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ 		if (i == irq_count)
+ 			break;
+@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
+ 			continue;
+ 		intids[i++] = irq->intid;
+ 	}
+-	spin_unlock(&dist->lpi_list_lock);
++	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 
+ 	*intid_ptr = intids;
+ 	return i;
+@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
+ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+ {
+ 	int ret = 0;
++	unsigned long flags;
+ 
+-	spin_lock(&irq->irq_lock);
++	spin_lock_irqsave(&irq->irq_lock, flags);
+ 	irq->target_vcpu = vcpu;
+-	spin_unlock(&irq->irq_lock);
++	spin_unlock_irqrestore(&irq->irq_lock, flags);
+ 
+ 	if (irq->hw) {
+ 		struct its_vlpi_map map;
+@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
+ 		 * this very same byte in the last iteration. Reuse that.
+ 		 */
+ 		if (byte_offset != last_byte_offset) {
+-			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
+-					     &pendmask, 1);
++			ret = kvm_read_guest_lock(vcpu->kvm,
++						  pendbase + byte_offset,
++						  &pendmask, 1);
+ 			if (ret) {
+ 				kfree(intids);
+ 				return ret;
+@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
+ 		return false;
+ 
+ 	/* Each 1st level entry is represented by a 64-bit value. */
+-	if (kvm_read_guest(its->dev->kvm,
++	if (kvm_read_guest_lock(its->dev->kvm,
+ 			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+ 			   &indirect_ptr, sizeof(indirect_ptr)))
+ 		return false;
+@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
+ 	cbaser = CBASER_ADDRESS(its->cbaser);
+ 
+ 	while (its->cwriter != its->creadr) {
+-		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
+-					 cmd_buf, ITS_CMD_SIZE);
++		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
++					      cmd_buf, ITS_CMD_SIZE);
+ 		/*
+ 		 * If kvm_read_guest() fails, this could be due to the guest
+ 		 * programming a bogus value in CBASER or something else going
+@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
+ 		int next_offset;
+ 		size_t byte_offset;
+ 
+-		ret = kvm_read_guest(kvm, gpa, entry, esz);
++		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
+ 	int ret;
+ 
+ 	BUG_ON(esz > sizeof(val));
+-	ret = kvm_read_guest(kvm, gpa, &val, esz);
++	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
+ 	if (ret)
+ 		return ret;
+ 	val = le64_to_cpu(val);
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 0ff2006f3781..cf2f716532ac 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -329,7 +329,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
+ 	bit_nr = irq->intid % BITS_PER_BYTE;
+ 	ptr = pendbase + byte_offset;
+ 
+-	ret = kvm_read_guest(kvm, ptr, &val, 1);
++	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -382,7 +382,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ 		ptr = pendbase + byte_offset;
+ 
+ 		if (byte_offset != last_byte_offset) {
+-			ret = kvm_read_guest(kvm, ptr, &val, 1);
++			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
+ 			if (ret)
+ 				return ret;
+ 			last_byte_offset = byte_offset;
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 8201899126f6..4232c40b34f8 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -40,9 +40,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
+  * kvm->lock (mutex)
+  *   its->cmd_lock (mutex)
+  *     its->its_lock (mutex)
+- *       vgic_cpu->ap_list_lock
+- *         kvm->lpi_list_lock
+- *           vgic_irq->irq_lock
++ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
++ *         kvm->lpi_list_lock		must be taken with IRQs disabled
++ *           vgic_irq->irq_lock		must be taken with IRQs disabled
++ *
++ * As the ap_list_lock might be taken from the timer interrupt handler,
++ * we have to disable IRQs before taking this lock and everything lower
++ * than it.
+  *
+  * If you need to take multiple locks, always take the upper lock first,
+  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
+@@ -69,8 +73,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
+ 	struct vgic_irq *irq = NULL;
++	unsigned long flags;
+ 
+-	spin_lock(&dist->lpi_list_lock);
++	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ 
+ 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+ 		if (irq->intid != intid)
+@@ -86,7 +91,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
+ 	irq = NULL;
+ 
+ out_unlock:
+-	spin_unlock(&dist->lpi_list_lock);
++	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 
+ 	return irq;
+ }
+@@ -127,19 +132,20 @@ static void vgic_irq_release(struct kref *ref)
+ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+ {
+ 	struct vgic_dist *dist = &kvm->arch.vgic;
++	unsigned long flags;
+ 
+ 	if (irq->intid < VGIC_MIN_LPI)
+ 		return;
+ 
+-	spin_lock(&dist->lpi_list_lock);
++	spin_lock_irqsave(&dist->lpi_list_lock, flags);
+ 	if (!kref_put(&irq->refcount, vgic_irq_release)) {
+-		spin_unlock(&dist->lpi_list_lock);
++		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 		return;
+ 	};
+ 
+ 	list_del(&irq->lpi_list);
+ 	dist->lpi_list_count--;
+-	spin_unlock(&dist->lpi_list_lock);
++	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
+ 
+ 	kfree(irq);
+ }


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-20 22:22 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-20 22:22 UTC (permalink / raw
  To: gentoo-commits

commit:     c741e5c30736c7523ab72393e932827a9179a35f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun May 20 22:22:28 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun May 20 22:22:28 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c741e5c3

Linux patch 4.16.10

 0000_README              |    4 +
 1009_linux-4.16.10.patch | 1808 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1812 insertions(+)

diff --git a/0000_README b/0000_README
index 73b7e2e..89eb684 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-4.16.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.9
 
+Patch:  1009_linux-4.16.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.16.10.patch b/1009_linux-4.16.10.patch
new file mode 100644
index 0000000..7d08449
--- /dev/null
+++ b/1009_linux-4.16.10.patch
@@ -0,0 +1,1808 @@
+diff --git a/Makefile b/Makefile
+index ea3cb221d7c5..33f3c94f02ca 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 7f8bda3a2005..0881f7907848 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -4303,7 +4303,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ 
+-	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
++	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
+ }
+ 
+ /* The mlx5_ib_multiport_mutex should be held when calling this function */
+diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
+index 1ed9529e7bd1..5eb0df2e5464 100644
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
+ {
+ 	int i;
+ 
+-	if (!client_info->slave)
++	if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
+ 		return;
+ 
+ 	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
+@@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+ 	skb->priority = TC_PRIO_CONTROL;
+ 	skb->dev = slave->dev;
+ 
++	netdev_dbg(slave->bond->dev,
++		   "Send learning packet: dev %s mac %pM vlan %d\n",
++		   slave->dev->name, mac_addr, vid);
++
+ 	if (vid)
+ 		__vlan_hwaccel_put_tag(skb, vlan_proto, vid);
+ 
+@@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
+ 	u8 *mac_addr = data->mac_addr;
+ 	struct bond_vlan_tag *tags;
+ 
+-	if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+-		if (strict_match &&
+-		    ether_addr_equal_64bits(mac_addr,
+-					    upper->dev_addr)) {
++	if (is_vlan_dev(upper) &&
++	    bond->nest_level == vlan_get_encap_level(upper) - 1) {
++		if (upper->addr_assign_type == NET_ADDR_STOLEN) {
+ 			alb_send_lp_vid(slave, mac_addr,
+ 					vlan_dev_vlan_proto(upper),
+ 					vlan_dev_vlan_id(upper));
+-		} else if (!strict_match) {
++		} else {
+ 			alb_send_lp_vid(slave, upper->dev_addr,
+ 					vlan_dev_vlan_proto(upper),
+ 					vlan_dev_vlan_id(upper));
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 718e4914e3a0..1f1e97b26f95 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1738,6 +1738,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 	if (bond_mode_uses_xmit_hash(bond))
+ 		bond_update_slave_arr(bond, NULL);
+ 
++	bond->nest_level = dev_get_nest_level(bond_dev);
++
+ 	netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
+ 		    slave_dev->name,
+ 		    bond_is_active_slave(new_slave) ? "an active" : "a backup",
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 3fc549b88c43..d61e51ebca51 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -2052,14 +2052,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
+ 	.ndo_select_queue	= bcm_sysport_select_queue,
+ };
+ 
+-static int bcm_sysport_map_queues(struct net_device *dev,
++static int bcm_sysport_map_queues(struct notifier_block *nb,
+ 				  struct dsa_notifier_register_info *info)
+ {
+-	struct bcm_sysport_priv *priv = netdev_priv(dev);
+ 	struct bcm_sysport_tx_ring *ring;
++	struct bcm_sysport_priv *priv;
+ 	struct net_device *slave_dev;
+ 	unsigned int num_tx_queues;
+ 	unsigned int q, start, port;
++	struct net_device *dev;
++
++	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
++	if (priv->netdev != info->master)
++		return 0;
++
++	dev = info->master;
+ 
+ 	/* We can't be setting up queue inspection for non directly attached
+ 	 * switches
+@@ -2082,6 +2089,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
+ 	if (priv->is_lite)
+ 		netif_set_real_num_tx_queues(slave_dev,
+ 					     slave_dev->num_tx_queues / 2);
++
+ 	num_tx_queues = slave_dev->real_num_tx_queues;
+ 
+ 	if (priv->per_port_num_tx_queues &&
+@@ -2109,7 +2117,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
+ 	return 0;
+ }
+ 
+-static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
++static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
+ 				    unsigned long event, void *ptr)
+ {
+ 	struct dsa_notifier_register_info *info;
+@@ -2119,7 +2127,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+ 
+ 	info = ptr;
+ 
+-	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
++	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+ }
+ 
+ #define REV_FMT	"v%2x.%02x"
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index f2593978ae75..bde98a994e96 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp)
+ 	tg3_mem_rx_release(tp);
+ 	tg3_mem_tx_release(tp);
+ 
+-	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+-	tg3_full_lock(tp, 0);
++	/* tp->hw_stats can be referenced safely:
++	 *     1. under rtnl_lock
++	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
++	 */
+ 	if (tp->hw_stats) {
+ 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ 				  tp->hw_stats, tp->stats_mapping);
+ 		tp->hw_stats = NULL;
+ 	}
+-	tg3_full_unlock(tp);
+ }
+ 
+ /*
+@@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev,
+ 	struct tg3 *tp = netdev_priv(dev);
+ 
+ 	spin_lock_bh(&tp->lock);
+-	if (!tp->hw_stats) {
++	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
+ 		*stats = tp->net_stats_prev;
+ 		spin_unlock_bh(&tp->lock);
+ 		return;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index f3302edba8b4..7f87db9734b8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
+ 	if (!coal->tx_max_coalesced_frames_irq)
+ 		return -EINVAL;
+ 
++	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
++	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
++		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
++			    __func__, MLX4_EN_MAX_COAL_TIME);
++		return -ERANGE;
++	}
++
++	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
++	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
++		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
++			    __func__, MLX4_EN_MAX_COAL_PKTS);
++		return -ERANGE;
++	}
++
+ 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ 			   MLX4_EN_AUTO_CONF) ?
+ 				MLX4_EN_RX_COAL_TARGET :
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index 8fc51bc29003..41f8fbced11d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -3320,12 +3320,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 					   MAX_TX_RINGS, GFP_KERNEL);
+ 		if (!priv->tx_ring[t]) {
+ 			err = -ENOMEM;
+-			goto err_free_tx;
++			goto out;
+ 		}
+ 		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+ 					 MAX_TX_RINGS, GFP_KERNEL);
+ 		if (!priv->tx_cq[t]) {
+-			kfree(priv->tx_ring[t]);
+ 			err = -ENOMEM;
+ 			goto out;
+ 		}
+@@ -3578,11 +3577,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 
+ 	return 0;
+ 
+-err_free_tx:
+-	while (t--) {
+-		kfree(priv->tx_ring[t]);
+-		kfree(priv->tx_cq[t]);
+-	}
+ out:
+ 	mlx4_en_destroy_netdev(dev);
+ 	return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index f470ae37d937..4dabaf025b12 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -132,6 +132,9 @@
+ #define MLX4_EN_TX_COAL_PKTS	16
+ #define MLX4_EN_TX_COAL_TIME	0x10
+ 
++#define MLX4_EN_MAX_COAL_PKTS	U16_MAX
++#define MLX4_EN_MAX_COAL_TIME	U16_MAX
++
+ #define MLX4_EN_RX_RATE_LOW		400000
+ #define MLX4_EN_RX_COAL_TIME_LOW	0
+ #define MLX4_EN_RX_RATE_HIGH		450000
+@@ -552,8 +555,8 @@ struct mlx4_en_priv {
+ 	u16 rx_usecs_low;
+ 	u32 pkt_rate_high;
+ 	u16 rx_usecs_high;
+-	u16 sample_interval;
+-	u16 adaptive_rx_coal;
++	u32 sample_interval;
++	u32 adaptive_rx_coal;
+ 	u32 msg_enable;
+ 	u32 loopback_ok;
+ 	u32 validate_loopback;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 3d46ef48d5b8..c641d5656b2d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+ 
+ 	mutex_lock(&priv->state_lock);
+ 
+-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+-		goto out;
+-
+ 	new_channels.params = priv->channels.params;
+ 	mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+ 
++	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
++		priv->channels.params = new_channels.params;
++		goto out;
++	}
++
+ 	/* Skip if tx_min_inline is the same */
+ 	if (new_channels.params.tx_min_inline_mode ==
+ 	    priv->channels.params.tx_min_inline_mode)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 43234cabf444..8665670fddbc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1260,6 +1260,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
+ 						  f->mask);
+ 		addr_type = key->addr_type;
+ 
++		/* the HW doesn't support frag first/later */
++		if (mask->flags & FLOW_DIS_FIRST_FRAG)
++			return -EOPNOTSUPP;
++
+ 		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ 			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+ 			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+@@ -1863,7 +1867,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+ 	}
+ 
+ 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+-	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
++	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
++	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+ 		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+ 		return false;
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index 11b4f1089d1c..ea725664f4f2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
+ 					  DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+-			return -ENOMEM;
++			goto dma_unmap_wqe_err;
+ 
+ 		dseg->addr       = cpu_to_be64(dma_addr);
+ 		dseg->lkey       = sq->mkey_be;
+@@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ 					    DMA_TO_DEVICE);
+ 		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+-			return -ENOMEM;
++			goto dma_unmap_wqe_err;
+ 
+ 		dseg->addr       = cpu_to_be64(dma_addr);
+ 		dseg->lkey       = sq->mkey_be;
+@@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	}
+ 
+ 	return num_dma;
++
++dma_unmap_wqe_err:
++	mlx5e_dma_unmap_wqe_err(sq, num_dma);
++	return -ENOMEM;
+ }
+ 
+ static inline void
+@@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ 	if (unlikely(num_dma < 0))
+-		goto dma_unmap_wqe_err;
++		goto err_drop;
+ 
+ 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ 			     num_bytes, num_dma, wi, cseg);
+ 
+ 	return NETDEV_TX_OK;
+ 
+-dma_unmap_wqe_err:
++err_drop:
+ 	sq->stats.dropped++;
+-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+-
+ 	dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+@@ -620,17 +622,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
+ 					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
+ 	if (unlikely(num_dma < 0))
+-		goto dma_unmap_wqe_err;
++		goto err_drop;
+ 
+ 	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
+ 			     num_bytes, num_dma, wi, cseg);
+ 
+ 	return NETDEV_TX_OK;
+ 
+-dma_unmap_wqe_err:
++err_drop:
+ 	sq->stats.dropped++;
+-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+-
+ 	dev_kfree_skb_any(skb);
+ 
+ 	return NETDEV_TX_OK;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index c2b1d7d351fc..0f745df1506c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -2143,26 +2143,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
+ 	memset(vf_stats, 0, sizeof(*vf_stats));
+ 	vf_stats->rx_packets =
+ 		MLX5_GET_CTR(out, received_eth_unicast.packets) +
++		MLX5_GET_CTR(out, received_ib_unicast.packets) +
+ 		MLX5_GET_CTR(out, received_eth_multicast.packets) +
++		MLX5_GET_CTR(out, received_ib_multicast.packets) +
+ 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ 
+ 	vf_stats->rx_bytes =
+ 		MLX5_GET_CTR(out, received_eth_unicast.octets) +
++		MLX5_GET_CTR(out, received_ib_unicast.octets) +
+ 		MLX5_GET_CTR(out, received_eth_multicast.octets) +
++		MLX5_GET_CTR(out, received_ib_multicast.octets) +
+ 		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ 
+ 	vf_stats->tx_packets =
+ 		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
++		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
++		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ 
+ 	vf_stats->tx_bytes =
+ 		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
++		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
++		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+ 		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+ 
+ 	vf_stats->multicast =
+-		MLX5_GET_CTR(out, received_eth_multicast.packets);
++		MLX5_GET_CTR(out, received_eth_multicast.packets) +
++		MLX5_GET_CTR(out, received_ib_multicast.packets);
+ 
+ 	vf_stats->broadcast =
+ 		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 31fc2cfac3b3..4d5b87e0d472 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -185,6 +185,7 @@ static void del_sw_ns(struct fs_node *node);
+ static void del_sw_hw_rule(struct fs_node *node);
+ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ 				struct mlx5_flow_destination *d2);
++static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
+ static struct mlx5_flow_rule *
+ find_flow_rule(struct fs_fte *fte,
+ 	       struct mlx5_flow_destination *dest);
+@@ -2329,23 +2330,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
+ 
+ static int init_root_ns(struct mlx5_flow_steering *steering)
+ {
++	int err;
++
+ 	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ 	if (!steering->root_ns)
+-		goto cleanup;
++		return -ENOMEM;
+ 
+-	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
+-		goto cleanup;
++	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
++	if (err)
++		goto out_err;
+ 
+ 	set_prio_attrs(steering->root_ns);
+-
+-	if (create_anchor_flow_table(steering))
+-		goto cleanup;
++	err = create_anchor_flow_table(steering);
++	if (err)
++		goto out_err;
+ 
+ 	return 0;
+ 
+-cleanup:
+-	mlx5_cleanup_fs(steering->dev);
+-	return -ENOMEM;
++out_err:
++	cleanup_root_ns(steering->root_ns);
++	steering->root_ns = NULL;
++	return err;
+ }
+ 
+ static void clean_tree(struct fs_node *node)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
+index 3529b545675d..1c09a274c637 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
+@@ -1099,11 +1099,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ err_alloc_lag_mapping:
+ 	mlxsw_ports_fini(mlxsw_core);
+ err_ports_init:
+-	mlxsw_bus->fini(bus_priv);
+-err_bus_init:
+ 	if (!reload)
+ 		devlink_resources_unregister(devlink, NULL);
+ err_register_resources:
++	mlxsw_bus->fini(bus_priv);
++err_bus_init:
+ 	if (!reload)
+ 		devlink_free(devlink);
+ err_devlink_alloc:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 161bcdc012f0..fd6b86892595 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ 	struct net_device *dev = mlxsw_sp_port->dev;
+ 	int err;
+ 
+-	if (bridge_port->bridge_device->multicast_enabled) {
+-		if (bridge_port->bridge_device->multicast_enabled) {
+-			err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
+-						     false);
+-			if (err)
+-				netdev_err(dev, "Unable to remove port from SMID\n");
+-		}
++	if (bridge_port->bridge_device->multicast_enabled &&
++	    !bridge_port->mrouter) {
++		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
++		if (err)
++			netdev_err(dev, "Unable to remove port from SMID\n");
+ 	}
+ 
+ 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
+index b3567a596fc1..80df9a5d4217 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
+@@ -183,17 +183,21 @@ static int
+ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ 			const struct tc_action *action,
+ 			struct nfp_fl_pre_tunnel *pre_tun,
+-			enum nfp_flower_tun_type tun_type)
++			enum nfp_flower_tun_type tun_type,
++			struct net_device *netdev)
+ {
+ 	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
+ 	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+ 	u32 tmp_set_ip_tun_type_index = 0;
+ 	/* Currently support one pre-tunnel so index is always 0. */
+ 	int pretun_idx = 0;
++	struct net *net;
+ 
+ 	if (ip_tun->options_len)
+ 		return -EOPNOTSUPP;
+ 
++	net = dev_net(netdev);
++
+ 	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+ 	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ 
+@@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
+ 
+ 	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
+ 	set_tun->tun_id = ip_tun->key.tun_id;
++	set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
+ 
+ 	/* Complete pre_tunnel action. */
+ 	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+@@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
+ 		*a_len += sizeof(struct nfp_fl_pre_tunnel);
+ 
+ 		set_tun = (void *)&nfp_fl->action_data[*a_len];
+-		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
++		err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
++					      netdev);
+ 		if (err)
+ 			return err;
+ 		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+index adfe474c2cf0..329a9b6d453a 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
++++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+@@ -178,7 +178,10 @@ struct nfp_fl_set_ipv4_udp_tun {
+ 	__be16 reserved;
+ 	__be64 tun_id __packed;
+ 	__be32 tun_type_index;
+-	__be32 extra[3];
++	__be16 reserved2;
++	u8 ttl;
++	u8 reserved3;
++	__be32 extra[2];
+ };
+ 
+ /* Metadata with L2 (1W/4B)
+diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
+index d24b47b8e0b2..d118da5a10a2 100644
+--- a/drivers/net/ethernet/realtek/8139too.c
++++ b/drivers/net/ethernet/realtek/8139too.c
+@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
+ 	struct rtl8139_private *tp = netdev_priv(dev);
+ 	const int irq = tp->pci_dev->irq;
+ 
+-	disable_irq(irq);
++	disable_irq_nosync(irq);
+ 	rtl8139_interrupt(irq, dev);
+ 	enable_irq(irq);
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index b4779acb6b5c..18bb1e226e6d 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5087,6 +5087,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
+ static void rtl_pll_power_up(struct rtl8169_private *tp)
+ {
+ 	rtl_generic_op(tp, tp->pll_power_ops.up);
++
++	/* give MAC/PHY some time to resume */
++	msleep(20);
+ }
+ 
+ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
+diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
+index 8dd545fed30d..6c94af263be8 100644
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 
+ 		len = (val & RCR_ENTRY_L2_LEN) >>
+ 			RCR_ENTRY_L2_LEN_SHIFT;
+-		len -= ETH_FCS_LEN;
++		append_size = len + ETH_HLEN + ETH_FCS_LEN;
+ 
+ 		addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
+ 			RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
+@@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 					 RCR_ENTRY_PKTBUFSZ_SHIFT];
+ 
+ 		off = addr & ~PAGE_MASK;
+-		append_size = rcr_size;
+ 		if (num_rcr == 1) {
+ 			int ptype;
+ 
+@@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
+ 			else
+ 				skb_checksum_none_assert(skb);
+ 		} else if (!(val & RCR_ENTRY_MULTI))
+-			append_size = len - skb->len;
++			append_size = append_size - skb->len;
+ 
+ 		niu_rx_skb_append(skb, page, off, append_size, rcr_size);
+ 		if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 33c35b2df7d5..5490c7d09c16 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -1278,6 +1278,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
+ 	cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ 			   HOST_PORT_NUM, ALE_VLAN |
+ 			   ALE_SECURE, slave->port_vlan);
++	cpsw_ale_control_set(cpsw->ale, slave_port,
++			     ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+ }
+ 
+ static void soft_reset_slave(struct cpsw_slave *slave)
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index f28c85d212ce..4774766fe20d 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
+ 		goto rx_handler_failed;
+ 	}
+ 
+-	ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
++	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
++					   NULL, NULL, NULL);
+ 	if (ret != 0) {
+ 		netdev_err(vf_netdev,
+ 			   "can not set master device %s (err = %d)\n",
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index 465c42e30508..95846f0321f3 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -1282,7 +1282,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ 		   rndis_device->link_state ? "down" : "up");
+ 
+ 	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
+-		return net_device;
++		goto out;
+ 
+ 	rndis_filter_query_link_speed(rndis_device, net_device);
+ 
+diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
+index 8961209ee949..a386138c7255 100644
+--- a/drivers/net/phy/sfp-bus.c
++++ b/drivers/net/phy/sfp-bus.c
+@@ -190,7 +190,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
+ 	if (id->base.br_nominal) {
+ 		if (id->base.br_nominal != 255) {
+ 			br_nom = id->base.br_nominal * 100;
+-			br_min = br_nom + id->base.br_nominal * id->ext.br_min;
++			br_min = br_nom - id->base.br_nominal * id->ext.br_min;
+ 			br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+ 		} else if (id->ext.br_max) {
+ 			br_nom = 250 * id->ext.br_max;
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 51c68fc416fa..42565dd33aa6 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1344,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
+ 		id->driver_info = (unsigned long)&qmi_wwan_info;
+ 	}
+ 
++	/* There are devices where the same interface number can be
++	 * configured as different functions. We should only bind to
++	 * vendor specific functions when matching on interface number
++	 */
++	if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
++	    desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
++		dev_dbg(&intf->dev,
++			"Rejecting interface number match for class %02x\n",
++			desc->bInterfaceClass);
++		return -ENODEV;
++	}
++
+ 	/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
+ 	if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
+ 		dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 84858d5c8257..d9f2229664ad 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ 	int wait;
+ 	unsigned long flags = 0;
+ 	unsigned long mflags = 0;
++	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
++			fibptr->hw_fib_va;
+ 
+ 	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
+ 	if (callback) {
+@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ 		wait = 1;
+ 
+ 
+-	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+-		struct aac_hba_cmd_req *hbacmd =
+-			(struct aac_hba_cmd_req *)fibptr->hw_fib_va;
++	hbacmd->iu_type = command;
+ 
+-		hbacmd->iu_type = command;
++	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+ 		/* bit1 of request_id must be 0 */
+ 		hbacmd->request_id =
+ 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 9298324325ed..f034eccd8616 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -264,7 +264,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 	 * Inherently racy -- command line shares address space
+ 	 * with code and data.
+ 	 */
+-	rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
++	rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
+ 	if (rv <= 0)
+ 		goto out_free_page;
+ 
+@@ -282,7 +282,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 			int nr_read;
+ 
+ 			_count = min3(count, len, PAGE_SIZE);
+-			nr_read = access_remote_vm(mm, p, page, _count, 0);
++			nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ 			if (nr_read < 0)
+ 				rv = nr_read;
+ 			if (nr_read <= 0)
+@@ -328,7 +328,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
+ 				bool final;
+ 
+ 				_count = min3(count, len, PAGE_SIZE);
+-				nr_read = access_remote_vm(mm, p, page, _count, 0);
++				nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
+ 				if (nr_read < 0)
+ 					rv = nr_read;
+ 				if (nr_read <= 0)
+@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 		max_len = min_t(size_t, PAGE_SIZE, count);
+ 		this_len = min(max_len, this_len);
+ 
+-		retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
++		retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
+ 
+ 		if (retval <= 0) {
+ 			ret = retval;
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 9d3a03364e6e..1352b1b990a7 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1269,25 +1269,19 @@ enum {
+ };
+ 
+ static inline const struct cpumask *
+-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
++mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
+ {
+-	const struct cpumask *mask;
+ 	struct irq_desc *desc;
+ 	unsigned int irq;
+ 	int eqn;
+ 	int err;
+ 
+-	err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
++	err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
+ 	if (err)
+ 		return NULL;
+ 
+ 	desc = irq_to_desc(irq);
+-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+-	mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
+-#else
+-	mask = desc->irq_common_data.affinity;
+-#endif
+-	return mask;
++	return desc->affinity_hint;
+ }
+ 
+ #endif /* MLX5_DRIVER_H */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 95a2d748e978..a4e9bdbec490 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2441,6 +2441,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
+ #define FOLL_MLOCK	0x1000	/* lock present pages */
+ #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
+ #define FOLL_COW	0x4000	/* internal GUP flag */
++#define FOLL_ANON	0x8000	/* don't do file mappings */
+ 
+ static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+ {
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index f801fc940b29..b52235158836 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -198,6 +198,7 @@ struct bonding {
+ 	struct   slave __rcu *primary_slave;
+ 	struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
+ 	bool     force_primary;
++	u32      nest_level;
+ 	s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
+ 	int     (*recv_probe)(const struct sk_buff *, struct bonding *,
+ 			      struct slave *);
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 4913430ab807..f5c928a76994 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -102,6 +102,7 @@ struct tls_context {
+ 	struct scatterlist *partially_sent_record;
+ 	u16 partially_sent_offset;
+ 	unsigned long flags;
++	bool in_tcp_sendpages;
+ 
+ 	u16 pending_open_record_frags;
+ 	int (*push_pending_record)(struct sock *sk, int flags);
+diff --git a/mm/gup.c b/mm/gup.c
+index 8f3a06408e28..f5f83c2e6c83 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+ 	if (vm_flags & (VM_IO | VM_PFNMAP))
+ 		return -EFAULT;
+ 
++	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
++		return -EFAULT;
++
+ 	if (write) {
+ 		if (!(vm_flags & VM_WRITE)) {
+ 			if (!(gup_flags & FOLL_FORCE))
+diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
+index 9ba4ed65c52b..4be5335407c5 100644
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -509,8 +509,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
+ 		return -ELOOP;
+ 	}
+ 
+-	/* Device is already being bridged */
+-	if (br_port_exists(dev))
++	/* Device has master upper dev */
++	if (netdev_master_upper_dev_get(dev))
+ 		return -EBUSY;
+ 
+ 	/* No bridging devices that dislike that (e.g. wireless) */
+diff --git a/net/compat.c b/net/compat.c
+index 22381719718c..32ed993588d6 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
+ 	    optname == SO_ATTACH_REUSEPORT_CBPF)
+ 		return do_set_attach_filter(sock, level, optname,
+ 					    optval, optlen);
+-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++	if (!COMPAT_USE_64BIT_TIME &&
++	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ 		return do_set_sock_timeout(sock, level, optname, optval, optlen);
+ 
+ 	return sock_setsockopt(sock, level, optname, optval, optlen);
+@@ -442,7 +443,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
+ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
+ 				char __user *optval, int __user *optlen)
+ {
+-	if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
++	if (!COMPAT_USE_64BIT_TIME &&
++	    (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
+ 		return do_get_sock_timeout(sock, level, optname, optval, optlen);
+ 	return sock_getsockopt(sock, level, optname, optval, optlen);
+ }
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index 92d016e87816..385f153fe031 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+ 						  DCCPF_SEQ_WMAX));
+ }
+ 
++static void dccp_tasklet_schedule(struct sock *sk)
++{
++	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
++
++	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
++		sock_hold(sk);
++		__tasklet_schedule(t);
++	}
++}
++
+ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
+ {
+ 	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
+@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
+ 
+ 	/* if we were blocked before, we may now send cwnd=1 packet */
+ 	if (sender_was_blocked)
+-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++		dccp_tasklet_schedule(sk);
+ 	/* restart backed-off timer */
+ 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+ out:
+@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
+ done:
+ 	/* check if incoming Acks allow pending packets to be sent */
+ 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+-		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
++		dccp_tasklet_schedule(sk);
+ 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+ 
+diff --git a/net/dccp/timer.c b/net/dccp/timer.c
+index b50a8732ff43..1501a20a94ca 100644
+--- a/net/dccp/timer.c
++++ b/net/dccp/timer.c
+@@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
+ 	else
+ 		dccp_write_xmit(sk);
+ 	bh_unlock_sock(sk);
++	sock_put(sk);
+ }
+ 
+ static void dccp_write_xmit_timer(struct timer_list *t)
+@@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
+ 	struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
+ 
+ 	dccp_write_xmitlet((unsigned long)sk);
+-	sock_put(sk);
+ }
+ 
+ void dccp_init_xmit_timers(struct sock *sk)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index b8f0db54b197..16226d49263d 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	ipc.addr = faddr = daddr;
+ 
+ 	if (ipc.opt && ipc.opt->opt.srr) {
+-		if (!daddr)
+-			return -EINVAL;
++		if (!daddr) {
++			err = -EINVAL;
++			goto out_free;
++		}
+ 		faddr = ipc.opt->opt.faddr;
+ 	}
+ 	tos = get_rttos(&ipc, inet);
+@@ -842,6 +844,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ out:
+ 	ip_rt_put(rt);
++out_free:
+ 	if (free)
+ 		kfree(ipc.opt);
+ 	if (!err) {
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 9d9b8358a898..df1c04d75f93 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -710,7 +710,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ 		fnhe->fnhe_gw = gw;
+ 		fnhe->fnhe_pmtu = pmtu;
+ 		fnhe->fnhe_mtu_locked = lock;
+-		fnhe->fnhe_expires = expires;
++		fnhe->fnhe_expires = max(1UL, expires);
+ 
+ 		/* Exception created; mark the cached routes for the nexthop
+ 		 * stale, so anyone caching it rechecks if this exception
+@@ -1298,6 +1298,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+ 	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
+ {
+ 	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
+@@ -1311,8 +1341,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
+ 
+ 	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
+ 	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
+-		if (fnhe->fnhe_daddr == daddr)
++		if (fnhe->fnhe_daddr == daddr) {
++			if (fnhe->fnhe_expires &&
++			    time_after(jiffies, fnhe->fnhe_expires)) {
++				ip_del_fnhe(nh, daddr);
++				break;
++			}
+ 			return fnhe;
++		}
+ 	}
+ 	return NULL;
+ }
+@@ -1340,6 +1376,7 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+ 			fnhe->fnhe_gw = 0;
+ 			fnhe->fnhe_pmtu = 0;
+ 			fnhe->fnhe_expires = 0;
++			fnhe->fnhe_mtu_locked = false;
+ 			fnhe_flush_routes(fnhe);
+ 			orig = NULL;
+ 		}
+@@ -1638,36 +1675,6 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
+-static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+-{
+-	struct fnhe_hash_bucket *hash;
+-	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+-	u32 hval = fnhe_hashfun(daddr);
+-
+-	spin_lock_bh(&fnhe_lock);
+-
+-	hash = rcu_dereference_protected(nh->nh_exceptions,
+-					 lockdep_is_held(&fnhe_lock));
+-	hash += hval;
+-
+-	fnhe_p = &hash->chain;
+-	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+-	while (fnhe) {
+-		if (fnhe->fnhe_daddr == daddr) {
+-			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+-				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+-			fnhe_flush_routes(fnhe);
+-			kfree_rcu(fnhe, rcu);
+-			break;
+-		}
+-		fnhe_p = &fnhe->fnhe_next;
+-		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+-						 lockdep_is_held(&fnhe_lock));
+-	}
+-
+-	spin_unlock_bh(&fnhe_lock);
+-}
+-
+ static void set_lwt_redirect(struct rtable *rth)
+ {
+ 	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+@@ -1734,20 +1741,10 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 	if (do_cache) {
+-		if (fnhe) {
++		if (fnhe)
+ 			rth = rcu_dereference(fnhe->fnhe_rth_input);
+-			if (rth && rth->dst.expires &&
+-			    time_after(jiffies, rth->dst.expires)) {
+-				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+-				fnhe = NULL;
+-			} else {
+-				goto rt_cache;
+-			}
+-		}
+-
+-		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+-
+-rt_cache:
++		else
++			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 		if (rt_cache_valid(rth)) {
+ 			skb_dst_set_noref(skb, &rth->dst);
+ 			goto out;
+@@ -2224,39 +2221,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
+ 		 * be set to the loopback interface as well.
+ 		 */
+-		fi = NULL;
++		do_cache = false;
+ 	}
+ 
+ 	fnhe = NULL;
+ 	do_cache &= fi != NULL;
+-	if (do_cache) {
++	if (fi) {
+ 		struct rtable __rcu **prth;
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
++		if (!do_cache)
++			goto add;
+ 		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-			rth = rcu_dereference(*prth);
+-			if (rth && rth->dst.expires &&
+-			    time_after(jiffies, rth->dst.expires)) {
+-				ip_del_fnhe(nh, fl4->daddr);
+-				fnhe = NULL;
+-			} else {
+-				goto rt_cache;
++		} else {
++			if (unlikely(fl4->flowi4_flags &
++				     FLOWI_FLAG_KNOWN_NH &&
++				     !(nh->nh_gw &&
++				       nh->nh_scope == RT_SCOPE_LINK))) {
++				do_cache = false;
++				goto add;
+ 			}
++			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
+-
+-		if (unlikely(fl4->flowi4_flags &
+-			     FLOWI_FLAG_KNOWN_NH &&
+-			     !(nh->nh_gw &&
+-			       nh->nh_scope == RT_SCOPE_LINK))) {
+-			do_cache = false;
+-			goto add;
+-		}
+-		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
+-
+-rt_cache:
+ 		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
+ 			return rth;
+ 	}
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c92fd253fc46..80e39d9a0423 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -688,7 +688,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
+ {
+ 	return skb->len < size_goal &&
+ 	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+-	       skb != tcp_write_queue_head(sk) &&
++	       !tcp_rtx_queue_empty(sk) &&
+ 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
+ }
+ 
+@@ -1210,7 +1210,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ 			uarg->zerocopy = 0;
+ 	}
+ 
+-	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
++	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
++	    !tp->repair) {
+ 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
+ 		if (err == -EINPROGRESS && copied_syn > 0)
+ 			goto out;
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index a471f696e13c..29478454b527 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -803,7 +803,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
+ 			}
+ 		}
+ 	}
+-	bbr->idle_restart = 0;
++	/* Restart after idle ends only once we process a new S/ACK for data */
++	if (rs->delivered > 0)
++		bbr->idle_restart = 0;
+ }
+ 
+ static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index e5ef7c38c934..b6ba51536b37 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -407,9 +407,9 @@ static int compute_score(struct sock *sk, struct net *net,
+ 		bool dev_match = (sk->sk_bound_dev_if == dif ||
+ 				  sk->sk_bound_dev_if == sdif);
+ 
+-		if (exact_dif && !dev_match)
++		if (!dev_match)
+ 			return -1;
+-		if (sk->sk_bound_dev_if && dev_match)
++		if (sk->sk_bound_dev_if)
+ 			score += 4;
+ 	}
+ 
+@@ -958,8 +958,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
+ 
+ 	if (ipc.opt && ipc.opt->opt.srr) {
+-		if (!daddr)
+-			return -EINVAL;
++		if (!daddr) {
++			err = -EINVAL;
++			goto out_free;
++		}
+ 		faddr = ipc.opt->opt.faddr;
+ 		connected = 0;
+ 	}
+@@ -1080,6 +1082,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ out:
+ 	ip_rt_put(rt);
++out_free:
+ 	if (free)
+ 		kfree(ipc.opt);
+ 	if (!err)
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 74a2e37412b2..1aee1a537cb1 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1822,11 +1822,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
+ 	const struct ipv6hdr *inner_iph;
+ 	const struct icmp6hdr *icmph;
+ 	struct ipv6hdr _inner_iph;
++	struct icmp6hdr _icmph;
+ 
+ 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
+ 		goto out;
+ 
+-	icmph = icmp6_hdr(skb);
++	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
++				   sizeof(_icmph), &_icmph);
++	if (!icmph)
++		goto out;
++
+ 	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
+ 	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
+ 	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 52e3ea0e6f50..68d589f8d2b2 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -148,9 +148,9 @@ static int compute_score(struct sock *sk, struct net *net,
+ 		bool dev_match = (sk->sk_bound_dev_if == dif ||
+ 				  sk->sk_bound_dev_if == sdif);
+ 
+-		if (exact_dif && !dev_match)
++		if (!dev_match)
+ 			return -1;
+-		if (sk->sk_bound_dev_if && dev_match)
++		if (sk->sk_bound_dev_if)
+ 			score++;
+ 	}
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index cf41d9b4a0b8..b49f5afab405 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (size > llc->dev->mtu)
+ 		size = llc->dev->mtu;
+ 	copied = size - hdrlen;
++	rc = -EINVAL;
++	if (copied < 0)
++		goto release;
+ 	release_sock(sk);
+ 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ 	lock_sock(sk);
+diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
+index d7da99a0b0b8..9696ef96b719 100644
+--- a/net/nsh/nsh.c
++++ b/net/nsh/nsh.c
+@@ -57,6 +57,8 @@ int nsh_pop(struct sk_buff *skb)
+ 		return -ENOMEM;
+ 	nh = (struct nshhdr *)(skb->data);
+ 	length = nsh_hdr_len(nh);
++	if (length < NSH_BASE_HDR_LEN)
++		return -EINVAL;
+ 	inner_proto = tun_p_to_eth_p(nh->np);
+ 	if (!pskb_may_pull(skb, length))
+ 		return -ENOMEM;
+@@ -90,6 +92,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
+ 	if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
+ 		goto out;
+ 	nsh_len = nsh_hdr_len(nsh_hdr(skb));
++	if (nsh_len < NSH_BASE_HDR_LEN)
++		goto out;
+ 	if (unlikely(!pskb_may_pull(skb, nsh_len)))
+ 		goto out;
+ 
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index 7322aa1e382e..492ab0c36f7c 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1712,13 +1712,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
+ 
+ 	/* The nlattr stream should already have been validated */
+ 	nla_for_each_nested(nla, attr, rem) {
+-		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
+-			if (tbl[nla_type(nla)].next)
+-				tbl = tbl[nla_type(nla)].next;
+-			nlattr_set(nla, val, tbl);
+-		} else {
++		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
++			nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
++		else
+ 			memset(nla_data(nla), val, nla_len(nla));
+-		}
+ 
+ 		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
+ 			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index b25bcfe411ca..555f07ccf0dc 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
+ 		struct rds_cmsg_rx_trace t;
+ 		int i, j;
+ 
++		memset(&t, 0, sizeof(t));
+ 		inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
+ 		t.rx_traces =  rs->rs_rx_traces;
+ 		for (i = 0; i < rs->rs_rx_traces; i++) {
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index 7b0700f52b50..d87b611046bb 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ 	if (exists && bind)
+ 		return 0;
+ 
+-	if (!lflags)
++	if (!lflags) {
++		if (exists)
++			tcf_idr_release(*a, bind);
+ 		return -EINVAL;
++	}
+ 
+ 	if (!exists) {
+ 		ret = tcf_idr_create(tn, parm->index, est, a,
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index 247b7cc20c13..c2c732aad87c 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -152,8 +152,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
+ 			NL_SET_ERR_MSG(extack, "TC classifier not found");
+ 			err = -ENOENT;
+ 		}
+-		goto errout;
+ #endif
++		goto errout;
+ 	}
+ 	tp->classify = tp->ops->classify;
+ 	tp->protocol = protocol;
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index a366e4c9413a..4808713c73b9 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
+ 	return f->next == &detached;
+ }
+ 
++static bool fq_flow_is_throttled(const struct fq_flow *f)
++{
++	return f->next == &throttled;
++}
++
++static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
++{
++	if (head->first)
++		head->last->next = flow;
++	else
++		head->first = flow;
++	head->last = flow;
++	flow->next = NULL;
++}
++
++static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
++{
++	rb_erase(&f->rate_node, &q->delayed);
++	q->throttled_flows--;
++	fq_flow_add_tail(&q->old_flows, f);
++}
++
+ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+ {
+ 	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
+@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+ 
+ static struct kmem_cache *fq_flow_cachep __read_mostly;
+ 
+-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+-{
+-	if (head->first)
+-		head->last->next = flow;
+-	else
+-		head->first = flow;
+-	head->last = flow;
+-	flow->next = NULL;
+-}
+ 
+ /* limit number of collected flows per round */
+ #define FQ_GC_MAX 8
+@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+ 				     f->socket_hash != sk->sk_hash)) {
+ 				f->credit = q->initial_quantum;
+ 				f->socket_hash = sk->sk_hash;
++				if (fq_flow_is_throttled(f))
++					fq_flow_unset_throttled(q, f);
+ 				f->time_next_packet = 0ULL;
+ 			}
+ 			return f;
+@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
+ 			q->time_next_delayed_flow = f->time_next_packet;
+ 			break;
+ 		}
+-		rb_erase(p, &q->delayed);
+-		q->throttled_flows--;
+-		fq_flow_add_tail(&q->old_flows, f);
++		fq_flow_unset_throttled(q, f);
+ 	}
+ }
+ 
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index 837806dd5799..a47179da24e6 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1024,8 +1024,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 	struct sctp_endpoint *ep;
+ 	struct sctp_chunk *chunk;
+ 	struct sctp_inq *inqueue;
+-	int state;
++	int first_time = 1;	/* is this the first time through the loop */
+ 	int error = 0;
++	int state;
+ 
+ 	/* The association should be held so we should be safe. */
+ 	ep = asoc->ep;
+@@ -1036,6 +1037,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 		state = asoc->state;
+ 		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ 
++		/* If the first chunk in the packet is AUTH, do special
++		 * processing specified in Section 6.3 of SCTP-AUTH spec
++		 */
++		if (first_time && subtype.chunk == SCTP_CID_AUTH) {
++			struct sctp_chunkhdr *next_hdr;
++
++			next_hdr = sctp_inq_peek(inqueue);
++			if (!next_hdr)
++				goto normal;
++
++			/* If the next chunk is COOKIE-ECHO, skip the AUTH
++			 * chunk while saving a pointer to it so we can do
++			 * Authentication later (during cookie-echo
++			 * processing).
++			 */
++			if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
++				chunk->auth_chunk = skb_clone(chunk->skb,
++							      GFP_ATOMIC);
++				chunk->auth = 1;
++				continue;
++			}
++		}
++
++normal:
+ 		/* SCTP-AUTH, Section 6.3:
+ 		 *    The receiver has a list of chunk types which it expects
+ 		 *    to be received only after an AUTH-chunk.  This list has
+@@ -1074,6 +1099,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
+ 		/* If there is an error on chunk, discard this packet. */
+ 		if (error && chunk)
+ 			chunk->pdiscard = 1;
++
++		if (first_time)
++			first_time = 0;
+ 	}
+ 	sctp_association_put(asoc);
+ }
+diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
+index 23ebc5318edc..eb93ffe2408b 100644
+--- a/net/sctp/inqueue.c
++++ b/net/sctp/inqueue.c
+@@ -217,7 +217,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+ 	skb_pull(chunk->skb, sizeof(*ch));
+ 	chunk->subh.v = NULL; /* Subheader is no longer valid.  */
+ 
+-	if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
++	if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
+ 		/* This is not a singleton */
+ 		chunk->singleton = 0;
+ 	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 07b64719d1bc..351e80c59211 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -866,6 +866,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ 		return 1;
+ 
++	if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
++		return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
++
+ 	return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index eb7905ffe5f2..88573c57e106 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -153,10 +153,7 @@ static enum sctp_disposition sctp_sf_violation_chunk(
+ 					struct sctp_cmd_seq *commands);
+ 
+ static enum sctp_ierror sctp_sf_authenticate(
+-					struct net *net,
+-					const struct sctp_endpoint *ep,
+ 					const struct sctp_association *asoc,
+-					const union sctp_subtype type,
+ 					struct sctp_chunk *chunk);
+ 
+ static enum sctp_disposition __sctp_sf_do_9_1_abort(
+@@ -621,6 +618,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
+ 	return SCTP_DISPOSITION_CONSUME;
+ }
+ 
++static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
++				   const struct sctp_association *asoc)
++{
++	struct sctp_chunk auth;
++
++	if (!chunk->auth_chunk)
++		return true;
++
++	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
++	 * is supposed to be authenticated and we have to do delayed
++	 * authentication.  We've just recreated the association using
++	 * the information in the cookie and now it's much easier to
++	 * do the authentication.
++	 */
++
++	/* Make sure that we and the peer are AUTH capable */
++	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
++		return false;
++
++	/* set-up our fake chunk so that we can process it */
++	auth.skb = chunk->auth_chunk;
++	auth.asoc = chunk->asoc;
++	auth.sctp_hdr = chunk->sctp_hdr;
++	auth.chunk_hdr = (struct sctp_chunkhdr *)
++				skb_push(chunk->auth_chunk,
++					 sizeof(struct sctp_chunkhdr));
++	skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
++	auth.transport = chunk->transport;
++
++	return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
++}
++
+ /*
+  * Respond to a normal COOKIE ECHO chunk.
+  * We are the side that is being asked for an association.
+@@ -758,37 +787,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
+ 	if (error)
+ 		goto nomem_init;
+ 
+-	/* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
+-	 * is supposed to be authenticated and we have to do delayed
+-	 * authentication.  We've just recreated the association using
+-	 * the information in the cookie and now it's much easier to
+-	 * do the authentication.
+-	 */
+-	if (chunk->auth_chunk) {
+-		struct sctp_chunk auth;
+-		enum sctp_ierror ret;
+-
+-		/* Make sure that we and the peer are AUTH capable */
+-		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+-			sctp_association_free(new_asoc);
+-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-		}
+-
+-		/* set-up our fake chunk so that we can process it */
+-		auth.skb = chunk->auth_chunk;
+-		auth.asoc = chunk->asoc;
+-		auth.sctp_hdr = chunk->sctp_hdr;
+-		auth.chunk_hdr = (struct sctp_chunkhdr *)
+-					skb_push(chunk->auth_chunk,
+-						 sizeof(struct sctp_chunkhdr));
+-		skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+-		auth.transport = chunk->transport;
+-
+-		ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+-		if (ret != SCTP_IERROR_NO_ERROR) {
+-			sctp_association_free(new_asoc);
+-			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+-		}
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
++		sctp_association_free(new_asoc);
++		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ 	}
+ 
+ 	repl = sctp_make_cookie_ack(new_asoc, chunk);
+@@ -1758,13 +1759,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
+ 			       GFP_ATOMIC))
+ 		goto nomem;
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Make sure no new addresses are being added during the
+ 	 * restart.  Though this is a pretty complicated attack
+ 	 * since you'd have to get inside the cookie.
+ 	 */
+-	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
++	if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
+ 		return SCTP_DISPOSITION_CONSUME;
+-	}
+ 
+ 	/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
+ 	 * the peer has restarted (Action A), it MUST NOT setup a new
+@@ -1870,6 +1873,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
+ 			       GFP_ATOMIC))
+ 		goto nomem;
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Update the content of current association.  */
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+@@ -1964,6 +1970,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
+ 	 * a COOKIE ACK.
+ 	 */
+ 
++	if (!sctp_auth_chunk_verify(net, chunk, asoc))
++		return SCTP_DISPOSITION_DISCARD;
++
+ 	/* Don't accidentally move back into established state. */
+ 	if (asoc->state < SCTP_STATE_ESTABLISHED) {
+ 		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+@@ -2003,7 +2012,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
+ 		}
+ 	}
+ 
+-	repl = sctp_make_cookie_ack(new_asoc, chunk);
++	repl = sctp_make_cookie_ack(asoc, chunk);
+ 	if (!repl)
+ 		goto nomem;
+ 
+@@ -4108,10 +4117,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
+  * The return value is the disposition of the chunk.
+  */
+ static enum sctp_ierror sctp_sf_authenticate(
+-					struct net *net,
+-					const struct sctp_endpoint *ep,
+ 					const struct sctp_association *asoc,
+-					const union sctp_subtype type,
+ 					struct sctp_chunk *chunk)
+ {
+ 	struct sctp_authhdr *auth_hdr;
+@@ -4209,7 +4215,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
+ 						  commands);
+ 
+ 	auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
+-	error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
++	error = sctp_sf_authenticate(asoc, chunk);
+ 	switch (error) {
+ 	case SCTP_IERROR_AUTH_BAD_HMAC:
+ 		/* Generate the ERROR chunk and discard the rest
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index f799043abec9..f1f1d1b232ba 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
+ 
+ 	new->out = NULL;
+ 	new->in  = NULL;
++	new->outcnt = 0;
++	new->incnt  = 0;
+ }
+ 
+ static int sctp_send_reconf(struct sctp_association *asoc,
+diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
+index 84207ad33e8e..8cb7d9858270 100644
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -715,7 +715,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
+ 	return event;
+ 
+ fail_mark:
+-	sctp_chunk_put(chunk);
+ 	kfree_skb(skb);
+ fail:
+ 	return NULL;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index afd5a935bbcb..5a983c9bea53 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -973,10 +973,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
+ 	}
+ 
+ out:
+-	if (lsmc->clcsock) {
+-		sock_release(lsmc->clcsock);
+-		lsmc->clcsock = NULL;
+-	}
+ 	release_sock(lsk);
+ 	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
+ }
+@@ -1165,13 +1161,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
+ 		/* delegate to CLC child sock */
+ 		release_sock(sk);
+ 		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
+-		/* if non-blocking connect finished ... */
+ 		lock_sock(sk);
+-		if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
+-			sk->sk_err = smc->clcsock->sk->sk_err;
+-			if (sk->sk_err) {
+-				mask |= EPOLLERR;
+-			} else {
++		sk->sk_err = smc->clcsock->sk->sk_err;
++		if (sk->sk_err) {
++			mask |= EPOLLERR;
++		} else {
++			/* if non-blocking connect finished ... */
++			if (sk->sk_state == SMC_INIT &&
++			    mask & EPOLLOUT &&
++			    smc->clcsock->sk->sk_state != TCP_CLOSE) {
+ 				rc = smc_connect_rdma(smc);
+ 				if (rc < 0)
+ 					mask |= EPOLLERR;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 7dfa9fc99ec3..df4f504b1fef 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -1518,10 +1518,10 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
+ 
+ 	srcaddr->sock.family = AF_TIPC;
+ 	srcaddr->sock.addrtype = TIPC_ADDR_ID;
++	srcaddr->sock.scope = 0;
+ 	srcaddr->sock.addr.id.ref = msg_origport(hdr);
+ 	srcaddr->sock.addr.id.node = msg_orignode(hdr);
+ 	srcaddr->sock.addr.name.domain = 0;
+-	srcaddr->sock.scope = 0;
+ 	m->msg_namelen = sizeof(struct sockaddr_tipc);
+ 
+ 	if (!msg_in_group(hdr))
+@@ -1530,6 +1530,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
+ 	/* Group message users may also want to know sending member's id */
+ 	srcaddr->member.family = AF_TIPC;
+ 	srcaddr->member.addrtype = TIPC_ADDR_NAME;
++	srcaddr->member.scope = 0;
+ 	srcaddr->member.addr.name.name.type = msg_nametype(hdr);
+ 	srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
+ 	srcaddr->member.addr.name.domain = 0;
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index d824d548447e..b51180c1479a 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -107,6 +107,7 @@ int tls_push_sg(struct sock *sk,
+ 	size = sg->length - offset;
+ 	offset += sg->offset;
+ 
++	ctx->in_tcp_sendpages = true;
+ 	while (1) {
+ 		if (sg_is_last(sg))
+ 			sendpage_flags = flags;
+@@ -127,6 +128,7 @@ int tls_push_sg(struct sock *sk,
+ 			offset -= sg->offset;
+ 			ctx->partially_sent_offset = offset;
+ 			ctx->partially_sent_record = (void *)sg;
++			ctx->in_tcp_sendpages = false;
+ 			return ret;
+ 		}
+ 
+@@ -141,6 +143,8 @@ int tls_push_sg(struct sock *sk,
+ 	}
+ 
+ 	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
++	ctx->in_tcp_sendpages = false;
++	ctx->sk_write_space(sk);
+ 
+ 	return 0;
+ }
+@@ -210,6 +214,10 @@ static void tls_write_space(struct sock *sk)
+ {
+ 	struct tls_context *ctx = tls_get_ctx(sk);
+ 
++	/* We are already sending pages, ignore notification */
++	if (ctx->in_tcp_sendpages)
++		return;
++
+ 	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
+ 		gfp_t sk_allocation = sk->sk_allocation;
+ 		int rc;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-16 10:25 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-16 10:25 UTC (permalink / raw
  To: gentoo-commits

commit:     34ff0d5a463b21dc147519af0daf60f6959fb22c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May 16 10:25:25 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May 16 10:25:25 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=34ff0d5a

Linux patch 4.16.9

 0000_README             |    4 +
 1008_linux-4.16.9.patch | 2525 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2529 insertions(+)

diff --git a/0000_README b/0000_README
index b4a9e43..73b7e2e 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-4.16.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.8
 
+Patch:  1008_linux-4.16.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.16.9.patch b/1008_linux-4.16.9.patch
new file mode 100644
index 0000000..0d580f2
--- /dev/null
+++ b/1008_linux-4.16.9.patch
@@ -0,0 +1,2525 @@
+diff --git a/Makefile b/Makefile
+index 5da6ffd69209..ea3cb221d7c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
+index e08c0c193767..f8939e82249b 100644
+--- a/arch/arm/boot/dts/imx35.dtsi
++++ b/arch/arm/boot/dts/imx35.dtsi
+@@ -303,7 +303,7 @@
+ 			};
+ 
+ 			can1: can@53fe4000 {
+-				compatible = "fsl,imx35-flexcan";
++				compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fe4000 0x1000>;
+ 				clocks = <&clks 33>, <&clks 33>;
+ 				clock-names = "ipg", "per";
+@@ -312,7 +312,7 @@
+ 			};
+ 
+ 			can2: can@53fe8000 {
+-				compatible = "fsl,imx35-flexcan";
++				compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fe8000 0x1000>;
+ 				clocks = <&clks 34>, <&clks 34>;
+ 				clock-names = "ipg", "per";
+diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
+index 1040251f2951..f333c1e40d6c 100644
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -551,7 +551,7 @@
+ 			};
+ 
+ 			can1: can@53fc8000 {
+-				compatible = "fsl,imx53-flexcan";
++				compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fc8000 0x4000>;
+ 				interrupts = <82>;
+ 				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
+@@ -561,7 +561,7 @@
+ 			};
+ 
+ 			can2: can@53fcc000 {
+-				compatible = "fsl,imx53-flexcan";
++				compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
+ 				reg = <0x53fcc000 0x4000>;
+ 				interrupts = <83>;
+ 				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 88797c80b3e0..06086439b7bd 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -27,6 +27,7 @@
+ #include <linux/cpu.h>
+ #include <linux/bitops.h>
+ #include <linux/device.h>
++#include <linux/nospec.h>
+ 
+ #include <asm/apic.h>
+ #include <asm/stacktrace.h>
+@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+ 
+ 	config = attr->config;
+ 
+-	cache_type = (config >>  0) & 0xff;
++	cache_type = (config >> 0) & 0xff;
+ 	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ 		return -EINVAL;
++	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
+ 
+ 	cache_op = (config >>  8) & 0xff;
+ 	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ 		return -EINVAL;
++	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
+ 
+ 	cache_result = (config >> 16) & 0xff;
+ 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ 		return -EINVAL;
++	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
+ 
+ 	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+ 
+@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
+ 	if (attr->config >= x86_pmu.max_events)
+ 		return -EINVAL;
+ 
++	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
++
+ 	/*
+ 	 * The generic map:
+ 	 */
+diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
+index 72db0664a53d..357e82dc0e2a 100644
+--- a/arch/x86/events/intel/cstate.c
++++ b/arch/x86/events/intel/cstate.c
+@@ -91,6 +91,7 @@
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/cpu_device_id.h>
+ #include <asm/intel-family.h>
+ #include "../perf_event.h"
+@@ -301,6 +302,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
+ 	} else if (event->pmu == &cstate_pkg_pmu) {
+ 		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
+ 			return -EINVAL;
++		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
+ 		if (!pkg_msr[cfg].attr)
+ 			return -EINVAL;
+ 		event->hw.event_base = pkg_msr[cfg].msr;
+diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
+index 18e2628e2d8f..a8aae89dee7f 100644
+--- a/arch/x86/events/msr.c
++++ b/arch/x86/events/msr.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/perf_event.h>
++#include <linux/nospec.h>
+ #include <asm/intel-family.h>
+ 
+ enum perf_msr_id {
+@@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event)
+ 	if (event->attr.type != event->pmu->type)
+ 		return -ENOENT;
+ 
+-	if (cfg >= PERF_MSR_EVENT_MAX)
+-		return -EINVAL;
+-
+ 	/* unsupported modes and filters */
+ 	if (event->attr.exclude_user   ||
+ 	    event->attr.exclude_kernel ||
+@@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event)
+ 	    event->attr.sample_period) /* no sampling */
+ 		return -EINVAL;
+ 
++	if (cfg >= PERF_MSR_EVENT_MAX)
++		return -EINVAL;
++
++	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
++
+ 	if (!msr[cfg].attr)
+ 		return -EINVAL;
+ 
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index c49766b03165..7846c0c20cfe 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -158,16 +158,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	void *private;
+ 	int err;
+ 
+-	/* If caller uses non-allowed flag, return error. */
+-	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+-		return -EINVAL;
+-
+ 	if (sock->state == SS_CONNECTED)
+ 		return -EINVAL;
+ 
+ 	if (addr_len < sizeof(*sa))
+ 		return -EINVAL;
+ 
++	/* If caller uses non-allowed flag, return error. */
++	if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
++		return -EINVAL;
++
+ 	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+ 	sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
+ 
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 7431ccd03316..0df21f046fc6 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4549,6 +4549,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM |
+ 						ATA_HORKAGE_NOLPM, },
+ 
++	/* Sandisk devices which are known to not handle LPM well */
++	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
++
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
+index 1ef67db03c8e..9c9a22958717 100644
+--- a/drivers/atm/zatm.c
++++ b/drivers/atm/zatm.c
+@@ -28,6 +28,7 @@
+ #include <asm/io.h>
+ #include <linux/atomic.h>
+ #include <linux/uaccess.h>
++#include <linux/nospec.h>
+ 
+ #include "uPD98401.h"
+ #include "uPD98402.h"
+@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
+ 					return -EFAULT;
+ 				if (pool < 0 || pool > ZATM_LAST_POOL)
+ 					return -EINVAL;
++				pool = array_index_nospec(pool,
++							  ZATM_LAST_POOL + 1);
+ 				spin_lock_irqsave(&zatm_dev->lock, flags);
+ 				info = zatm_dev->pool_info[pool];
+ 				if (cmd == ZATM_GETPOOLZ) {
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 366a49c7c08f..dcb982e3a41f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+@@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* QCA ROME chipset */
+-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
+@@ -392,6 +392,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
+ 		},
+ 	},
++	{
++		/* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
++		},
++	},
+ 	{}
+ };
+ 
+@@ -2839,6 +2846,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
+ }
+ #endif
+ 
++static void btusb_check_needs_reset_resume(struct usb_interface *intf)
++{
++	if (dmi_check_system(btusb_needs_reset_resume_table))
++		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
++}
++
+ static int btusb_probe(struct usb_interface *intf,
+ 		       const struct usb_device_id *id)
+ {
+@@ -2961,9 +2974,6 @@ static int btusb_probe(struct usb_interface *intf,
+ 	hdev->send   = btusb_send_frame;
+ 	hdev->notify = btusb_notify;
+ 
+-	if (dmi_check_system(btusb_needs_reset_resume_table))
+-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+-
+ #ifdef CONFIG_PM
+ 	err = btusb_config_oob_wake(hdev);
+ 	if (err)
+@@ -3050,6 +3060,7 @@ static int btusb_probe(struct usb_interface *intf,
+ 	if (id->driver_info & BTUSB_QCA_ROME) {
+ 		data->setup_on_usb = btusb_setup_qca;
+ 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
++		btusb_check_needs_reset_resume(intf);
+ 	}
+ 
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
+index d9b43bfc2532..b799a21463d9 100644
+--- a/drivers/clk/ti/clock.h
++++ b/drivers/clk/ti/clock.h
+@@ -74,6 +74,11 @@ enum {
+ #define CLKF_CORE			(1 << 9)
+ #define CLKF_J_TYPE			(1 << 10)
+ 
++/* CLKCTRL flags */
++#define CLKF_SW_SUP			BIT(5)
++#define CLKF_HW_SUP			BIT(6)
++#define CLKF_NO_IDLEST			BIT(7)
++
+ #define CLK(dev, con, ck)		\
+ 	{				\
+ 		.lk = {			\
+@@ -183,10 +188,6 @@ extern const struct omap_clkctrl_data am438x_clkctrl_data[];
+ extern const struct omap_clkctrl_data dm814_clkctrl_data[];
+ extern const struct omap_clkctrl_data dm816_clkctrl_data[];
+ 
+-#define CLKF_SW_SUP	BIT(0)
+-#define CLKF_HW_SUP	BIT(1)
+-#define CLKF_NO_IDLEST	BIT(2)
+-
+ typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
+ 
+ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index 77e485557498..6f693b7d5220 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
+ 	if (set)
+ 		reg |= bit;
+ 	else
+-		reg &= bit;
++		reg &= ~bit;
+ 	iowrite32(reg, addr);
+ 
+ 	spin_unlock_irqrestore(&gpio->lock, flags);
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index d66de67ef307..2d18b598c011 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -446,7 +446,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ 	struct gpiohandle_request handlereq;
+ 	struct linehandle_state *lh;
+ 	struct file *file;
+-	int fd, i, ret;
++	int fd, i, count = 0, ret;
+ 	u32 lflags;
+ 
+ 	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
+@@ -507,6 +507,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ 		if (ret)
+ 			goto out_free_descs;
+ 		lh->descs[i] = desc;
++		count = i;
+ 
+ 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+@@ -577,7 +578,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ out_put_unused_fd:
+ 	put_unused_fd(fd);
+ out_free_descs:
+-	for (; i >= 0; i--)
++	for (i = 0; i < count; i++)
+ 		gpiod_free(lh->descs[i]);
+ 	kfree(lh->label);
+ out_free_lh:
+@@ -851,7 +852,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ 	desc = &gdev->descs[offset];
+ 	ret = gpiod_request(desc, le->label);
+ 	if (ret)
+-		goto out_free_desc;
++		goto out_free_label;
+ 	le->desc = desc;
+ 	le->eflags = eflags;
+ 
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index b76d49218cf1..40549f6824ff 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						       state->connectors[i].state);
+ 		state->connectors[i].ptr = NULL;
+ 		state->connectors[i].state = NULL;
++		state->connectors[i].old_state = NULL;
++		state->connectors[i].new_state = NULL;
+ 		drm_connector_put(connector);
+ 	}
+ 
+@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 
+ 		state->crtcs[i].ptr = NULL;
+ 		state->crtcs[i].state = NULL;
++		state->crtcs[i].old_state = NULL;
++		state->crtcs[i].new_state = NULL;
+ 	}
+ 
+ 	for (i = 0; i < config->num_total_plane; i++) {
+@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						   state->planes[i].state);
+ 		state->planes[i].ptr = NULL;
+ 		state->planes[i].state = NULL;
++		state->planes[i].old_state = NULL;
++		state->planes[i].new_state = NULL;
+ 	}
+ 
+ 	for (i = 0; i < state->num_private_objs; i++) {
+@@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
+ 						 state->private_objs[i].state);
+ 		state->private_objs[i].ptr = NULL;
+ 		state->private_objs[i].state = NULL;
++		state->private_objs[i].old_state = NULL;
++		state->private_objs[i].new_state = NULL;
+ 	}
+ 	state->num_private_objs = 0;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index fd58647fbff3..6c76c7534c49 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2108,9 +2108,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
+ 	return 0;
+ }
+ 
++static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
++{
++	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
++	struct intel_crtc *crtc;
++	struct intel_crtc_state *crtc_state;
++	int vco, i;
++
++	vco = intel_state->cdclk.logical.vco;
++	if (!vco)
++		vco = dev_priv->skl_preferred_vco_freq;
++
++	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
++		if (!crtc_state->base.enable)
++			continue;
++
++		if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
++			continue;
++
++		/*
++		 * DPLL0 VCO may need to be adjusted to get the correct
++		 * clock for eDP. This will affect cdclk as well.
++		 */
++		switch (crtc_state->port_clock / 2) {
++		case 108000:
++		case 216000:
++			vco = 8640000;
++			break;
++		default:
++			vco = 8100000;
++			break;
++		}
++	}
++
++	return vco;
++}
++
+ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(state->dev);
+ 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ 	int min_cdclk, cdclk, vco;
+ 
+@@ -2118,9 +2153,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+ 	if (min_cdclk < 0)
+ 		return min_cdclk;
+ 
+-	vco = intel_state->cdclk.logical.vco;
+-	if (!vco)
+-		vco = dev_priv->skl_preferred_vco_freq;
++	vco = skl_dpll0_vco(intel_state);
+ 
+ 	/*
+ 	 * FIXME should also account for plane ratio
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index a29868cd30c7..79521da5d11d 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1794,26 +1794,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
+ 				reduce_m_n);
+ 	}
+ 
+-	/*
+-	 * DPLL0 VCO may need to be adjusted to get the correct
+-	 * clock for eDP. This will affect cdclk as well.
+-	 */
+-	if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
+-		int vco;
+-
+-		switch (pipe_config->port_clock / 2) {
+-		case 108000:
+-		case 216000:
+-			vco = 8640000;
+-			break;
+-		default:
+-			vco = 8100000;
+-			break;
+-		}
+-
+-		to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
+-	}
+-
+ 	if (!HAS_DDI(dev_priv))
+ 		intel_dp_set_clock(encoder, pipe_config);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index ef80499113ee..7ed6f7b69556 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -319,7 +319,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
+ 
+ 	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
+ 	POSTING_READ(lvds_encoder->reg);
+-	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
++
++	if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ 		DRM_ERROR("timed out waiting for panel to power on\n");
+ 
+ 	intel_panel_enable_backlight(pipe_config, conn_state);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 80fa68d54bd3..2e8c95ce1a5a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+ 	INIT_LIST_HEAD(&nvbo->entry);
+ 	INIT_LIST_HEAD(&nvbo->vma_list);
+ 	nvbo->bo.bdev = &drm->ttm.bdev;
+-	nvbo->cli = cli;
+ 
+ 	/* This is confusing, and doesn't actually mean we want an uncached
+ 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
+index be8e00b49cde..73c48440d4d7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
+@@ -26,8 +26,6 @@ struct nouveau_bo {
+ 
+ 	struct list_head vma_list;
+ 
+-	struct nouveau_cli *cli;
+-
+ 	unsigned contig:1;
+ 	unsigned page:5;
+ 	unsigned kind:8;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+index dff51a0ee028..8c093ca4222e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+ 			 struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ 			 struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
+ 		      struct ttm_mem_reg *reg)
+ {
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+-	struct nouveau_drm *drm = nvbo->cli->drm;
++	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ 	struct nouveau_mem *mem;
+ 	int ret;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index caddce88d2d8..0451d80672a5 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -3272,10 +3272,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	drm_connector_unregister(&mstc->connector);
+ 
+-	drm_modeset_lock_all(drm->dev);
+ 	drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
++
++	drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
+ 	mstc->port = NULL;
+-	drm_modeset_unlock_all(drm->dev);
++	drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
+ 
+ 	drm_connector_unreference(&mstc->connector);
+ }
+@@ -3285,9 +3286,7 @@ nv50_mstm_register_connector(struct drm_connector *connector)
+ {
+ 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ 
+-	drm_modeset_lock_all(drm->dev);
+ 	drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
+-	drm_modeset_unlock_all(drm->dev);
+ 
+ 	drm_connector_register(connector);
+ }
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 2b12c55a3bff..28311caf1e47 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -904,7 +904,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ 			while (npages >= HPAGE_PMD_NR) {
+ 				gfp_t huge_flags = gfp_flags;
+ 
+-				huge_flags |= GFP_TRANSHUGE;
++				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++					__GFP_KSWAPD_RECLAIM;
+ 				huge_flags &= ~__GFP_MOVABLE;
+ 				huge_flags &= ~__GFP_COMP;
+ 				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+@@ -1021,11 +1022,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+ 				  GFP_USER | GFP_DMA32, "uc dma", 0);
+ 
+ 	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP),
++				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++				   __GFP_KSWAPD_RECLAIM) &
++				  ~(__GFP_MOVABLE | __GFP_COMP),
+ 				  "wc huge", order);
+ 
+ 	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+-				  GFP_TRANSHUGE	& ~(__GFP_MOVABLE | __GFP_COMP)
++				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++				   __GFP_KSWAPD_RECLAIM) &
++				  ~(__GFP_MOVABLE | __GFP_COMP)
+ 				  , "uc huge", order);
+ 
+ 	_manager->options.max_size = max_pages;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+index a88051552ace..323cadaeb7d1 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -915,7 +915,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+ 		gfp_flags |= __GFP_ZERO;
+ 
+ 	if (huge) {
+-		gfp_flags |= GFP_TRANSHUGE;
++		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
++			__GFP_KSWAPD_RECLAIM;
+ 		gfp_flags &= ~__GFP_MOVABLE;
+ 		gfp_flags &= ~__GFP_COMP;
+ 	}
+diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
+index 515f97997624..92bd12d3fe2b 100644
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -557,7 +557,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
+ 	 * the scl fields here.
+ 	 */
+ 	if (num_planes == 1) {
+-		scl0 = vc4_get_scl_field(state, 1);
++		scl0 = vc4_get_scl_field(state, 0);
+ 		scl1 = scl0;
+ 	} else {
+ 		scl0 = vc4_get_scl_field(state, 1);
+diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
+index 036a03f0d0a6..1667b6e7674f 100644
+--- a/drivers/i2c/i2c-dev.c
++++ b/drivers/i2c/i2c-dev.c
+@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
+ 		 */
+ 		if (msgs[i].flags & I2C_M_RECV_LEN) {
+ 			if (!(msgs[i].flags & I2C_M_RD) ||
+-			    msgs[i].buf[0] < 1 ||
++			    msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
+ 			    msgs[i].len < msgs[i].buf[0] +
+ 					     I2C_SMBUS_BLOCK_MAX) {
+ 				res = -EINVAL;
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 46d7c8749222..945f9501b642 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
+ 	unsigned i;
+ 	for (i = 0; i < ic->journal_sections; i++)
+ 		kvfree(sl[i]);
+-	kfree(sl);
++	kvfree(sl);
+ }
+ 
+ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
+diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
+index 795f868fe1f7..f978c06fbd7d 100644
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -1070,7 +1070,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ 		return ret;
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+-				  chip->data_interface.timings.sdr.tPROG_max);
++				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+ 	return ret;
+ }
+ 
+@@ -1404,6 +1404,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ 	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ 	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ 	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
++	u32 xtype;
+ 	int ret;
+ 	struct marvell_nfc_op nfc_op = {
+ 		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+@@ -1419,7 +1420,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ 	 * last naked write.
+ 	 */
+ 	if (chunk == 0) {
+-		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
++		if (lt->nchunks == 1)
++			xtype = XTYPE_MONOLITHIC_RW;
++		else
++			xtype = XTYPE_WRITE_DISPATCH;
++
++		nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
+ 				  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ 				  NDCB0_CMD1(NAND_CMD_SEQIN);
+ 		nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+@@ -1490,7 +1496,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ 	}
+ 
+ 	ret = marvell_nfc_wait_op(chip,
+-				  chip->data_interface.timings.sdr.tPROG_max);
++				  PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
+ 
+ 	marvell_nfc_disable_hw_ecc(chip);
+ 
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index 634c51e6b8ae..d53a45bf2a72 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -200,6 +200,7 @@
+ #define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+ #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+ #define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
++#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN	BIT(7) /* default to BE register access */
+ 
+ /* Structure of the message buffer */
+ struct flexcan_mb {
+@@ -287,6 +288,12 @@ struct flexcan_priv {
+ };
+ 
+ static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
++	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
++		FLEXCAN_QUIRK_BROKEN_PERR_STATE |
++		FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
++};
++
++static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ 		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ };
+@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev)
+ static const struct of_device_id flexcan_of_match[] = {
+ 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+-	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+-	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+-	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
++	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
++	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
++	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
+ 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ 	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ 	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev)
+ 
+ 	priv = netdev_priv(dev);
+ 
+-	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
++	if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
++	    devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ 		priv->read = flexcan_read_be;
+ 		priv->write = flexcan_write_be;
+ 	} else {
+-		if (of_device_is_compatible(pdev->dev.of_node,
+-					    "fsl,p1010-flexcan")) {
+-			priv->read = flexcan_read_be;
+-			priv->write = flexcan_write_be;
+-		} else {
+-			priv->read = flexcan_read_le;
+-			priv->write = flexcan_write_le;
+-		}
++		priv->read = flexcan_read_le;
++		priv->write = flexcan_write_le;
+ 	}
+ 
+ 	priv->can.clock.freq = clock_freq;
+diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
+index 5590c559a8ca..53e320c92a8b 100644
+--- a/drivers/net/can/spi/hi311x.c
++++ b/drivers/net/can/spi/hi311x.c
+@@ -91,6 +91,7 @@
+ #define HI3110_STAT_BUSOFF BIT(2)
+ #define HI3110_STAT_ERRP BIT(3)
+ #define HI3110_STAT_ERRW BIT(4)
++#define HI3110_STAT_TXMTY BIT(7)
+ 
+ #define HI3110_BTR0_SJW_SHIFT 6
+ #define HI3110_BTR0_BRP_SHIFT 0
+@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
+ 	struct hi3110_priv *priv = netdev_priv(net);
+ 	struct spi_device *spi = priv->spi;
+ 
++	mutex_lock(&priv->hi3110_lock);
+ 	bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
+ 	bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
++	mutex_unlock(&priv->hi3110_lock);
+ 
+ 	return 0;
+ }
+@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			}
+ 		}
+ 
+-		if (intf == 0)
+-			break;
+-
+-		if (intf & HI3110_INT_TXCPLT) {
++		if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ 			net->stats.tx_packets++;
+ 			net->stats.tx_bytes += priv->tx_len - 1;
+ 			can_led_event(net, CAN_LED_EVENT_TX);
+@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
+ 			}
+ 			netif_wake_queue(net);
+ 		}
++
++		if (intf == 0)
++			break;
+ 	}
+ 	mutex_unlock(&priv->hi3110_lock);
+ 	return IRQ_HANDLED;
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 63587b8e6825..daed57d3d209 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
+ 
+ 	skb = alloc_can_skb(priv->netdev, &cf);
+ 	if (!skb) {
+-		stats->tx_dropped++;
++		stats->rx_dropped++;
+ 		return;
+ 	}
+ 
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0b9e60861e53..f81773570dfd 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -122,7 +122,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+ 	ret = nvme_reset_ctrl(ctrl);
+ 	if (!ret) {
+ 		flush_work(&ctrl->reset_work);
+-		if (ctrl->state != NVME_CTRL_LIVE)
++		if (ctrl->state != NVME_CTRL_LIVE &&
++		    ctrl->state != NVME_CTRL_ADMIN_ONLY)
+ 			ret = -ENETRESET;
+ 	}
+ 
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index d733b14ede9d..013380641ddf 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -83,6 +83,11 @@ enum nvme_quirks {
+ 	 * Supports the LighNVM command set if indicated in vs[1].
+ 	 */
+ 	NVME_QUIRK_LIGHTNVM			= (1 << 6),
++
++	/*
++	 * Set MEDIUM priority on SQ creation
++	 */
++	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),
+ };
+ 
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index b6f43b738f03..f6648610d153 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1091,9 +1091,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ 						struct nvme_queue *nvmeq)
+ {
++	struct nvme_ctrl *ctrl = &dev->ctrl;
+ 	struct nvme_command c;
+ 	int flags = NVME_QUEUE_PHYS_CONTIG;
+ 
++	/*
++	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
++	 * set. Since URGENT priority is zeroes, it makes all queues
++	 * URGENT.
++	 */
++	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
++		flags |= NVME_SQ_PRIO_MEDIUM;
++
+ 	/*
+ 	 * Note: we (ab)use the fact that the prp fields survive if no data
+ 	 * is attached to the request.
+@@ -2684,7 +2693,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+ 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+ 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
+-		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
++		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
++				NVME_QUIRK_MEDIUM_PRIO_SQ },
+ 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
+ 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
+ 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index f6a4dd10d9b0..4f98b26301cb 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1897,7 +1897,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
+ EXPORT_SYMBOL(pci_pme_active);
+ 
+ /**
+- * pci_enable_wake - enable PCI device as wakeup event source
++ * __pci_enable_wake - enable PCI device as wakeup event source
+  * @dev: PCI device affected
+  * @state: PCI state from which device will issue wakeup events
+  * @enable: True to enable event generation; false to disable
+@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(pci_pme_active);
+  * Error code depending on the platform is returned if both the platform and
+  * the native mechanism fail to enable the generation of wake-up events
+  */
+-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
++static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+ {
+ 	int ret = 0;
+ 
+@@ -1956,6 +1956,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+ 
+ 	return ret;
+ }
++
++/**
++ * pci_enable_wake - change wakeup settings for a PCI device
++ * @pci_dev: Target device
++ * @state: PCI state from which device will issue wakeup events
++ * @enable: Whether or not to enable event generation
++ *
++ * If @enable is set, check device_may_wakeup() for the device before calling
++ * __pci_enable_wake() for it.
++ */
++int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
++{
++	if (enable && !device_may_wakeup(&pci_dev->dev))
++		return -EINVAL;
++
++	return __pci_enable_wake(pci_dev, state, enable);
++}
+ EXPORT_SYMBOL(pci_enable_wake);
+ 
+ /**
+@@ -1968,9 +1985,9 @@ EXPORT_SYMBOL(pci_enable_wake);
+  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+  * ordering constraints.
+  *
+- * This function only returns error code if the device is not capable of
+- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+- * enable wake-up power for it.
++ * This function only returns error code if the device is not allowed to wake
++ * up the system from sleep or it is not capable of generating PME# from both
++ * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
+  */
+ int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+ {
+@@ -2101,7 +2118,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
+ 
+ 	dev->runtime_d3cold = target_state == PCI_D3cold;
+ 
+-	pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
++	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+ 
+ 	error = pci_set_power_state(dev, target_state);
+ 
+@@ -2125,16 +2142,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ {
+ 	struct pci_bus *bus = dev->bus;
+ 
+-	if (device_can_wakeup(&dev->dev))
+-		return true;
+-
+ 	if (!dev->pme_support)
+ 		return false;
+ 
+ 	/* PME-capable in principle, but not from the target power state */
+-	if (!pci_pme_capable(dev, pci_target_state(dev, false)))
++	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
+ 		return false;
+ 
++	if (device_can_wakeup(&dev->dev))
++		return true;
++
+ 	while (bus->parent) {
+ 		struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
+index ed805c7c5ace..ac83f721db24 100644
+--- a/drivers/thermal/samsung/exynos_tmu.c
++++ b/drivers/thermal/samsung/exynos_tmu.c
+@@ -185,6 +185,7 @@
+  * @regulator: pointer to the TMU regulator structure.
+  * @reg_conf: pointer to structure to register with core thermal.
+  * @ntrip: number of supported trip points.
++ * @enabled: current status of TMU device
+  * @tmu_initialize: SoC specific TMU initialization method
+  * @tmu_control: SoC specific TMU control method
+  * @tmu_read: SoC specific TMU temperature read method
+@@ -205,6 +206,7 @@ struct exynos_tmu_data {
+ 	struct regulator *regulator;
+ 	struct thermal_zone_device *tzd;
+ 	unsigned int ntrip;
++	bool enabled;
+ 
+ 	int (*tmu_initialize)(struct platform_device *pdev);
+ 	void (*tmu_control)(struct platform_device *pdev, bool on);
+@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
+ 	mutex_lock(&data->lock);
+ 	clk_enable(data->clk);
+ 	data->tmu_control(pdev, on);
++	data->enabled = on;
+ 	clk_disable(data->clk);
+ 	mutex_unlock(&data->lock);
+ }
+@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
+ static int exynos_get_temp(void *p, int *temp)
+ {
+ 	struct exynos_tmu_data *data = p;
++	int value, ret = 0;
+ 
+-	if (!data || !data->tmu_read)
++	if (!data || !data->tmu_read || !data->enabled)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&data->lock);
+ 	clk_enable(data->clk);
+ 
+-	*temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
++	value = data->tmu_read(data);
++	if (value < 0)
++		ret = value;
++	else
++		*temp = code_to_temp(data, value) * MCELSIUS;
+ 
+ 	clk_disable(data->clk);
+ 	mutex_unlock(&data->lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ #ifdef CONFIG_THERMAL_EMULATION
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index b67eec3532a1..4ce8de724c62 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -878,6 +878,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 		size_t start = 0;
+ 		ssize_t len;
+ 
++		if (write)
++			size = min_t(u64, size, fsc->mount_options->wsize);
++		else
++			size = min_t(u64, size, fsc->mount_options->rsize);
++
+ 		vino = ceph_vino(inode);
+ 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+ 					    vino, pos, &size, 0,
+@@ -893,11 +898,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ 			break;
+ 		}
+ 
+-		if (write)
+-			size = min_t(u64, size, fsc->mount_options->wsize);
+-		else
+-			size = min_t(u64, size, fsc->mount_options->rsize);
+-
+ 		len = size;
+ 		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
+ 		if (IS_ERR(pages)) {
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index f715609b13f3..5a5a0158cc8f 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1047,6 +1047,18 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
+ 	return rc;
+ }
+ 
++/*
++ * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
++ * is a dummy operation.
++ */
++static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
++{
++	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
++		 file, datasync);
++
++	return 0;
++}
++
+ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+ 				struct file *dst_file, loff_t destoff,
+ 				size_t len, unsigned int flags)
+@@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = {
+ 	.copy_file_range = cifs_copy_file_range,
+ 	.clone_file_range = cifs_clone_file_range,
+ 	.llseek = generic_file_llseek,
++	.fsync = cifs_dir_fsync,
+ };
+ 
+ static void
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index 40c34a0ef58a..3abf4b6f3a3f 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
+ 	}
+ 
+ 	if (!list_empty(&wb->work_list))
+-		mod_delayed_work(bdi_wq, &wb->dwork, 0);
++		wb_wakeup(wb);
+ 	else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
+ 		wb_wakeup_delayed(wb);
+ 
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 66df387106de..a9e4f6c6339e 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -335,8 +335,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
+ 				struct bpf_prog *old_prog);
+ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+-			     __u32 __user *prog_ids, u32 request_cnt,
+-			     __u32 __user *prog_cnt);
++			     u32 *prog_ids, u32 request_cnt,
++			     u32 *prog_cnt);
+ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ 			struct bpf_prog *exclude_prog,
+ 			struct bpf_prog *include_prog,
+diff --git a/include/linux/oom.h b/include/linux/oom.h
+index 5bad038ac012..6adac113e96d 100644
+--- a/include/linux/oom.h
++++ b/include/linux/oom.h
+@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
+ 	return 0;
+ }
+ 
++void __oom_reap_task_mm(struct mm_struct *mm);
++
+ extern unsigned long oom_badness(struct task_struct *p,
+ 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ 		unsigned long totalpages);
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index 61b39eaf7cad..612b82ca68b5 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -262,4 +262,21 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
+ 	return out_of_line_wait_on_atomic_t(val, action, mode);
+ }
+ 
++/**
++ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
++ *
++ * @bit: the bit of the word being waited on
++ * @word: the word being waited on, a kernel virtual address
++ *
++ * You can use this helper if bitflags are manipulated atomically rather than
++ * non-atomically under a lock.
++ */
++static inline void clear_and_wake_up_bit(int bit, void *word)
++{
++	clear_bit_unlock(bit, word);
++	/* See wake_up_bit() for which memory barrier you need to use. */
++	smp_mb__after_atomic();
++	wake_up_bit(word, bit);
++}
++
+ #endif /* _LINUX_WAIT_BIT_H */
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 899495589a7e..c7be1ca8e562 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -43,6 +43,7 @@ struct inet_timewait_sock {
+ #define tw_family		__tw_common.skc_family
+ #define tw_state		__tw_common.skc_state
+ #define tw_reuse		__tw_common.skc_reuse
++#define tw_reuseport		__tw_common.skc_reuseport
+ #define tw_ipv6only		__tw_common.skc_ipv6only
+ #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
+ #define tw_node			__tw_common.skc_nulls_node
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 36bb794f5cd6..902ff382a6dc 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -7,7 +7,7 @@
+ 
+ static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
+ {
+-	return remaining >= sizeof(*rtnh) &&
++	return remaining >= (int)sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len >= sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len <= remaining;
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index d315b393abdd..ba03ec39efb3 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
+ 	return cnt;
+ }
+ 
++static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
++				     u32 *prog_ids,
++				     u32 request_cnt)
++{
++	int i = 0;
++
++	for (; *prog; prog++) {
++		if (*prog == &dummy_bpf_prog.prog)
++			continue;
++		prog_ids[i] = (*prog)->aux->id;
++		if (++i == request_cnt) {
++			prog++;
++			break;
++		}
++	}
++
++	return !!(*prog);
++}
++
+ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ 				__u32 __user *prog_ids, u32 cnt)
+ {
+ 	struct bpf_prog **prog;
+ 	unsigned long err = 0;
+-	u32 i = 0, *ids;
+ 	bool nospc;
++	u32 *ids;
+ 
+ 	/* users of this function are doing:
+ 	 * cnt = bpf_prog_array_length();
+@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
+ 		return -ENOMEM;
+ 	rcu_read_lock();
+ 	prog = rcu_dereference(progs)->progs;
+-	for (; *prog; prog++) {
+-		if (*prog == &dummy_bpf_prog.prog)
+-			continue;
+-		ids[i] = (*prog)->aux->id;
+-		if (++i == cnt) {
+-			prog++;
+-			break;
+-		}
+-	}
+-	nospc = !!(*prog);
++	nospc = bpf_prog_array_copy_core(prog, ids, cnt);
+ 	rcu_read_unlock();
+ 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
+ 	kfree(ids);
+@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ }
+ 
+ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+-			     __u32 __user *prog_ids, u32 request_cnt,
+-			     __u32 __user *prog_cnt)
++			     u32 *prog_ids, u32 request_cnt,
++			     u32 *prog_cnt)
+ {
++	struct bpf_prog **prog;
+ 	u32 cnt = 0;
+ 
+ 	if (array)
+ 		cnt = bpf_prog_array_length(array);
+ 
+-	if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
+-		return -EFAULT;
++	*prog_cnt = cnt;
+ 
+ 	/* return early if user requested only program count or nothing to copy */
+ 	if (!request_cnt || !cnt)
+ 		return 0;
+ 
+-	return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
++	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
++	prog = rcu_dereference_check(array, 1)->progs;
++	return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
++								     : 0;
+ }
+ 
+ static void bpf_prog_free_deferred(struct work_struct *work)
+diff --git a/kernel/compat.c b/kernel/compat.c
+index 3f5fa8902e7d..b3a9ea4aa8fd 100644
+--- a/kernel/compat.c
++++ b/kernel/compat.c
+@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
+ {
+ 	struct compat_timex tx32;
+ 
++	memset(txc, 0, sizeof(struct timex));
+ 	if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
+ 		return -EFAULT;
+ 
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index 73cc26e321de..c187aa3df3c8 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -131,14 +131,8 @@ int get_callchain_buffers(int event_max_stack)
+ 		goto exit;
+ 	}
+ 
+-	if (count > 1) {
+-		/* If the allocation failed, give up */
+-		if (!callchain_cpus_entries)
+-			err = -ENOMEM;
+-		goto exit;
+-	}
+-
+-	err = alloc_callchain_buffers();
++	if (count == 1)
++		err = alloc_callchain_buffers();
+ exit:
+ 	if (err)
+ 		atomic_dec(&nr_callchain_events);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 6c6b3c48db71..1d8ca9ea9979 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/circ_buf.h>
+ #include <linux/poll.h>
++#include <linux/nospec.h>
+ 
+ #include "internal.h"
+ 
+@@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
+ 			return NULL;
+ 
+ 		/* AUX space */
+-		if (pgoff >= rb->aux_pgoff)
+-			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
++		if (pgoff >= rb->aux_pgoff) {
++			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
++			return virt_to_page(rb->aux_pages[aux_pgoff]);
++		}
+ 	}
+ 
+ 	return __perf_mmap_to_page(rb, pgoff);
+diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
+index bb4b9fe026a1..e3d1ba7e3a94 100644
+--- a/kernel/sched/autogroup.c
++++ b/kernel/sched/autogroup.c
+@@ -4,6 +4,7 @@
+ #include <linux/utsname.h>
+ #include <linux/security.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ 
+ #include "sched.h"
+ 
+@@ -212,7 +213,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
+ 	static unsigned long next = INITIAL_JIFFIES;
+ 	struct autogroup *ag;
+ 	unsigned long shares;
+-	int err;
++	int err, idx;
+ 
+ 	if (nice < MIN_NICE || nice > MAX_NICE)
+ 		return -EINVAL;
+@@ -230,7 +231,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
+ 
+ 	next = HZ / 10 + jiffies;
+ 	ag = autogroup_task_get(p);
+-	shares = scale_load(sched_prio_to_weight[nice + 20]);
++
++	idx = array_index_nospec(nice + 20, 40);
++	shares = scale_load(sched_prio_to_weight[idx]);
+ 
+ 	down_write(&ag->lock);
+ 	err = sched_group_set_shares(ag->tg, shares);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c94895bc5a2c..5f37ef9f6cd5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -23,6 +23,7 @@
+ #include <linux/mmu_context.h>
+ #include <linux/module.h>
+ #include <linux/nmi.h>
++#include <linux/nospec.h>
+ #include <linux/prefetch.h>
+ #include <linux/profile.h>
+ #include <linux/security.h>
+@@ -6873,11 +6874,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
+ 				     struct cftype *cft, s64 nice)
+ {
+ 	unsigned long weight;
++	int idx;
+ 
+ 	if (nice < MIN_NICE || nice > MAX_NICE)
+ 		return -ERANGE;
+ 
+-	weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
++	idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
++	idx = array_index_nospec(idx, 40);
++	weight = sched_prio_to_weight[idx];
++
+ 	return sched_group_set_shares(css_tg(css), scale_load(weight));
+ }
+ #endif
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 7936f548e071..6a64d45a4c80 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -290,7 +290,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
+ 		 * Do not reduce the frequency if the CPU has not been idle
+ 		 * recently, as the reduction is likely to be premature then.
+ 		 */
+-		if (busy && next_f < sg_policy->next_freq) {
++		if (busy && next_f < sg_policy->next_freq &&
++		    sg_policy->next_freq != UINT_MAX) {
+ 			next_f = sg_policy->next_freq;
+ 
+ 			/* Reset cached freq as next_freq has changed */
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 01e6b3a38871..142b6c73bba8 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -876,6 +876,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+ {
+ 	struct perf_event_query_bpf __user *uquery = info;
+ 	struct perf_event_query_bpf query = {};
++	u32 *ids, prog_cnt, ids_len;
+ 	int ret;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -884,15 +885,31 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
+ 		return -EINVAL;
+ 	if (copy_from_user(&query, uquery, sizeof(query)))
+ 		return -EFAULT;
+-	if (query.ids_len > BPF_TRACE_MAX_PROGS)
++
++	ids_len = query.ids_len;
++	if (ids_len > BPF_TRACE_MAX_PROGS)
+ 		return -E2BIG;
++	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
++	if (!ids)
++		return -ENOMEM;
++	/*
++	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
++	 * is required when user only wants to check for uquery->prog_cnt.
++	 * There is no need to check for it since the case is handled
++	 * gracefully in bpf_prog_array_copy_info.
++	 */
+ 
+ 	mutex_lock(&bpf_event_mutex);
+ 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
+-				       uquery->ids,
+-				       query.ids_len,
+-				       &uquery->prog_cnt);
++				       ids,
++				       ids_len,
++				       &prog_cnt);
+ 	mutex_unlock(&bpf_event_mutex);
+ 
++	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
++	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
++		ret = -EFAULT;
++
++	kfree(ids);
+ 	return ret;
+ }
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index a764aec3c9a1..55008fa93097 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -338,6 +338,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
+ 
+ static int regex_match_front(char *str, struct regex *r, int len)
+ {
++	if (len < r->len)
++		return 0;
++
+ 	if (strncmp(str, r->pattern, r->len) == 0)
+ 		return 1;
+ 	return 0;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index fff97dc0b70f..67a52bbbe48d 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -152,6 +152,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ 		return;
+ 
+ 	ret = strncpy_from_user(dst, src, maxlen);
++	if (ret == maxlen)
++		dst[--ret] = '\0';
+ 
+ 	if (ret < 0) {	/* Failed to fetch string */
+ 		((u8 *)get_rloc_data(dest))[0] = '\0';
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 0331de0e9144..dc81f16b9095 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -727,7 +727,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ 
+ 	phys_addr = swiotlb_tbl_map_single(dev,
+ 			swiotlb_phys_to_dma(dev, io_tlb_start),
+-			0, size, DMA_FROM_DEVICE, 0);
++			0, size, DMA_FROM_DEVICE, attrs);
+ 	if (phys_addr == SWIOTLB_MAP_ERROR)
+ 		goto out_warn;
+ 
+diff --git a/mm/backing-dev.c b/mm/backing-dev.c
+index b5f940ce0143..be585f545337 100644
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -126,6 +126,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+ 					       bdi, &bdi_debug_stats_fops);
+ 	if (!bdi->debug_stats) {
+ 		debugfs_remove(bdi->debug_dir);
++		bdi->debug_dir = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -394,7 +395,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
+ 	 * the barrier provided by test_and_clear_bit() above.
+ 	 */
+ 	smp_wmb();
+-	clear_bit(WB_shutting_down, &wb->state);
++	clear_and_wake_up_bit(WB_shutting_down, &wb->state);
+ }
+ 
+ static void wb_exit(struct bdi_writeback *wb)
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 9ec024b862ac..88719f53ae3b 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -4108,6 +4108,9 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
+ {
+ 	struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+ 
++	if (!pn)
++		return;
++
+ 	free_percpu(pn->lruvec_stat_cpu);
+ 	kfree(pn);
+ }
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 9efdc021ad22..03ca089cce0f 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2997,6 +2997,32 @@ void exit_mmap(struct mm_struct *mm)
+ 	/* mm's last user has gone, and its about to be pulled down */
+ 	mmu_notifier_release(mm);
+ 
++	if (unlikely(mm_is_oom_victim(mm))) {
++		/*
++		 * Manually reap the mm to free as much memory as possible.
++		 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
++		 * this mm from further consideration.  Taking mm->mmap_sem for
++		 * write after setting MMF_OOM_SKIP will guarantee that the oom
++		 * reaper will not run on this mm again after mmap_sem is
++		 * dropped.
++		 *
++		 * Nothing can be holding mm->mmap_sem here and the above call
++		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
++		 * __oom_reap_task_mm() will not block.
++		 *
++		 * This needs to be done before calling munlock_vma_pages_all(),
++		 * which clears VM_LOCKED, otherwise the oom reaper cannot
++		 * reliably test it.
++		 */
++		mutex_lock(&oom_lock);
++		__oom_reap_task_mm(mm);
++		mutex_unlock(&oom_lock);
++
++		set_bit(MMF_OOM_SKIP, &mm->flags);
++		down_write(&mm->mmap_sem);
++		up_write(&mm->mmap_sem);
++	}
++
+ 	if (mm->locked_vm) {
+ 		vma = mm->mmap;
+ 		while (vma) {
+@@ -3018,24 +3044,6 @@ void exit_mmap(struct mm_struct *mm)
+ 	/* update_hiwater_rss(mm) here? but nobody should be looking */
+ 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
+ 	unmap_vmas(&tlb, vma, 0, -1);
+-
+-	if (unlikely(mm_is_oom_victim(mm))) {
+-		/*
+-		 * Wait for oom_reap_task() to stop working on this
+-		 * mm. Because MMF_OOM_SKIP is already set before
+-		 * calling down_read(), oom_reap_task() will not run
+-		 * on this "mm" post up_write().
+-		 *
+-		 * mm_is_oom_victim() cannot be set from under us
+-		 * either because victim->mm is already set to NULL
+-		 * under task_lock before calling mmput and oom_mm is
+-		 * set not NULL by the OOM killer only if victim->mm
+-		 * is found not NULL while holding the task_lock.
+-		 */
+-		set_bit(MMF_OOM_SKIP, &mm->flags);
+-		down_write(&mm->mmap_sem);
+-		up_write(&mm->mmap_sem);
+-	}
+ 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+ 	tlb_finish_mmu(&tlb, 0, -1);
+ 
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index f2e7dfb81eee..c594679ce201 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -474,7 +474,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
+ 	return false;
+ }
+ 
+-
+ #ifdef CONFIG_MMU
+ /*
+  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
+@@ -485,16 +484,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
+ static struct task_struct *oom_reaper_list;
+ static DEFINE_SPINLOCK(oom_reaper_lock);
+ 
+-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++void __oom_reap_task_mm(struct mm_struct *mm)
+ {
+-	struct mmu_gather tlb;
+ 	struct vm_area_struct *vma;
++
++	/*
++	 * Tell all users of get_user/copy_from_user etc... that the content
++	 * is no longer stable. No barriers really needed because unmapping
++	 * should imply barriers already and the reader would hit a page fault
++	 * if it stumbled over a reaped memory.
++	 */
++	set_bit(MMF_UNSTABLE, &mm->flags);
++
++	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
++		if (!can_madv_dontneed_vma(vma))
++			continue;
++
++		/*
++		 * Only anonymous pages have a good chance to be dropped
++		 * without additional steps which we cannot afford as we
++		 * are OOM already.
++		 *
++		 * We do not even care about fs backed pages because all
++		 * which are reclaimable have already been reclaimed and
++		 * we do not want to block exit_mmap by keeping mm ref
++		 * count elevated without a good reason.
++		 */
++		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
++			const unsigned long start = vma->vm_start;
++			const unsigned long end = vma->vm_end;
++			struct mmu_gather tlb;
++
++			tlb_gather_mmu(&tlb, mm, start, end);
++			mmu_notifier_invalidate_range_start(mm, start, end);
++			unmap_page_range(&tlb, vma, start, end, NULL);
++			mmu_notifier_invalidate_range_end(mm, start, end);
++			tlb_finish_mmu(&tlb, start, end);
++		}
++	}
++}
++
++static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
++{
+ 	bool ret = true;
+ 
+ 	/*
+ 	 * We have to make sure to not race with the victim exit path
+ 	 * and cause premature new oom victim selection:
+-	 * __oom_reap_task_mm		exit_mm
++	 * oom_reap_task_mm		exit_mm
+ 	 *   mmget_not_zero
+ 	 *				  mmput
+ 	 *				    atomic_dec_and_test
+@@ -539,39 +576,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+ 
+ 	trace_start_task_reaping(tsk->pid);
+ 
+-	/*
+-	 * Tell all users of get_user/copy_from_user etc... that the content
+-	 * is no longer stable. No barriers really needed because unmapping
+-	 * should imply barriers already and the reader would hit a page fault
+-	 * if it stumbled over a reaped memory.
+-	 */
+-	set_bit(MMF_UNSTABLE, &mm->flags);
+-
+-	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+-		if (!can_madv_dontneed_vma(vma))
+-			continue;
++	__oom_reap_task_mm(mm);
+ 
+-		/*
+-		 * Only anonymous pages have a good chance to be dropped
+-		 * without additional steps which we cannot afford as we
+-		 * are OOM already.
+-		 *
+-		 * We do not even care about fs backed pages because all
+-		 * which are reclaimable have already been reclaimed and
+-		 * we do not want to block exit_mmap by keeping mm ref
+-		 * count elevated without a good reason.
+-		 */
+-		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+-			const unsigned long start = vma->vm_start;
+-			const unsigned long end = vma->vm_end;
+-
+-			tlb_gather_mmu(&tlb, mm, start, end);
+-			mmu_notifier_invalidate_range_start(mm, start, end);
+-			unmap_page_range(&tlb, vma, start, end, NULL);
+-			mmu_notifier_invalidate_range_end(mm, start, end);
+-			tlb_finish_mmu(&tlb, start, end);
+-		}
+-	}
+ 	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
+ 			task_pid_nr(tsk), tsk->comm,
+ 			K(get_mm_counter(mm, MM_ANONPAGES)),
+@@ -592,13 +598,12 @@ static void oom_reap_task(struct task_struct *tsk)
+ 	struct mm_struct *mm = tsk->signal->oom_mm;
+ 
+ 	/* Retry the down_read_trylock(mmap_sem) a few times */
+-	while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
++	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
+ 		schedule_timeout_idle(HZ/10);
+ 
+ 	if (attempts <= MAX_OOM_REAP_RETRIES)
+ 		goto done;
+ 
+-
+ 	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
+ 		task_pid_nr(tsk), tsk->comm);
+ 	debug_show_all_locks();
+diff --git a/mm/sparse.c b/mm/sparse.c
+index 7af5e7a92528..6336444fe589 100644
+--- a/mm/sparse.c
++++ b/mm/sparse.c
+@@ -666,7 +666,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+ 	unsigned long pfn;
+ 
+ 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+-		unsigned long section_nr = pfn_to_section_nr(start_pfn);
++		unsigned long section_nr = pfn_to_section_nr(pfn);
+ 		struct mem_section *ms;
+ 
+ 		/*
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index d589d318727f..36d31d3593e1 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -144,7 +144,8 @@ enum z3fold_page_flags {
+ 	PAGE_HEADLESS = 0,
+ 	MIDDLE_CHUNK_MAPPED,
+ 	NEEDS_COMPACTING,
+-	PAGE_STALE
++	PAGE_STALE,
++	UNDER_RECLAIM
+ };
+ 
+ /*****************
+@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
+ 	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
+ 	clear_bit(NEEDS_COMPACTING, &page->private);
+ 	clear_bit(PAGE_STALE, &page->private);
++	clear_bit(UNDER_RECLAIM, &page->private);
+ 
+ 	spin_lock_init(&zhdr->page_lock);
+ 	kref_init(&zhdr->refcount);
+@@ -748,6 +750,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
+ 		atomic64_dec(&pool->pages_nr);
+ 		return;
+ 	}
++	if (test_bit(UNDER_RECLAIM, &page->private)) {
++		z3fold_page_unlock(zhdr);
++		return;
++	}
+ 	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
+ 		z3fold_page_unlock(zhdr);
+ 		return;
+@@ -832,6 +838,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 			kref_get(&zhdr->refcount);
+ 			list_del_init(&zhdr->buddy);
+ 			zhdr->cpu = -1;
++			set_bit(UNDER_RECLAIM, &page->private);
++			break;
+ 		}
+ 
+ 		list_del_init(&page->lru);
+@@ -879,25 +887,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ 				goto next;
+ 		}
+ next:
+-		spin_lock(&pool->lock);
+ 		if (test_bit(PAGE_HEADLESS, &page->private)) {
+ 			if (ret == 0) {
+-				spin_unlock(&pool->lock);
+ 				free_z3fold_page(page);
+ 				return 0;
+ 			}
+-		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
+-			atomic64_dec(&pool->pages_nr);
++			spin_lock(&pool->lock);
++			list_add(&page->lru, &pool->lru);
++			spin_unlock(&pool->lock);
++		} else {
++			z3fold_page_lock(zhdr);
++			clear_bit(UNDER_RECLAIM, &page->private);
++			if (kref_put(&zhdr->refcount,
++					release_z3fold_page_locked)) {
++				atomic64_dec(&pool->pages_nr);
++				return 0;
++			}
++			/*
++			 * if we are here, the page is still not completely
++			 * free. Take the global pool lock then to be able
++			 * to add it back to the lru list
++			 */
++			spin_lock(&pool->lock);
++			list_add(&page->lru, &pool->lru);
+ 			spin_unlock(&pool->lock);
+-			return 0;
++			z3fold_page_unlock(zhdr);
+ 		}
+ 
+-		/*
+-		 * Add to the beginning of LRU.
+-		 * Pool lock has to be kept here to ensure the page has
+-		 * not already been released
+-		 */
+-		list_add(&page->lru, &pool->lru);
++		/* We started off locked to we need to lock the pool back */
++		spin_lock(&pool->lock);
+ 	}
+ 	spin_unlock(&pool->lock);
+ 	return -EAGAIN;
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 09a1f056712a..7579e85af531 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
+ #include <linux/module.h>
+ #include <linux/init.h>
+ 
++/* Hardening for Spectre-v1 */
++#include <linux/nospec.h>
++
+ #include "lec.h"
+ #include "lec_arpc.h"
+ #include "resources.h"
+@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
+ 	bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
+ 	if (bytes_left != 0)
+ 		pr_info("copy from user failed for %d bytes\n", bytes_left);
+-	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
+-	    !dev_lec[ioc_data.dev_num])
++	if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
++		return -EINVAL;
++	ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
++	if (!dev_lec[ioc_data.dev_num])
+ 		return -EINVAL;
+ 	vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+ 	if (!vpriv)
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 752112539753..a685cb02438d 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1821,13 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
+ {
+ 	unsigned int size = info->entries_size;
+ 	const void *entries = info->entries;
+-	int ret;
+ 
+ 	newinfo->entries_size = size;
+-
+-	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
+-	if (ret)
+-		return ret;
++	if (info->nentries) {
++		int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
++						 info->nentries);
++		if (ret)
++			return ret;
++	}
+ 
+ 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ 							entries, newinfo);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index c0548d268e1a..e3e6a3e2ca22 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ 		return -EINVAL;
+ 
+ 	list_for_each_entry(ha, &list->list, list) {
+-		if (!memcmp(ha->addr, addr, addr_len) &&
+-		    ha->type == addr_type) {
++		if (ha->type == addr_type &&
++		    !memcmp(ha->addr, addr, addr_len)) {
+ 			if (global) {
+ 				/* check if addr is already used as global */
+ 				if (ha->global_use)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 857e4e6f751a..789f8edd37ae 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -857,6 +857,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+ 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+ 	n->cloned = 1;
+ 	n->nohdr = 0;
++	n->peeked = 0;
+ 	n->destructor = NULL;
+ 	C(tail);
+ 	C(end);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index e65fcb45c3f6..b08feb219b44 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -614,6 +614,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq = inet_rsk(req);
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 	ireq->ireq_family = AF_INET;
+ 	ireq->ir_iif = sk->sk_bound_dev_if;
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 5df7857fc0f3..6344f1b18a6a 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -351,6 +351,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ 	ireq->ireq_family = AF_INET6;
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 
+ 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index c3ea4906d237..88c5069b5d20 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -178,6 +178,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ 		tw->tw_dport	    = inet->inet_dport;
+ 		tw->tw_family	    = sk->sk_family;
+ 		tw->tw_reuse	    = sk->sk_reuse;
++		tw->tw_reuseport    = sk->sk_reuseport;
+ 		tw->tw_hash	    = sk->sk_hash;
+ 		tw->tw_ipv6only	    = 0;
+ 		tw->tw_transparent  = inet->transparent;
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index 914d56928578..1ef8f86072bd 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -210,6 +210,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
+ 		if (p) {
+ 			p->daddr = *daddr;
++			p->dtime = (__u32)jiffies;
+ 			refcount_set(&p->refcnt, 2);
+ 			atomic_set(&p->rid, 0);
+ 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 299e247b2032..9d9b8358a898 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2306,13 +2306,14 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
+ 					const struct sk_buff *skb)
+ {
+ 	__u8 tos = RT_FL_TOS(fl4);
+-	struct fib_result res;
++	struct fib_result res = {
++		.type		= RTN_UNSPEC,
++		.fi		= NULL,
++		.table		= NULL,
++		.tclassid	= 0,
++	};
+ 	struct rtable *rth;
+ 
+-	res.tclassid	= 0;
+-	res.fi		= NULL;
+-	res.table	= NULL;
+-
+ 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
+ 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 1ab8733dac5f..c92fd253fc46 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2690,7 +2690,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 	case TCP_REPAIR_QUEUE:
+ 		if (!tp->repair)
+ 			err = -EPERM;
+-		else if (val < TCP_QUEUES_NR)
++		else if ((unsigned int)val < TCP_QUEUES_NR)
+ 			tp->repair_queue = val;
+ 		else
+ 			err = -EINVAL;
+diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
+index 34355fd19f27..dc76bc346829 100644
+--- a/net/kcm/kcmsock.c
++++ b/net/kcm/kcmsock.c
+@@ -1425,6 +1425,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
+ 	 */
+ 	if (csk->sk_user_data) {
+ 		write_unlock_bh(&csk->sk_callback_lock);
++		strp_stop(&psock->strp);
+ 		strp_done(&psock->strp);
+ 		kmem_cache_free(kcm_psockp, psock);
+ 		err = -EALREADY;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 5ebde4b15810..f36098887ad0 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ 			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+ 				sizeof(cfg.mcast_ifn));
+ 			cfg.syncid = dm->syncid;
+-			rtnl_lock();
+-			mutex_lock(&ipvs->sync_mutex);
+ 			ret = start_sync_thread(ipvs, &cfg, dm->state);
+-			mutex_unlock(&ipvs->sync_mutex);
+-			rtnl_unlock();
+ 		} else {
+ 			mutex_lock(&ipvs->sync_mutex);
+ 			ret = stop_sync_thread(ipvs, dm->state);
+@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
+ 	if (ipvs->mixed_address_family_dests > 0)
+ 		return -EINVAL;
+ 
+-	rtnl_lock();
+-	mutex_lock(&ipvs->sync_mutex);
+ 	ret = start_sync_thread(ipvs, &c,
+ 				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+-	mutex_unlock(&ipvs->sync_mutex);
+-	rtnl_unlock();
+ 	return ret;
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index fbaf3bd05b2e..001501e25625 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -49,6 +49,7 @@
+ #include <linux/kthread.h>
+ #include <linux/wait.h>
+ #include <linux/kernel.h>
++#include <linux/sched/signal.h>
+ 
+ #include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
+ 
+@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
+ /*
+  *      Specifiy default interface for outgoing multicasts
+  */
+-static int set_mcast_if(struct sock *sk, char *ifname)
++static int set_mcast_if(struct sock *sk, struct net_device *dev)
+ {
+-	struct net_device *dev;
+ 	struct inet_sock *inet = inet_sk(sk);
+-	struct net *net = sock_net(sk);
+-
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
+  *      in the in_addr structure passed in as a parameter.
+  */
+ static int
+-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
++join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
+ {
+-	struct net *net = sock_net(sk);
+ 	struct ip_mreqn mreq;
+-	struct net_device *dev;
+ 	int ret;
+ 
+ 	memset(&mreq, 0, sizeof(mreq));
+ 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+ 
+@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+ 
+ #ifdef CONFIG_IP_VS_IPV6
+ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+-			     char *ifname)
++			     struct net_device *dev)
+ {
+-	struct net *net = sock_net(sk);
+-	struct net_device *dev;
+ 	int ret;
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+ 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+ 		return -EINVAL;
+ 
+@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+ }
+ #endif
+ 
+-static int bind_mcastif_addr(struct socket *sock, char *ifname)
++static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
+ {
+-	struct net *net = sock_net(sock->sk);
+-	struct net_device *dev;
+ 	__be32 addr;
+ 	struct sockaddr_in sin;
+ 
+-	dev = __dev_get_by_name(net, ifname);
+-	if (!dev)
+-		return -ENODEV;
+-
+ 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
+ 	if (!addr)
+ 		pr_err("You probably need to specify IP address on "
+ 		       "multicast interface.\n");
+ 
+ 	IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
+-		  ifname, &addr);
++		  dev->name, &addr);
+ 
+ 	/* Now bind the socket with the address of multicast interface */
+ 	sin.sin_family	     = AF_INET;
+@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+ /*
+  *      Set up sending multicast socket over UDP
+  */
+-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
++static int make_send_sock(struct netns_ipvs *ipvs, int id,
++			  struct net_device *dev, struct socket **sock_ret)
+ {
+ 	/* multicast addr */
+ 	union ipvs_sockaddr mcast_addr;
+@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 				  IPPROTO_UDP, &sock);
+ 	if (result < 0) {
+ 		pr_err("Error during creation of socket; terminating\n");
+-		return ERR_PTR(result);
++		goto error;
+ 	}
+-	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
++	*sock_ret = sock;
++	result = set_mcast_if(sock->sk, dev);
+ 	if (result < 0) {
+ 		pr_err("Error setting outbound mcast interface\n");
+ 		goto error;
+@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 		set_sock_size(sock->sk, 1, result);
+ 
+ 	if (AF_INET == ipvs->mcfg.mcast_af)
+-		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
++		result = bind_mcastif_addr(sock, dev);
+ 	else
+ 		result = 0;
+ 	if (result < 0) {
+@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+ 		goto error;
+ 	}
+ 
+-	return sock;
++	return 0;
+ 
+ error:
+-	sock_release(sock);
+-	return ERR_PTR(result);
++	return result;
+ }
+ 
+ 
+ /*
+  *      Set up receiving multicast socket over UDP
+  */
+-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+-					int ifindex)
++static int make_receive_sock(struct netns_ipvs *ipvs, int id,
++			     struct net_device *dev, struct socket **sock_ret)
+ {
+ 	/* multicast addr */
+ 	union ipvs_sockaddr mcast_addr;
+@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 				  IPPROTO_UDP, &sock);
+ 	if (result < 0) {
+ 		pr_err("Error during creation of socket; terminating\n");
+-		return ERR_PTR(result);
++		goto error;
+ 	}
++	*sock_ret = sock;
+ 	/* it is equivalent to the REUSEADDR option in user-space */
+ 	sock->sk->sk_reuse = SK_CAN_REUSE;
+ 	result = sysctl_sync_sock_size(ipvs);
+@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ 		set_sock_size(sock->sk, 0, result);
+ 
+ 	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+-	sock->sk->sk_bound_dev_if = ifindex;
++	sock->sk->sk_bound_dev_if = dev->ifindex;
+ 	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ 	if (result < 0) {
+ 		pr_err("Error binding to the multicast addr\n");
+@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+ #ifdef CONFIG_IP_VS_IPV6
+ 	if (ipvs->bcfg.mcast_af == AF_INET6)
+ 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+-					   ipvs->bcfg.mcast_ifn);
++					   dev);
+ 	else
+ #endif
+ 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+-					  ipvs->bcfg.mcast_ifn);
++					  dev);
+ 	if (result < 0) {
+ 		pr_err("Error joining to the multicast group\n");
+ 		goto error;
+ 	}
+ 
+-	return sock;
++	return 0;
+ 
+ error:
+-	sock_release(sock);
+-	return ERR_PTR(result);
++	return result;
+ }
+ 
+ 
+@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
+ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		      int state)
+ {
+-	struct ip_vs_sync_thread_data *tinfo;
++	struct ip_vs_sync_thread_data *tinfo = NULL;
+ 	struct task_struct **array = NULL, *task;
+-	struct socket *sock;
+ 	struct net_device *dev;
+ 	char *name;
+ 	int (*threadfn)(void *data);
+-	int id, count, hlen;
++	int id = 0, count, hlen;
+ 	int result = -ENOMEM;
+ 	u16 mtu, min_mtu;
+ 
+@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
+ 		  sizeof(struct ip_vs_sync_conn_v0));
+ 
++	/* Do not hold one mutex and then to block on another */
++	for (;;) {
++		rtnl_lock();
++		if (mutex_trylock(&ipvs->sync_mutex))
++			break;
++		rtnl_unlock();
++		mutex_lock(&ipvs->sync_mutex);
++		if (rtnl_trylock())
++			break;
++		mutex_unlock(&ipvs->sync_mutex);
++	}
++
+ 	if (!ipvs->sync_state) {
+ 		count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
+ 		ipvs->threads_mask = count - 1;
+@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
+ 	if (!dev) {
+ 		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+-		return -ENODEV;
++		result = -ENODEV;
++		goto out_early;
+ 	}
+ 	hlen = (AF_INET6 == c->mcast_af) ?
+ 	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		c->sync_maxlen = mtu - hlen;
+ 
+ 	if (state == IP_VS_STATE_MASTER) {
++		result = -EEXIST;
+ 		if (ipvs->ms)
+-			return -EEXIST;
++			goto out_early;
+ 
+ 		ipvs->mcfg = *c;
+ 		name = "ipvs-m:%d:%d";
+ 		threadfn = sync_thread_master;
+ 	} else if (state == IP_VS_STATE_BACKUP) {
++		result = -EEXIST;
+ 		if (ipvs->backup_threads)
+-			return -EEXIST;
++			goto out_early;
+ 
+ 		ipvs->bcfg = *c;
+ 		name = "ipvs-b:%d:%d";
+ 		threadfn = sync_thread_backup;
+ 	} else {
+-		return -EINVAL;
++		result = -EINVAL;
++		goto out_early;
+ 	}
+ 
+ 	if (state == IP_VS_STATE_MASTER) {
+ 		struct ipvs_master_sync_state *ms;
+ 
++		result = -ENOMEM;
+ 		ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
+ 		if (!ipvs->ms)
+ 			goto out;
+@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	} else {
+ 		array = kcalloc(count, sizeof(struct task_struct *),
+ 				GFP_KERNEL);
++		result = -ENOMEM;
+ 		if (!array)
+ 			goto out;
+ 	}
+ 
+-	tinfo = NULL;
+ 	for (id = 0; id < count; id++) {
+-		if (state == IP_VS_STATE_MASTER)
+-			sock = make_send_sock(ipvs, id);
+-		else
+-			sock = make_receive_sock(ipvs, id, dev->ifindex);
+-		if (IS_ERR(sock)) {
+-			result = PTR_ERR(sock);
+-			goto outtinfo;
+-		}
++		result = -ENOMEM;
+ 		tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+ 		if (!tinfo)
+-			goto outsocket;
++			goto out;
+ 		tinfo->ipvs = ipvs;
+-		tinfo->sock = sock;
++		tinfo->sock = NULL;
+ 		if (state == IP_VS_STATE_BACKUP) {
+ 			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
+ 					     GFP_KERNEL);
+ 			if (!tinfo->buf)
+-				goto outtinfo;
++				goto out;
+ 		} else {
+ 			tinfo->buf = NULL;
+ 		}
+ 		tinfo->id = id;
++		if (state == IP_VS_STATE_MASTER)
++			result = make_send_sock(ipvs, id, dev, &tinfo->sock);
++		else
++			result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
++		if (result < 0)
++			goto out;
+ 
+ 		task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
+ 		if (IS_ERR(task)) {
+ 			result = PTR_ERR(task);
+-			goto outtinfo;
++			goto out;
+ 		}
+ 		tinfo = NULL;
+ 		if (state == IP_VS_STATE_MASTER)
+@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 	ipvs->sync_state |= state;
+ 	spin_unlock_bh(&ipvs->sync_buff_lock);
+ 
++	mutex_unlock(&ipvs->sync_mutex);
++	rtnl_unlock();
++
+ 	/* increase the module use count */
+ 	ip_vs_use_count_inc();
+ 
+ 	return 0;
+ 
+-outsocket:
+-	sock_release(sock);
+-
+-outtinfo:
+-	if (tinfo) {
+-		sock_release(tinfo->sock);
+-		kfree(tinfo->buf);
+-		kfree(tinfo);
+-	}
++out:
++	/* We do not need RTNL lock anymore, release it here so that
++	 * sock_release below and in the kthreads can use rtnl_lock
++	 * to leave the mcast group.
++	 */
++	rtnl_unlock();
+ 	count = id;
+ 	while (count-- > 0) {
+ 		if (state == IP_VS_STATE_MASTER)
+@@ -1932,13 +1927,23 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ 		else
+ 			kthread_stop(array[count]);
+ 	}
+-	kfree(array);
+-
+-out:
+ 	if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ 		kfree(ipvs->ms);
+ 		ipvs->ms = NULL;
+ 	}
++	mutex_unlock(&ipvs->sync_mutex);
++	if (tinfo) {
++		if (tinfo->sock)
++			sock_release(tinfo->sock);
++		kfree(tinfo->buf);
++		kfree(tinfo);
++	}
++	kfree(array);
++	return result;
++
++out_early:
++	mutex_unlock(&ipvs->sync_mutex);
++	rtnl_unlock();
+ 	return result;
+ }
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 70c455341243..02506752051d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1845,6 +1845,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_namelen) {
+ 		err = -EINVAL;
++		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
++			goto out;
+ 		if (addr->nl_family != AF_NETLINK)
+ 			goto out;
+ 		dst_portid = addr->nl_pid;
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 44c4652721af..ae18892a7010 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -275,13 +275,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
+ static void rds_tcp_conn_free(void *arg)
+ {
+ 	struct rds_tcp_connection *tc = arg;
++	unsigned long flags;
+ 
+ 	rdsdebug("freeing tc %p\n", tc);
+ 
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+ 	if (!tc->t_tcp_node_detached)
+ 		list_del(&tc->t_tcp_node);
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+ 
+ 	kmem_cache_free(rds_tcp_conn_slab, tc);
+ }
+@@ -311,13 +312,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
+ 		rdsdebug("rds_conn_path [%d] tc %p\n", i,
+ 			 conn->c_path[i].cp_transport_data);
+ 	}
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
+ 		tc = conn->c_path[i].cp_transport_data;
+ 		tc->t_tcp_node_detached = false;
+ 		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ fail:
+ 	if (ret) {
+ 		for (j = 0; j < i; j++)
+@@ -529,7 +530,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ 
+ 	rtn->rds_tcp_listen_sock = NULL;
+ 	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+@@ -542,7 +543,7 @@ static void rds_tcp_kill_sock(struct net *net)
+ 			tc->t_tcp_node_detached = true;
+ 		}
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
+ 		rds_conn_destroy(tc->t_cpath->cp_conn);
+ }
+@@ -590,7 +591,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
+ {
+ 	struct rds_tcp_connection *tc, *_tc;
+ 
+-	spin_lock_bh(&rds_tcp_conn_lock);
++	spin_lock_irq(&rds_tcp_conn_lock);
+ 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+ 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
+ 
+@@ -600,7 +601,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
+ 		/* reconnect with new parameters */
+ 		rds_conn_path_drop(tc->t_cpath, false);
+ 	}
+-	spin_unlock_bh(&rds_tcp_conn_lock);
++	spin_unlock_irq(&rds_tcp_conn_lock);
+ }
+ 
+ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
+diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
+index 41bd496531d4..00192a996be0 100644
+--- a/net/rfkill/rfkill-gpio.c
++++ b/net/rfkill/rfkill-gpio.c
+@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
+ 
+ 	ret = rfkill_register(rfkill->rfkill_dev);
+ 	if (ret < 0)
+-		return ret;
++		goto err_destroy;
+ 
+ 	platform_set_drvdata(pdev, rfkill);
+ 
+ 	dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
+ 
+ 	return 0;
++
++err_destroy:
++	rfkill_destroy(rfkill->rfkill_dev);
++
++	return ret;
+ }
+ 
+ static int rfkill_gpio_remove(struct platform_device *pdev)


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-09 10:57 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-09 10:57 UTC (permalink / raw
  To: gentoo-commits

commit:     0498f014b69ef826fe7c972a5ab3d10ba1f29877
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May  9 10:57:07 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May  9 10:57:07 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0498f014

Linux patch 4.16.8

 0000_README             |    4 +
 1007_linux-4.16.8.patch | 2881 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2885 insertions(+)

diff --git a/0000_README b/0000_README
index 1139362..b4a9e43 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-4.16.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.7
 
+Patch:  1007_linux-4.16.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.16.8.patch b/1007_linux-4.16.8.patch
new file mode 100644
index 0000000..51d061b
--- /dev/null
+++ b/1007_linux-4.16.8.patch
@@ -0,0 +1,2881 @@
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index c1d520de6dfe..3b2f2dd82225 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -55,6 +55,7 @@ stable kernels.
+ | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220        |
+ | ARM            | Cortex-A72      | #853709         | N/A                         |
+ | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
++| ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
+ | ARM            | MMU-500         | #841119,#826419 | N/A                         |
+ |                |                 |                 |                             |
+ | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
+diff --git a/Makefile b/Makefile
+index 1c5d5d8c45e2..5da6ffd69209 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7381eeb7ef8e..be665760f2bd 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -464,6 +464,20 @@ config ARM64_ERRATUM_843419
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_1024718
++	bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
++	default y
++	help
++	  This option adds work around for Arm Cortex-A55 Erratum 1024718.
++
++	  Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++	  update of the hardware dirty bit when the DBM/AP bits are updated
++	  without a break-before-make. The work around is to disable the usage
++	  of hardware DBM locally on the affected cores. CPUs not affected by
++	  erratum will continue to use the feature.
++
++	  If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ 	bool "Cavium erratum 22375, 24313"
+ 	default y
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 3c78835bba94..a3ca19e68b73 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -25,6 +25,7 @@
+ 
+ #include <asm/asm-offsets.h>
+ #include <asm/cpufeature.h>
++#include <asm/cputype.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/page.h>
+ #include <asm/pgtable-hwdef.h>
+@@ -595,4 +596,43 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
+ #endif
+ 	.endm
+ 
++/*
++ * Check the MIDR_EL1 of the current CPU for a given model and a range of
++ * variant/revision. See asm/cputype.h for the macros used below.
++ *
++ *	model:		MIDR_CPU_MODEL of CPU
++ *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
++ *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
++ *	res:		Result register.
++ *	tmp1, tmp2, tmp3: Temporary registers
++ *
++ * Corrupts: res, tmp1, tmp2, tmp3
++ * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
++ */
++	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
++	mrs		\res, midr_el1
++	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
++	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
++	and		\tmp3, \res, \tmp2	// Extract model
++	and		\tmp1, \res, \tmp1	// rev & variant
++	mov_q		\tmp2, \model
++	cmp		\tmp3, \tmp2
++	cset		\res, eq
++	cbz		\res, .Ldone\@		// Model matches ?
++
++	.if (\rv_min != 0)			// Skip min check if rv_min == 0
++	mov_q		\tmp3, \rv_min
++	cmp		\tmp1, \tmp3
++	cset		\res, ge
++	.endif					// \rv_min != 0
++	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
++	.if ((\rv_min != \rv_max) || \rv_min == 0)
++	mov_q		\tmp2, \rv_max
++	cmp		\tmp1, \tmp2
++	cset		\tmp2, le
++	and		\res, \res, \tmp2
++	.endif
++.Ldone\@:
++	.endm
++
+ #endif	/* __ASM_ASSEMBLER_H */
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 350c76a1d15b..8e32a6f28f00 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -83,6 +83,7 @@
+ #define ARM_CPU_PART_CORTEX_A53		0xD03
+ #define ARM_CPU_PART_CORTEX_A73		0xD09
+ #define ARM_CPU_PART_CORTEX_A75		0xD0A
++#define ARM_CPU_PART_CORTEX_A55		0xD05
+ 
+ #define APM_CPU_PART_POTENZA		0x000
+ 
+@@ -102,6 +103,7 @@
+ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+ #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
++#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+ #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index c0af47617299..5244440baea4 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -448,6 +448,11 @@ ENTRY(__cpu_setup)
+ 	cbz	x9, 2f
+ 	cmp	x9, #2
+ 	b.lt	1f
++#ifdef CONFIG_ARM64_ERRATUM_1024718
++	/* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
++	cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
++	cbnz	x1, 1f
++#endif
+ 	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
+ 1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+ 2:
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 3615c0f255e9..c375b0aef877 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
+ 	.resume			= tsc_resume,
+ 	.mark_unstable		= tsc_cs_mark_unstable,
+ 	.tick_stable		= tsc_cs_tick_stable,
++	.list			= LIST_HEAD_INIT(clocksource_tsc_early.list),
+ };
+ 
+ /*
+@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
+ 	.resume			= tsc_resume,
+ 	.mark_unstable		= tsc_cs_mark_unstable,
+ 	.tick_stable		= tsc_cs_tick_stable,
++	.list			= LIST_HEAD_INIT(clocksource_tsc.list),
+ };
+ 
+ void mark_tsc_unstable(char *reason)
+@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
+ 		clear_sched_clock_stable();
+ 	disable_sched_clock_irqtime();
+ 	pr_info("Marking TSC unstable due to %s\n", reason);
+-	/* Change only the rating, when not registered */
+-	if (clocksource_tsc.mult) {
+-		clocksource_mark_unstable(&clocksource_tsc);
+-	} else {
+-		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+-		clocksource_tsc.rating = 0;
+-	}
++
++	clocksource_mark_unstable(&clocksource_tsc_early);
++	clocksource_mark_unstable(&clocksource_tsc);
+ }
+ 
+ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+@@ -1205,7 +1203,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
+ 
+ 	/* Don't bother refining TSC on unstable systems */
+ 	if (tsc_unstable)
+-		return;
++		goto unreg;
+ 
+ 	/*
+ 	 * Since the work is started early in boot, we may be
+@@ -1258,11 +1256,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
+ 
+ out:
+ 	if (tsc_unstable)
+-		return;
++		goto unreg;
+ 
+ 	if (boot_cpu_has(X86_FEATURE_ART))
+ 		art_related_clocksource = &clocksource_tsc;
+ 	clocksource_register_khz(&clocksource_tsc, tsc_khz);
++unreg:
+ 	clocksource_unregister(&clocksource_tsc_early);
+ }
+ 
+@@ -1272,8 +1271,8 @@ static int __init init_tsc_clocksource(void)
+ 	if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
+ 		return 0;
+ 
+-	if (check_tsc_unstable())
+-		return 0;
++	if (tsc_unstable)
++		goto unreg;
+ 
+ 	if (tsc_clocksource_reliable)
+ 		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+@@ -1289,6 +1288,7 @@ static int __init init_tsc_clocksource(void)
+ 		if (boot_cpu_has(X86_FEATURE_ART))
+ 			art_related_clocksource = &clocksource_tsc;
+ 		clocksource_register_khz(&clocksource_tsc, tsc_khz);
++unreg:
+ 		clocksource_unregister(&clocksource_tsc_early);
+ 		return 0;
+ 	}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 391dda8d43b7..7cf470a3755f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1455,23 +1455,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
+ 	local_irq_restore(flags);
+ }
+ 
+-static void start_sw_period(struct kvm_lapic *apic)
+-{
+-	if (!apic->lapic_timer.period)
+-		return;
+-
+-	if (apic_lvtt_oneshot(apic) &&
+-	    ktime_after(ktime_get(),
+-			apic->lapic_timer.target_expiration)) {
+-		apic_timer_expired(apic);
+-		return;
+-	}
+-
+-	hrtimer_start(&apic->lapic_timer.timer,
+-		apic->lapic_timer.target_expiration,
+-		HRTIMER_MODE_ABS_PINNED);
+-}
+-
+ static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
+ {
+ 	ktime_t now, remaining;
+@@ -1538,6 +1521,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
+ 				apic->lapic_timer.period);
+ }
+ 
++static void start_sw_period(struct kvm_lapic *apic)
++{
++	if (!apic->lapic_timer.period)
++		return;
++
++	if (ktime_after(ktime_get(),
++			apic->lapic_timer.target_expiration)) {
++		apic_timer_expired(apic);
++
++		if (apic_lvtt_oneshot(apic))
++			return;
++
++		advance_periodic_target_expiration(apic);
++	}
++
++	hrtimer_start(&apic->lapic_timer.timer,
++		apic->lapic_timer.target_expiration,
++		HRTIMER_MODE_ABS_PINNED);
++}
++
+ bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
+ {
+ 	if (!lapic_in_kernel(vcpu))
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index e1eee7a60fad..f1cc4f9d31cd 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
+ 		  NULL, 0644);
+ MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
+ 
+-module_acpi_driver(acpi_button_driver);
++static int acpi_button_register_driver(struct acpi_driver *driver)
++{
++	/*
++	 * Modules such as nouveau.ko and i915.ko have a link time dependency
++	 * on acpi_lid_open(), and would therefore not be loadable on ACPI
++	 * capable kernels booted in non-ACPI mode if the return value of
++	 * acpi_bus_register_driver() is returned from here with ACPI disabled
++	 * when this driver is built as a module.
++	 */
++	if (acpi_disabled)
++		return 0;
++
++	return acpi_bus_register_driver(driver);
++}
++
++static void acpi_button_unregister_driver(struct acpi_driver *driver)
++{
++	if (!acpi_disabled)
++		acpi_bus_unregister_driver(driver);
++}
++
++module_driver(acpi_button_driver, acpi_button_register_driver,
++	       acpi_button_unregister_driver);
+diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
+index de5e7dee7ad6..2e6c61d9b8ea 100644
+--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
++++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
+@@ -55,7 +55,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
+ 	}
+ 
+ 	drm_mode_connector_update_edid_property(connector, edid);
+-	return drm_add_edid_modes(connector, edid);
++	ret = drm_add_edid_modes(connector, edid);
++	kfree(edid);
++	return ret;
+ 
+ fallback:
+ 	/*
+diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
+index ce1e3b9e14c9..75f379c67a31 100644
+--- a/drivers/gpu/drm/vc4/vc4_crtc.c
++++ b/drivers/gpu/drm/vc4/vc4_crtc.c
+@@ -735,6 +735,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
+ struct vc4_async_flip_state {
+ 	struct drm_crtc *crtc;
+ 	struct drm_framebuffer *fb;
++	struct drm_framebuffer *old_fb;
+ 	struct drm_pending_vblank_event *event;
+ 
+ 	struct vc4_seqno_cb cb;
+@@ -764,6 +765,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
+ 
+ 	drm_crtc_vblank_put(crtc);
+ 	drm_framebuffer_put(flip_state->fb);
++
++	/* Decrement the BO usecnt in order to keep the inc/dec calls balanced
++	 * when the planes are updated through the async update path.
++	 * FIXME: we should move to generic async-page-flip when it's
++	 * available, so that we can get rid of this hand-made cleanup_fb()
++	 * logic.
++	 */
++	if (flip_state->old_fb) {
++		struct drm_gem_cma_object *cma_bo;
++		struct vc4_bo *bo;
++
++		cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
++		bo = to_vc4_bo(&cma_bo->base);
++		vc4_bo_dec_usecnt(bo);
++		drm_framebuffer_put(flip_state->old_fb);
++	}
++
+ 	kfree(flip_state);
+ 
+ 	up(&vc4->async_modeset);
+@@ -788,9 +806,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
+ 	struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
+ 	struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
+ 
++	/* Increment the BO usecnt here, so that we never end up with an
++	 * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
++	 * plane is later updated through the non-async path.
++	 * FIXME: we should move to generic async-page-flip when it's
++	 * available, so that we can get rid of this hand-made prepare_fb()
++	 * logic.
++	 */
++	ret = vc4_bo_inc_usecnt(bo);
++	if (ret)
++		return ret;
++
+ 	flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
+-	if (!flip_state)
++	if (!flip_state) {
++		vc4_bo_dec_usecnt(bo);
+ 		return -ENOMEM;
++	}
+ 
+ 	drm_framebuffer_get(fb);
+ 	flip_state->fb = fb;
+@@ -801,10 +832,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
+ 	ret = down_interruptible(&vc4->async_modeset);
+ 	if (ret) {
+ 		drm_framebuffer_put(fb);
++		vc4_bo_dec_usecnt(bo);
+ 		kfree(flip_state);
+ 		return ret;
+ 	}
+ 
++	/* Save the current FB before it's replaced by the new one in
++	 * drm_atomic_set_fb_for_plane(). We'll need the old FB in
++	 * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
++	 * it consistent.
++	 * FIXME: we should move to generic async-page-flip when it's
++	 * available, so that we can get rid of this hand-made cleanup_fb()
++	 * logic.
++	 */
++	flip_state->old_fb = plane->state->fb;
++	if (flip_state->old_fb)
++		drm_framebuffer_get(flip_state->old_fb);
++
+ 	WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ 
+ 	/* Immediately update the plane's legacy fb pointer, so that later
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 3c824fd7cbf3..af9fd49c22e6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2598,6 +2598,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
+ 		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
+ 					     out_fence, NULL);
+ 
++	vmw_dmabuf_unreference(&ctx->buf);
+ 	vmw_resource_unreserve(res, false, NULL, 0);
+ 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 5c21ae237f82..006e9c0e1ea3 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -678,7 +678,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
+-	if (!rdma_addr_size_in6(&cmd.src_addr) ||
++	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
+ 	    !rdma_addr_size_in6(&cmd.dst_addr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 6f2b26126c64..2be2e1ac1b5f 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
+  * Deal with out-of-order and/or completions that complete
+  * prior unsignalled WRs.
+  */
+-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
++void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
+ {
+ 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
+ 	struct c4iw_qp *qhp;
+@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+ 		if (qhp == NULL)
+ 			goto next_cqe;
+ 
++		if (flush_qhp != qhp) {
++			spin_lock(&qhp->lock);
++
++			if (qhp->wq.flushed == 1)
++				goto next_cqe;
++		}
++
+ 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
+ 			goto next_cqe;
+ 
+@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+ next_cqe:
+ 		t4_hwcq_consume(&chp->cq);
+ 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
++		if (qhp && flush_qhp != qhp)
++			spin_unlock(&qhp->lock);
+ 	}
+ }
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
+index 7a9d0de89d6a..01d94cd88916 100644
+--- a/drivers/infiniband/hw/cxgb4/device.c
++++ b/drivers/infiniband/hw/cxgb4/device.c
+@@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+ 
+ 	rdev->status_page->db_off = 0;
+ 
++	init_completion(&rdev->rqt_compl);
++	init_completion(&rdev->pbl_compl);
++	kref_init(&rdev->rqt_kref);
++	kref_init(&rdev->pbl_kref);
++
+ 	return 0;
+ err_free_status_page_and_wr_log:
+ 	if (c4iw_wr_log && rdev->wr_log)
+@@ -893,13 +898,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
+ 
+ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
+ {
+-	destroy_workqueue(rdev->free_workq);
+ 	kfree(rdev->wr_log);
+ 	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
+ 	free_page((unsigned long)rdev->status_page);
+ 	c4iw_pblpool_destroy(rdev);
+ 	c4iw_rqtpool_destroy(rdev);
++	wait_for_completion(&rdev->pbl_compl);
++	wait_for_completion(&rdev->rqt_compl);
+ 	c4iw_ocqp_pool_destroy(rdev);
++	destroy_workqueue(rdev->free_workq);
+ 	c4iw_destroy_resource(&rdev->resource);
+ }
+ 
+diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+index cc929002c05e..831027717121 100644
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -185,6 +185,10 @@ struct c4iw_rdev {
+ 	struct wr_log_entry *wr_log;
+ 	int wr_log_size;
+ 	struct workqueue_struct *free_workq;
++	struct completion rqt_compl;
++	struct completion pbl_compl;
++	struct kref rqt_kref;
++	struct kref pbl_kref;
+ };
+ 
+ static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+@@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
+ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
+ void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
++void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
+ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
+ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index de77b6027d69..ae167b686608 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+ 	qhp->wq.flushed = 1;
+ 	t4_set_wq_in_error(&qhp->wq);
+ 
+-	c4iw_flush_hw_cq(rchp);
++	c4iw_flush_hw_cq(rchp, qhp);
+ 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+ 	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ 
+ 	if (schp != rchp)
+-		c4iw_flush_hw_cq(schp);
++		c4iw_flush_hw_cq(schp, qhp);
+ 	sq_flushed = c4iw_flush_sq(qhp);
+ 
+ 	spin_unlock(&qhp->lock);
+diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
+index 3cf25997ed2b..0ef25ae05e6f 100644
+--- a/drivers/infiniband/hw/cxgb4/resource.c
++++ b/drivers/infiniband/hw/cxgb4/resource.c
+@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
+ 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
+ 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
+ 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
++		kref_get(&rdev->pbl_kref);
+ 	} else
+ 		rdev->stats.pbl.fail++;
+ 	mutex_unlock(&rdev->stats.lock);
+ 	return (u32)addr;
+ }
+ 
++static void destroy_pblpool(struct kref *kref)
++{
++	struct c4iw_rdev *rdev;
++
++	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
++	gen_pool_destroy(rdev->pbl_pool);
++	complete(&rdev->pbl_compl);
++}
++
+ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+ {
+ 	pr_debug("addr 0x%x size %d\n", addr, size);
+@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+ 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
+ 	mutex_unlock(&rdev->stats.lock);
+ 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
++	kref_put(&rdev->pbl_kref, destroy_pblpool);
+ }
+ 
+ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+ 
+ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
+ {
+-	gen_pool_destroy(rdev->pbl_pool);
++	kref_put(&rdev->pbl_kref, destroy_pblpool);
+ }
+ 
+ /*
+@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
+ 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
+ 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
+ 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
++		kref_get(&rdev->rqt_kref);
+ 	} else
+ 		rdev->stats.rqt.fail++;
+ 	mutex_unlock(&rdev->stats.lock);
+ 	return (u32)addr;
+ }
+ 
++static void destroy_rqtpool(struct kref *kref)
++{
++	struct c4iw_rdev *rdev;
++
++	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
++	gen_pool_destroy(rdev->rqt_pool);
++	complete(&rdev->rqt_compl);
++}
++
+ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+ {
+ 	pr_debug("addr 0x%x size %d\n", addr, size << 6);
+@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+ 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
+ 	mutex_unlock(&rdev->stats.lock);
+ 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
++	kref_put(&rdev->rqt_kref, destroy_rqtpool);
+ }
+ 
+ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+ 
+ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
+ {
+-	gen_pool_destroy(rdev->rqt_pool);
++	kref_put(&rdev->rqt_kref, destroy_rqtpool);
+ }
+ 
+ /*
+diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
+index addc68e83606..78f7c4e743dd 100644
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -432,31 +432,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ 			       bool do_cnp)
+ {
+ 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
++	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ 	struct ib_other_headers *ohdr = pkt->ohdr;
+ 	struct ib_grh *grh = pkt->grh;
+ 	u32 rqpn = 0, bth1;
+-	u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr);
++	u16 pkey;
++	u32 rlid, slid, dlid = 0;
+ 	u8 hdr_type, sc, svc_type;
+ 	bool is_mcast = false;
+ 
++	/* can be called from prescan */
+ 	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ 		is_mcast = hfi1_is_16B_mcast(dlid);
+ 		pkey = hfi1_16B_get_pkey(pkt->hdr);
+ 		sc = hfi1_16B_get_sc(pkt->hdr);
++		dlid = hfi1_16B_get_dlid(pkt->hdr);
++		slid = hfi1_16B_get_slid(pkt->hdr);
+ 		hdr_type = HFI1_PKT_TYPE_16B;
+ 	} else {
+ 		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+ 			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ 		pkey = ib_bth_get_pkey(ohdr);
+ 		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
++		dlid = ib_get_dlid(pkt->hdr);
++		slid = ib_get_slid(pkt->hdr);
+ 		hdr_type = HFI1_PKT_TYPE_9B;
+ 	}
+ 
+ 	switch (qp->ibqp.qp_type) {
++	case IB_QPT_UD:
++		dlid = ppd->lid;
++		rlid = slid;
++		rqpn = ib_get_sqpn(pkt->ohdr);
++		svc_type = IB_CC_SVCTYPE_UD;
++		break;
+ 	case IB_QPT_SMI:
+ 	case IB_QPT_GSI:
+-	case IB_QPT_UD:
+-		rlid = ib_get_slid(pkt->hdr);
++		rlid = slid;
+ 		rqpn = ib_get_sqpn(pkt->ohdr);
+ 		svc_type = IB_CC_SVCTYPE_UD;
+ 		break;
+@@ -481,7 +493,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ 					      dlid, rlid, sc, grh);
+ 
+ 	if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
+-		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ 		u32 lqpn = bth1 & RVT_QPN_MASK;
+ 		u8 sl = ibp->sc_to_sl[sc];
+ 
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index 90bc8c76d2ca..43050006f301 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1538,13 +1538,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd);
+ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
+ 		  u32 rqpn, u8 svc_type);
+ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+-		u32 pkey, u32 slid, u32 dlid, u8 sc5,
++		u16 pkey, u32 slid, u32 dlid, u8 sc5,
+ 		const struct ib_grh *old_grh);
+ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+-		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
++		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ 		    u8 sc5, const struct ib_grh *old_grh);
+ typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+-				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
++				u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ 				u8 sc5, const struct ib_grh *old_grh);
+ 
+ #define PKEY_CHECK_INVALID -1
+@@ -2438,7 +2438,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
+ 		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
+ 	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
+ 		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
+-	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
++	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
+ 	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
+ 
+ 	hdr->lrh[0] = lrh0;
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index 33eba2356742..b27fe75c7102 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -1265,6 +1265,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+ 		return ERR_PTR(-ENOMEM);
+ 	dd->num_pports = nports;
+ 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
++	dd->pcidev = pdev;
++	pci_set_drvdata(pdev, dd);
+ 
+ 	INIT_LIST_HEAD(&dd->list);
+ 	idr_preload(GFP_KERNEL);
+diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
+index 83d66e862207..c1c982908b4b 100644
+--- a/drivers/infiniband/hw/hfi1/pcie.c
++++ b/drivers/infiniband/hw/hfi1/pcie.c
+@@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
+ 	resource_size_t addr;
+ 	int ret = 0;
+ 
+-	dd->pcidev = pdev;
+-	pci_set_drvdata(pdev, dd);
+-
+ 	addr = pci_resource_start(pdev, 0);
+ 	len = pci_resource_len(pdev, 0);
+ 
+diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
+index 3daa94bdae3a..c0071ca4147a 100644
+--- a/drivers/infiniband/hw/hfi1/ruc.c
++++ b/drivers/infiniband/hw/hfi1/ruc.c
+@@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
+ 	ohdr->bth[2] = cpu_to_be32(bth2);
+ }
+ 
++/**
++ * hfi1_make_ruc_header_16B - build a 16B header
++ * @qp: the queue pair
++ * @ohdr: a pointer to the destination header memory
++ * @bth0: bth0 passed in from the RC/UC builder
++ * @bth2: bth2 passed in from the RC/UC builder
++ * @middle: non zero implies indicates ahg "could" be used
++ * @ps: the current packet state
++ *
++ * This routine may disarm ahg under these situations:
++ * - packet needs a GRH
++ * - BECN needed
++ * - migration state not IB_MIG_MIGRATED
++ */
+ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ 					    struct ib_other_headers *ohdr,
+ 					    u32 bth0, u32 bth2, int middle,
+@@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ 	else
+ 		middle = 0;
+ 
++	if (qp->s_flags & RVT_S_ECN) {
++		qp->s_flags &= ~RVT_S_ECN;
++		/* we recently received a FECN, so return a BECN */
++		becn = true;
++		middle = 0;
++	}
+ 	if (middle)
+ 		build_ahg(qp, bth2);
+ 	else
+@@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ 
+ 	bth0 |= pkey;
+ 	bth0 |= extra_bytes << 20;
+-	if (qp->s_flags & RVT_S_ECN) {
+-		qp->s_flags &= ~RVT_S_ECN;
+-		/* we recently received a FECN, so return a BECN */
+-		becn = true;
+-	}
+ 	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+ 
+ 	if (!ppd->lid)
+@@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
+ 			  pkey, becn, 0, l4, priv->s_sc);
+ }
+ 
++/**
++ * hfi1_make_ruc_header_9B - build a 9B header
++ * @qp: the queue pair
++ * @ohdr: a pointer to the destination header memory
++ * @bth0: bth0 passed in from the RC/UC builder
++ * @bth2: bth2 passed in from the RC/UC builder
++ * @middle: non zero implies indicates ahg "could" be used
++ * @ps: the current packet state
++ *
++ * This routine may disarm ahg under these situations:
++ * - packet needs a GRH
++ * - BECN needed
++ * - migration state not IB_MIG_MIGRATED
++ */
+ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
+ 					   struct ib_other_headers *ohdr,
+ 					   u32 bth0, u32 bth2, int middle,
+@@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
+ 	else
+ 		middle = 0;
+ 
++	if (qp->s_flags & RVT_S_ECN) {
++		qp->s_flags &= ~RVT_S_ECN;
++		/* we recently received a FECN, so return a BECN */
++		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
++		middle = 0;
++	}
+ 	if (middle)
+ 		build_ahg(qp, bth2);
+ 	else
+@@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
+ 
+ 	bth0 |= pkey;
+ 	bth0 |= extra_bytes << 20;
+-	if (qp->s_flags & RVT_S_ECN) {
+-		qp->s_flags &= ~RVT_S_ECN;
+-		/* we recently received a FECN, so return a BECN */
+-		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
+-	}
+ 	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
+ 	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
+ 			 lrh0,
+diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
+index bcf3b0bebac8..69c17a5ef038 100644
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
+ }
+ 
+ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+-		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
++		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
+ 		    u8 sc5, const struct ib_grh *old_grh)
+ {
+ 	u64 pbc, pbc_flags = 0;
+@@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ }
+ 
+ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+-		u32 pkey, u32 slid, u32 dlid, u8 sc5,
++		u16 pkey, u32 slid, u32 dlid, u8 sc5,
+ 		const struct ib_grh *old_grh)
+ {
+ 	u64 pbc, pbc_flags = 0;
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index f045491f2c14..93af6b704d06 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
+ 					  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
+ 					  MLX4_IB_RX_HASH_DST_PORT_TCP	|
+ 					  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
+-					  MLX4_IB_RX_HASH_DST_PORT_UDP)) {
++					  MLX4_IB_RX_HASH_DST_PORT_UDP  |
++					  MLX4_IB_RX_HASH_INNER)) {
+ 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+ 			 ucmd->rx_hash_fields_mask);
+ 		return (-EOPNOTSUPP);
+diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
+index bce263b92821..fb4d77be019b 100644
+--- a/drivers/infiniband/hw/mlx5/Kconfig
++++ b/drivers/infiniband/hw/mlx5/Kconfig
+@@ -1,6 +1,7 @@
+ config MLX5_INFINIBAND
+ 	tristate "Mellanox Connect-IB HCA support"
+ 	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
++	depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
+ 	---help---
+ 	  This driver provides low-level InfiniBand support for
+ 	  Mellanox Connect-IB PCI Express host channel adapters (HCAs).
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 6857c61bdee1..deccd91c2361 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -836,25 +836,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+ 		       int *order)
+ {
+ 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
++	struct ib_umem *u;
+ 	int err;
+ 
+-	*umem = ib_umem_get(pd->uobject->context, start, length,
+-			    access_flags, 0);
+-	err = PTR_ERR_OR_ZERO(*umem);
++	*umem = NULL;
++
++	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
++	err = PTR_ERR_OR_ZERO(u);
+ 	if (err) {
+-		*umem = NULL;
+-		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
++		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
+ 		return err;
+ 	}
+ 
+-	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
++	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ 			   page_shift, ncont, order);
+ 	if (!*npages) {
+ 		mlx5_ib_warn(dev, "avoid zero region\n");
+-		ib_umem_release(*umem);
++		ib_umem_release(u);
+ 		return -EINVAL;
+ 	}
+ 
++	*umem = u;
++
+ 	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
+ 		    *npages, *ncont, *order, *page_shift);
+ 
+@@ -1343,13 +1346,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ 	int access_flags = flags & IB_MR_REREG_ACCESS ?
+ 			    new_access_flags :
+ 			    mr->access_flags;
+-	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
+-	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
+ 	int page_shift = 0;
+ 	int upd_flags = 0;
+ 	int npages = 0;
+ 	int ncont = 0;
+ 	int order = 0;
++	u64 addr, len;
+ 	int err;
+ 
+ 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+@@ -1357,6 +1359,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ 
+ 	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+ 
++	if (!mr->umem)
++		return -EINVAL;
++
++	if (flags & IB_MR_REREG_TRANS) {
++		addr = virt_addr;
++		len = length;
++	} else {
++		addr = mr->umem->address;
++		len = mr->umem->length;
++	}
++
+ 	if (flags != IB_MR_REREG_PD) {
+ 		/*
+ 		 * Replace umem. This needs to be done whether or not UMR is
+@@ -1364,6 +1377,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ 		 */
+ 		flags |= IB_MR_REREG_TRANS;
+ 		ib_umem_release(mr->umem);
++		mr->umem = NULL;
+ 		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
+ 				  &npages, &page_shift, &ncont, &order);
+ 		if (err < 0) {
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index 6c424afea25f..5f0144fee2ac 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -256,7 +256,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
+ 	} else {
+ 		if (ucmd) {
+ 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
++			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
++				return -EINVAL;
+ 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
++			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
++				return -EINVAL;
+ 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ 			qp->rq.max_post = qp->rq.wqe_cnt;
+ 		} else {
+@@ -2420,18 +2424,18 @@ enum {
+ 
+ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+ {
+-	if (rate == IB_RATE_PORT_CURRENT) {
++	if (rate == IB_RATE_PORT_CURRENT)
+ 		return 0;
+-	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
++
++	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
+ 		return -EINVAL;
+-	} else {
+-		while (rate != IB_RATE_2_5_GBPS &&
+-		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+-			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+-			--rate;
+-	}
+ 
+-	return rate + MLX5_STAT_RATE_OFFSET;
++	while (rate != IB_RATE_PORT_CURRENT &&
++	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
++		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
++		--rate;
++
++	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
+ }
+ 
+ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
+diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
+index 766bf2660116..5f04b2d94635 100644
+--- a/drivers/input/input-leds.c
++++ b/drivers/input/input-leds.c
+@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
+ 			      const struct input_device_id *id)
+ {
+ 	struct input_leds *leds;
++	struct input_led *led;
+ 	unsigned int num_leds;
+ 	unsigned int led_code;
+ 	int led_no;
+@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
+ 
+ 	led_no = 0;
+ 	for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
+-		struct input_led *led = &leds->leds[led_no];
++		if (!input_led_info[led_code].name)
++			continue;
+ 
++		led = &leds->leds[led_no];
+ 		led->handle = &leds->handle;
+ 		led->code = led_code;
+ 
+-		if (!input_led_info[led_code].name)
+-			continue;
+-
+ 		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
+ 					   dev_name(&dev->dev),
+ 					   input_led_info[led_code].name);
+diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
+index 7659bc48f1db..429b694405c7 100644
+--- a/drivers/input/touchscreen/atmel_mxt_ts.c
++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
+@@ -3030,6 +3030,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
+ 		},
+ 		.driver_data = samus_platform_data,
+ 	},
++	{
++		/* Samsung Chromebook Pro */
++		.ident = "Samsung Chromebook Pro",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
++		},
++		.driver_data = samus_platform_data,
++	},
+ 	{
+ 		/* Other Google Chromebooks */
+ 		.ident = "Chromebook",
+diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
+index f31265937439..7f0c0be322e0 100644
+--- a/drivers/irqchip/qcom-irq-combiner.c
++++ b/drivers/irqchip/qcom-irq-combiner.c
+@@ -1,4 +1,4 @@
+-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
++/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 and
+@@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc)
+ 
+ 		bit = readl_relaxed(combiner->regs[reg].addr);
+ 		status = bit & combiner->regs[reg].enabled;
+-		if (!status)
++		if (bit && !status)
+ 			pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
+ 					    smp_processor_id(), bit,
+ 					    combiner->regs[reg].enabled,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index c853e7410f5a..51c68fc416fa 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
++	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
+diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+index 1404729441a2..71f9eee8fbe5 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
++++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+@@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+ 
+ static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+ {
+-	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+-
+-	/* override ant_num / ant_path */
+-	if (mod_params->ant_sel) {
+-		rtlpriv->btcoexist.btc_info.ant_num =
+-			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+-
+-		rtlpriv->btcoexist.btc_info.single_ant_path =
+-			(mod_params->ant_sel == 1 ? 0 : 1);
+-	}
+ 	return rtlpriv->btcoexist.btc_info.single_ant_path;
+ }
+ 
+@@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+ 
+ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+ {
+-	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+ 	u8 num;
+ 
+ 	if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+@@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+ 	else
+ 		num = 1;
+ 
+-	/* override ant_num / ant_path */
+-	if (mod_params->ant_sel)
+-		num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
+-
+ 	return num;
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+index e7bbbc95cdb1..b4f3f91b590e 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+@@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
+ 		return false;
+ 	}
+ 
++	if (rtlpriv->cfg->ops->get_btc_status())
++		rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
++
+ 	bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
+ 	rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
+ 
+@@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ 		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ 		rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+ 		rtlpriv->btcoexist.btc_info.single_ant_path =
+-			 (value & 0x40);	/*0xc3[6]*/
++			 (value & 0x40 ? ANT_AUX : ANT_MAIN);	/*0xc3[6]*/
+ 	} else {
+ 		rtlpriv->btcoexist.btc_info.btcoexist = 0;
+ 		rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+ 		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+-		rtlpriv->btcoexist.btc_info.single_ant_path = 0;
++		rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN;
+ 	}
+ 
+ 	/* override ant_num / ant_path */
+ 	if (mod_params->ant_sel) {
+ 		rtlpriv->btcoexist.btc_info.ant_num =
+-			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
++			(mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2);
+ 
+ 		rtlpriv->btcoexist.btc_info.single_ant_path =
+-			(mod_params->ant_sel == 1 ? 0 : 1);
++			(mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+index 46dcb7fef195..36683364dbc7 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
++++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
+@@ -2759,6 +2759,11 @@ enum bt_ant_num {
+ 	ANT_X1 = 1,
+ };
+ 
++enum bt_ant_path {
++	ANT_MAIN = 0,
++	ANT_AUX = 1,
++};
++
+ enum bt_co_type {
+ 	BT_2WIRE = 0,
+ 	BT_ISSC_3WIRE = 1,
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 51ebc5a6053f..51a1b49760ea 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -154,7 +154,7 @@ config DELL_LAPTOP
+ 	depends on ACPI_VIDEO || ACPI_VIDEO = n
+ 	depends on RFKILL || RFKILL = n
+ 	depends on SERIO_I8042
+-	select DELL_SMBIOS
++	depends on DELL_SMBIOS
+ 	select POWER_SUPPLY
+ 	select LEDS_CLASS
+ 	select NEW_LEDS
+diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c
+index d4aeac3477f5..f086469ea740 100644
+--- a/drivers/platform/x86/asus-wireless.c
++++ b/drivers/platform/x86/asus-wireless.c
+@@ -178,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev)
+ {
+ 	struct asus_wireless_data *data = acpi_driver_data(adev);
+ 
+-	if (data->wq)
++	if (data->wq) {
++		devm_led_classdev_unregister(&adev->dev, &data->led);
+ 		destroy_workqueue(data->wq);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index 07c814c42648..60429011292a 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *sg = &cmd->t_data_sg[0];
+-	unsigned char *buf, zero = 0x00, *p = &zero;
+-	int rc, ret;
++	unsigned char *buf, *not_zero;
++	int ret;
+ 
+ 	buf = kmap(sg_page(sg)) + sg->offset;
+ 	if (!buf)
+@@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
+ 	 * Fall back to block_execute_write_same() slow-path if
+ 	 * incoming WRITE_SAME payload does not contain zeros.
+ 	 */
+-	rc = memcmp(buf, p, cmd->data_length);
++	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
+ 	kunmap(sg_page(sg));
+ 
+-	if (rc)
++	if (not_zero)
+ 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 
+ 	ret = blkdev_issue_zeroout(bdev,
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index c821b4b9647e..7b5cb28ffb35 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
+ static const unsigned short high_speed_maxpacket_maxes[4] = {
+ 	[USB_ENDPOINT_XFER_CONTROL] = 64,
+ 	[USB_ENDPOINT_XFER_ISOC] = 1024,
+-	[USB_ENDPOINT_XFER_BULK] = 512,
++
++	/* Bulk should be 512, but some devices use 1024: we will warn below */
++	[USB_ENDPOINT_XFER_BULK] = 1024,
+ 	[USB_ENDPOINT_XFER_INT] = 1024,
+ };
+ static const unsigned short super_speed_maxpacket_maxes[4] = {
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 100454c514d5..726ae915c03a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1424,7 +1424,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+ 					dwc->lock);
+ 
+ 			if (!r->trb)
+-				goto out1;
++				goto out0;
+ 
+ 			if (r->num_pending_sgs) {
+ 				struct dwc3_trb *trb;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 5d37700ae4b0..b60a02c50b89 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3548,6 +3548,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
+ 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
+ 	}
+ 	xhci_debugfs_remove_slot(xhci, udev->slot_id);
++	virt_dev->udev = NULL;
+ 	ret = xhci_disable_slot(xhci, udev->slot_id);
+ 	if (ret)
+ 		xhci_free_virt_device(xhci, udev->slot_id);
+diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
+index 293e5b8da565..21b87a077c69 100644
+--- a/drivers/usb/musb/musb_gadget.c
++++ b/drivers/usb/musb/musb_gadget.c
+@@ -417,7 +417,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ 	req = next_request(musb_ep);
+ 	request = &req->request;
+ 
+-	trace_musb_req_tx(req);
+ 	csr = musb_readw(epio, MUSB_TXCSR);
+ 	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
+ 
+@@ -456,6 +455,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
+ 		u8	is_dma = 0;
+ 		bool	short_packet = false;
+ 
++		trace_musb_req_tx(req);
++
+ 		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ 			is_dma = 1;
+ 			csr |= MUSB_TXCSR_P_WZC_BITS;
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 45ed32c2cba9..0ee0c6d7f194 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -996,7 +996,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ 			/* set tx_reinit and schedule the next qh */
+ 			ep->tx_reinit = 1;
+ 		}
+-		musb_start_urb(musb, is_in, next_qh);
++
++		if (next_qh)
++			musb_start_urb(musb, is_in, next_qh);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2d8d9150da0c..10c7ab427a52 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
+ /* These Quectel products use Qualcomm's vendor ID */
+ #define QUECTEL_PRODUCT_UC20			0x9003
+ #define QUECTEL_PRODUCT_UC15			0x9090
++/* These u-blox products use Qualcomm's vendor ID */
++#define UBLOX_PRODUCT_R410M			0x90b2
+ /* These Yuga products use Qualcomm's vendor ID */
+ #define YUGA_PRODUCT_CLM920_NC5			0x9625
+ 
+@@ -548,151 +550,15 @@ static void option_instat_callback(struct urb *urb);
+ #define WETELECOM_PRODUCT_6802			0x6802
+ #define WETELECOM_PRODUCT_WMD300		0x6803
+ 
+-struct option_blacklist_info {
+-	/* bitmask of interface numbers blacklisted for send_setup */
+-	const unsigned long sendsetup;
+-	/* bitmask of interface numbers that are reserved */
+-	const unsigned long reserved;
+-};
+ 
+-static const struct option_blacklist_info four_g_w14_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1),
+-};
++/* Device flags */
+ 
+-static const struct option_blacklist_info four_g_w100_blacklist = {
+-	.sendsetup = BIT(1) | BIT(2),
+-	.reserved = BIT(3),
+-};
++/* Interface does not support modem-control requests */
++#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)
+ 
+-static const struct option_blacklist_info alcatel_x200_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1),
+-	.reserved = BIT(4),
+-};
++/* Interface is reserved */
++#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)
+ 
+-static const struct option_blacklist_info zte_0037_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1),
+-};
+-
+-static const struct option_blacklist_info zte_k3765_z_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
+-	.reserved = BIT(4),
+-};
+-
+-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1) | BIT(2),
+-};
+-
+-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
+-	.sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
+-};
+-
+-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+-	.sendsetup = BIT(1) | BIT(2) | BIT(3),
+-};
+-
+-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+-	.reserved = BIT(2) | BIT(3) | BIT(4),
+-};
+-
+-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+-	.reserved = BIT(3) | BIT(4) | BIT(5),
+-};
+-
+-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+-	.reserved = BIT(3) | BIT(4) | BIT(5),
+-};
+-
+-static const struct option_blacklist_info huawei_cdc12_blacklist = {
+-	.reserved = BIT(1) | BIT(2),
+-};
+-
+-static const struct option_blacklist_info net_intf0_blacklist = {
+-	.reserved = BIT(0),
+-};
+-
+-static const struct option_blacklist_info net_intf1_blacklist = {
+-	.reserved = BIT(1),
+-};
+-
+-static const struct option_blacklist_info net_intf2_blacklist = {
+-	.reserved = BIT(2),
+-};
+-
+-static const struct option_blacklist_info net_intf3_blacklist = {
+-	.reserved = BIT(3),
+-};
+-
+-static const struct option_blacklist_info net_intf4_blacklist = {
+-	.reserved = BIT(4),
+-};
+-
+-static const struct option_blacklist_info net_intf5_blacklist = {
+-	.reserved = BIT(5),
+-};
+-
+-static const struct option_blacklist_info net_intf6_blacklist = {
+-	.reserved = BIT(6),
+-};
+-
+-static const struct option_blacklist_info zte_mf626_blacklist = {
+-	.sendsetup = BIT(0) | BIT(1),
+-	.reserved = BIT(4),
+-};
+-
+-static const struct option_blacklist_info zte_1255_blacklist = {
+-	.reserved = BIT(3) | BIT(4),
+-};
+-
+-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+-	.reserved = BIT(5) | BIT(6),
+-};
+-
+-static const struct option_blacklist_info telit_me910_blacklist = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(1) | BIT(3),
+-};
+-
+-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(3),
+-};
+-
+-static const struct option_blacklist_info telit_le910_blacklist = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(1) | BIT(2),
+-};
+-
+-static const struct option_blacklist_info telit_le920_blacklist = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(1) | BIT(5),
+-};
+-
+-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(1),
+-};
+-
+-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+-	.sendsetup = BIT(2),
+-	.reserved = BIT(0) | BIT(1) | BIT(3),
+-};
+-
+-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+-	.sendsetup = BIT(0),
+-	.reserved = BIT(1) | BIT(2) | BIT(3),
+-};
+-
+-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
+-	.reserved = BIT(4) | BIT(5),
+-};
+-
+-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
+-	.reserved = BIT(1) | BIT(4),
+-};
+-
+-static const struct option_blacklist_info quectel_ep06_blacklist = {
+-	.reserved = BIT(4) | BIT(5),
+-};
+ 
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+@@ -726,26 +592,26 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+ 	{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++	  .driver_info = RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++	  .driver_info = RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff),	/* Huawei E1820 */
+-		.driver_info = (kernel_ulong_t) &net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
++	  .driver_info = RSVD(1) | RSVD(2) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
+@@ -1190,67 +1056,70 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ 	/* Quectel products using Qualcomm vendor ID */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	/* Yuga products use Qualcomm vendor ID */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
+-	  .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
++	  .driver_info = RSVD(1) | RSVD(4) },
++	/* u-blox products using Qualcomm vendor ID */
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
++	  .driver_info = RSVD(1) | RSVD(3) },
+ 	/* Quectel products using Quectel vendor ID */
+ 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+-	  .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist },
++	  .driver_info = RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
+-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
+-	  .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
++	  .driver_info = RSVD(0) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
+-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
+-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
+-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
+@@ -1258,38 +1127,38 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	  .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+-		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
+-		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
++	  .driver_info = NCTRL(0) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+-		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+-		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
+-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
+-		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	  .driver_info = NCTRL(0) | RSVD(1) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
+-		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
+@@ -1305,58 +1174,58 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
+-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
++	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&zte_0037_blacklist },
++	  .driver_info = NCTRL(0) | NCTRL(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
+@@ -1381,26 +1250,26 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
+@@ -1416,50 +1285,50 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
+-	  .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
+@@ -1576,23 +1445,23 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
++	  .driver_info = RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
+@@ -1607,7 +1476,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+@@ -1643,17 +1512,17 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
+-		.driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1671,8 +1540,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+-	  0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
++	  .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+ 
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
+@@ -1683,20 +1552,20 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
++	  .driver_info = RSVD(1) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
+@@ -1848,19 +1717,19 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
+-	 .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
++	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
+-	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
++	 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+-	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++	 .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
+ 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+-	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++	 .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
+ 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+-	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+-	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
++	 .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+@@ -1880,37 +1749,34 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+-	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
++	  .driver_info = RSVD(5) | RSVD(6) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+-	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+-	},
++	  .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
+-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
+-	  .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
+-	  .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
++	  .driver_info = RSVD(5) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
+-	  .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
++	  .driver_info = RSVD(2) },
+ 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
+ 	{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+-  	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+-  	},
++	  .driver_info = NCTRL(0) | NCTRL(1) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+-	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+-	},
++	  .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
+ 	{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
+-	 .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
++	 .driver_info = RSVD(3)},
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	  .driver_info = RSVD(3) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+@@ -1936,14 +1802,14 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
+-		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
++	  .driver_info = RSVD(4) | RSVD(5) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+@@ -1953,20 +1819,20 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
+-		.driver_info = (kernel_ulong_t)&net_intf6_blacklist },
++	  .driver_info = RSVD(6) },
+ 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+-		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
+ 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
+@@ -2043,9 +1909,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d01, 0xff) },			/* D-Link DWM-156 (variant) */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d02, 0xff) },
+@@ -2053,9 +1919,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
+-	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	  .driver_info = RSVD(4) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+@@ -2115,7 +1981,7 @@ static int option_probe(struct usb_serial *serial,
+ 	struct usb_interface_descriptor *iface_desc =
+ 				&serial->interface->cur_altsetting->desc;
+ 	struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
+-	const struct option_blacklist_info *blacklist;
++	unsigned long device_flags = id->driver_info;
+ 
+ 	/* Never bind to the CD-Rom emulation interface	*/
+ 	if (iface_desc->bInterfaceClass == 0x08)
+@@ -2126,9 +1992,7 @@ static int option_probe(struct usb_serial *serial,
+ 	 * the same class/subclass/protocol as the serial interfaces.  Look at
+ 	 * the Windows driver .INF files for reserved interface numbers.
+ 	 */
+-	blacklist = (void *)id->driver_info;
+-	if (blacklist && test_bit(iface_desc->bInterfaceNumber,
+-						&blacklist->reserved))
++	if (device_flags & RSVD(iface_desc->bInterfaceNumber))
+ 		return -ENODEV;
+ 	/*
+ 	 * Don't bind network interface on Samsung GT-B3730, it is handled by
+@@ -2139,8 +2003,8 @@ static int option_probe(struct usb_serial *serial,
+ 	    iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
+ 		return -ENODEV;
+ 
+-	/* Store the blacklist info so we can use it during attach. */
+-	usb_set_serial_data(serial, (void *)blacklist);
++	/* Store the device flags so we can use them during attach. */
++	usb_set_serial_data(serial, (void *)device_flags);
+ 
+ 	return 0;
+ }
+@@ -2148,22 +2012,21 @@ static int option_probe(struct usb_serial *serial,
+ static int option_attach(struct usb_serial *serial)
+ {
+ 	struct usb_interface_descriptor *iface_desc;
+-	const struct option_blacklist_info *blacklist;
+ 	struct usb_wwan_intf_private *data;
++	unsigned long device_flags;
+ 
+ 	data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+-	/* Retrieve blacklist info stored at probe. */
+-	blacklist = usb_get_serial_data(serial);
++	/* Retrieve device flags stored at probe. */
++	device_flags = (unsigned long)usb_get_serial_data(serial);
+ 
+ 	iface_desc = &serial->interface->cur_altsetting->desc;
+ 
+-	if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
+-						&blacklist->sendsetup)) {
++	if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
+ 		data->use_send_setup = 1;
+-	}
++
+ 	spin_lock_init(&data->susp_lock);
+ 
+ 	usb_set_serial_data(serial, data);
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index f5373ed2cd45..8ddbecc25d89 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -335,47 +335,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
+ 		goto exit;
+ 	}
+ 
+-	if (retval == sizeof(*connection_info)) {
+-			connection_info = (struct visor_connection_info *)
+-							transfer_buffer;
+-
+-		num_ports = le16_to_cpu(connection_info->num_ports);
+-		for (i = 0; i < num_ports; ++i) {
+-			switch (
+-			   connection_info->connections[i].port_function_id) {
+-			case VISOR_FUNCTION_GENERIC:
+-				string = "Generic";
+-				break;
+-			case VISOR_FUNCTION_DEBUGGER:
+-				string = "Debugger";
+-				break;
+-			case VISOR_FUNCTION_HOTSYNC:
+-				string = "HotSync";
+-				break;
+-			case VISOR_FUNCTION_CONSOLE:
+-				string = "Console";
+-				break;
+-			case VISOR_FUNCTION_REMOTE_FILE_SYS:
+-				string = "Remote File System";
+-				break;
+-			default:
+-				string = "unknown";
+-				break;
+-			}
+-			dev_info(dev, "%s: port %d, is for %s use\n",
+-				serial->type->description,
+-				connection_info->connections[i].port, string);
+-		}
++	if (retval != sizeof(*connection_info)) {
++		dev_err(dev, "Invalid connection information received from device\n");
++		retval = -ENODEV;
++		goto exit;
+ 	}
+-	/*
+-	* Handle devices that report invalid stuff here.
+-	*/
++
++	connection_info = (struct visor_connection_info *)transfer_buffer;
++
++	num_ports = le16_to_cpu(connection_info->num_ports);
++
++	/* Handle devices that report invalid stuff here. */
+ 	if (num_ports == 0 || num_ports > 2) {
+ 		dev_warn(dev, "%s: No valid connect info available\n",
+ 			serial->type->description);
+ 		num_ports = 2;
+ 	}
+ 
++	for (i = 0; i < num_ports; ++i) {
++		switch (connection_info->connections[i].port_function_id) {
++		case VISOR_FUNCTION_GENERIC:
++			string = "Generic";
++			break;
++		case VISOR_FUNCTION_DEBUGGER:
++			string = "Debugger";
++			break;
++		case VISOR_FUNCTION_HOTSYNC:
++			string = "HotSync";
++			break;
++		case VISOR_FUNCTION_CONSOLE:
++			string = "Console";
++			break;
++		case VISOR_FUNCTION_REMOTE_FILE_SYS:
++			string = "Remote File System";
++			break;
++		default:
++			string = "unknown";
++			break;
++		}
++		dev_info(dev, "%s: port %d, is for %s use\n",
++			serial->type->description,
++			connection_info->connections[i].port, string);
++	}
+ 	dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
+ 		num_ports);
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 16b54b1ff20e..53ddfafa440b 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3155,7 +3155,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	struct rb_node *node;
+ 	int ret = 0;
+ 
++	spin_lock(&root->fs_info->trans_lock);
+ 	cur_trans = root->fs_info->running_transaction;
++	if (cur_trans)
++		refcount_inc(&cur_trans->use_count);
++	spin_unlock(&root->fs_info->trans_lock);
+ 	if (!cur_trans)
+ 		return 0;
+ 
+@@ -3164,6 +3168,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ 	if (!head) {
+ 		spin_unlock(&delayed_refs->lock);
++		btrfs_put_transaction(cur_trans);
+ 		return 0;
+ 	}
+ 
+@@ -3180,6 +3185,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 		mutex_lock(&head->mutex);
+ 		mutex_unlock(&head->mutex);
+ 		btrfs_put_delayed_ref_head(head);
++		btrfs_put_transaction(cur_trans);
+ 		return -EAGAIN;
+ 	}
+ 	spin_unlock(&delayed_refs->lock);
+@@ -3212,6 +3218,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ 	}
+ 	spin_unlock(&head->lock);
+ 	mutex_unlock(&head->mutex);
++	btrfs_put_transaction(cur_trans);
+ 	return ret;
+ }
+ 
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 9ea08326f876..a7e8fdb5fc72 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -812,22 +812,26 @@ xfs_file_fallocate(
+ 		if (error)
+ 			goto out_unlock;
+ 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
+-		unsigned int blksize_mask = i_blocksize(inode) - 1;
++		unsigned int	blksize_mask = i_blocksize(inode) - 1;
++		loff_t		isize = i_size_read(inode);
+ 
+-		new_size = i_size_read(inode) + len;
+ 		if (offset & blksize_mask || len & blksize_mask) {
+ 			error = -EINVAL;
+ 			goto out_unlock;
+ 		}
+ 
+-		/* check the new inode size does not wrap through zero */
+-		if (new_size > inode->i_sb->s_maxbytes) {
++		/*
++		 * New inode size must not exceed ->s_maxbytes, accounting for
++		 * possible signed overflow.
++		 */
++		if (inode->i_sb->s_maxbytes - isize < len) {
+ 			error = -EFBIG;
+ 			goto out_unlock;
+ 		}
++		new_size = isize + len;
+ 
+ 		/* Offset should be less than i_size */
+-		if (offset >= i_size_read(inode)) {
++		if (offset >= isize) {
+ 			error = -EINVAL;
+ 			goto out_unlock;
+ 		}
+diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
+index 65f9e3f24dde..0e1b4be93dbf 100644
+--- a/kernel/time/clocksource.c
++++ b/kernel/time/clocksource.c
+@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
+ static int watchdog_running;
+ static atomic_t watchdog_reset_pending;
+ 
++static void inline clocksource_watchdog_lock(unsigned long *flags)
++{
++	spin_lock_irqsave(&watchdog_lock, *flags);
++}
++
++static void inline clocksource_watchdog_unlock(unsigned long *flags)
++{
++	spin_unlock_irqrestore(&watchdog_lock, *flags);
++}
++
+ static int clocksource_watchdog_kthread(void *data);
+ static void __clocksource_change_rating(struct clocksource *cs, int rating);
+ 
+@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
+ 	cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
+ 	cs->flags |= CLOCK_SOURCE_UNSTABLE;
+ 
++	/*
++	 * If the clocksource is registered clocksource_watchdog_kthread() will
++	 * re-rate and re-select.
++	 */
++	if (list_empty(&cs->list)) {
++		cs->rating = 0;
++		return;
++	}
++
+ 	if (cs->mark_unstable)
+ 		cs->mark_unstable(cs);
+ 
++	/* kick clocksource_watchdog_kthread() */
+ 	if (finished_booting)
+ 		schedule_work(&watchdog_work);
+ }
+@@ -164,7 +184,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
+ 
+ 	spin_lock_irqsave(&watchdog_lock, flags);
+ 	if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
+-		if (list_empty(&cs->wd_list))
++		if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
+ 			list_add(&cs->wd_list, &watchdog_list);
+ 		__clocksource_unstable(cs);
+ 	}
+@@ -319,9 +339,8 @@ static void clocksource_resume_watchdog(void)
+ 
+ static void clocksource_enqueue_watchdog(struct clocksource *cs)
+ {
+-	unsigned long flags;
++	INIT_LIST_HEAD(&cs->wd_list);
+ 
+-	spin_lock_irqsave(&watchdog_lock, flags);
+ 	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
+ 		/* cs is a clocksource to be watched. */
+ 		list_add(&cs->wd_list, &watchdog_list);
+@@ -331,7 +350,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
+ 		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
+ 			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+ 	}
+-	spin_unlock_irqrestore(&watchdog_lock, flags);
+ }
+ 
+ static void clocksource_select_watchdog(bool fallback)
+@@ -373,9 +391,6 @@ static void clocksource_select_watchdog(bool fallback)
+ 
+ static void clocksource_dequeue_watchdog(struct clocksource *cs)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&watchdog_lock, flags);
+ 	if (cs != watchdog) {
+ 		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
+ 			/* cs is a watched clocksource. */
+@@ -384,21 +399,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
+ 			clocksource_stop_watchdog();
+ 		}
+ 	}
+-	spin_unlock_irqrestore(&watchdog_lock, flags);
+ }
+ 
+ static int __clocksource_watchdog_kthread(void)
+ {
+ 	struct clocksource *cs, *tmp;
+ 	unsigned long flags;
+-	LIST_HEAD(unstable);
+ 	int select = 0;
+ 
+ 	spin_lock_irqsave(&watchdog_lock, flags);
+ 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
+ 		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
+ 			list_del_init(&cs->wd_list);
+-			list_add(&cs->wd_list, &unstable);
++			__clocksource_change_rating(cs, 0);
+ 			select = 1;
+ 		}
+ 		if (cs->flags & CLOCK_SOURCE_RESELECT) {
+@@ -410,11 +423,6 @@ static int __clocksource_watchdog_kthread(void)
+ 	clocksource_stop_watchdog();
+ 	spin_unlock_irqrestore(&watchdog_lock, flags);
+ 
+-	/* Needs to be done outside of watchdog lock */
+-	list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
+-		list_del_init(&cs->wd_list);
+-		__clocksource_change_rating(cs, 0);
+-	}
+ 	return select;
+ }
+ 
+@@ -447,6 +455,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
+ static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
+ void clocksource_mark_unstable(struct clocksource *cs) { }
+ 
++static void inline clocksource_watchdog_lock(unsigned long *flags) { }
++static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
++
+ #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
+ 
+ /**
+@@ -775,14 +786,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
+  */
+ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+ {
++	unsigned long flags;
+ 
+ 	/* Initialize mult/shift and max_idle_ns */
+ 	__clocksource_update_freq_scale(cs, scale, freq);
+ 
+ 	/* Add clocksource to the clocksource list */
+ 	mutex_lock(&clocksource_mutex);
++
++	clocksource_watchdog_lock(&flags);
+ 	clocksource_enqueue(cs);
+ 	clocksource_enqueue_watchdog(cs);
++	clocksource_watchdog_unlock(&flags);
++
+ 	clocksource_select();
+ 	clocksource_select_watchdog(false);
+ 	mutex_unlock(&clocksource_mutex);
+@@ -804,8 +820,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
+  */
+ void clocksource_change_rating(struct clocksource *cs, int rating)
+ {
++	unsigned long flags;
++
+ 	mutex_lock(&clocksource_mutex);
++	clocksource_watchdog_lock(&flags);
+ 	__clocksource_change_rating(cs, rating);
++	clocksource_watchdog_unlock(&flags);
++
+ 	clocksource_select();
+ 	clocksource_select_watchdog(false);
+ 	mutex_unlock(&clocksource_mutex);
+@@ -817,6 +838,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
+  */
+ static int clocksource_unbind(struct clocksource *cs)
+ {
++	unsigned long flags;
++
+ 	if (clocksource_is_watchdog(cs)) {
+ 		/* Select and try to install a replacement watchdog. */
+ 		clocksource_select_watchdog(true);
+@@ -830,8 +853,12 @@ static int clocksource_unbind(struct clocksource *cs)
+ 		if (curr_clocksource == cs)
+ 			return -EBUSY;
+ 	}
++
++	clocksource_watchdog_lock(&flags);
+ 	clocksource_dequeue_watchdog(cs);
+ 	list_del_init(&cs->list);
++	clocksource_watchdog_unlock(&flags);
++
+ 	return 0;
+ }
+ 
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index df08863e6d53..fff97dc0b70f 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -55,6 +55,7 @@ struct trace_uprobe {
+ 	struct list_head		list;
+ 	struct trace_uprobe_filter	filter;
+ 	struct uprobe_consumer		consumer;
++	struct path			path;
+ 	struct inode			*inode;
+ 	char				*filename;
+ 	unsigned long			offset;
+@@ -287,7 +288,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
+ 	for (i = 0; i < tu->tp.nr_args; i++)
+ 		traceprobe_free_probe_arg(&tu->tp.args[i]);
+ 
+-	iput(tu->inode);
++	path_put(&tu->path);
+ 	kfree(tu->tp.call.class->system);
+ 	kfree(tu->tp.call.name);
+ 	kfree(tu->filename);
+@@ -361,7 +362,6 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
+ static int create_trace_uprobe(int argc, char **argv)
+ {
+ 	struct trace_uprobe *tu;
+-	struct inode *inode;
+ 	char *arg, *event, *group, *filename;
+ 	char buf[MAX_EVENT_NAME_LEN];
+ 	struct path path;
+@@ -369,7 +369,6 @@ static int create_trace_uprobe(int argc, char **argv)
+ 	bool is_delete, is_return;
+ 	int i, ret;
+ 
+-	inode = NULL;
+ 	ret = 0;
+ 	is_delete = false;
+ 	is_return = false;
+@@ -435,21 +434,16 @@ static int create_trace_uprobe(int argc, char **argv)
+ 	}
+ 	/* Find the last occurrence, in case the path contains ':' too. */
+ 	arg = strrchr(argv[1], ':');
+-	if (!arg) {
+-		ret = -EINVAL;
+-		goto fail_address_parse;
+-	}
++	if (!arg)
++		return -EINVAL;
+ 
+ 	*arg++ = '\0';
+ 	filename = argv[1];
+ 	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
+ 	if (ret)
+-		goto fail_address_parse;
+-
+-	inode = igrab(d_inode(path.dentry));
+-	path_put(&path);
++		return ret;
+ 
+-	if (!inode || !S_ISREG(inode->i_mode)) {
++	if (!d_is_reg(path.dentry)) {
+ 		ret = -EINVAL;
+ 		goto fail_address_parse;
+ 	}
+@@ -488,7 +482,7 @@ static int create_trace_uprobe(int argc, char **argv)
+ 		goto fail_address_parse;
+ 	}
+ 	tu->offset = offset;
+-	tu->inode = inode;
++	tu->path = path;
+ 	tu->filename = kstrdup(filename, GFP_KERNEL);
+ 
+ 	if (!tu->filename) {
+@@ -556,7 +550,7 @@ static int create_trace_uprobe(int argc, char **argv)
+ 	return ret;
+ 
+ fail_address_parse:
+-	iput(inode);
++	path_put(&path);
+ 
+ 	pr_info("Failed to parse address or file.\n");
+ 
+@@ -935,6 +929,7 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
+ 		goto err_flags;
+ 
+ 	tu->consumer.filter = filter;
++	tu->inode = d_real_inode(tu->path.dentry);
+ 	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+ 	if (ret)
+ 		goto err_buffer;
+@@ -980,6 +975,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
+ 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+ 
+ 	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
++	tu->inode = NULL;
+ 	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
+ 
+ 	uprobe_buffer_disable();
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 671b13457387..1e37da2e0c25 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -207,7 +207,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_add(&tp_funcs, func, prio);
+ 	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(1);
++		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ 		return PTR_ERR(old);
+ 	}
+ 
+@@ -239,7 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_remove(&tp_funcs, func);
+ 	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(1);
++		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ 		return PTR_ERR(old);
+ 	}
+ 
+diff --git a/lib/errseq.c b/lib/errseq.c
+index df782418b333..81f9e33aa7e7 100644
+--- a/lib/errseq.c
++++ b/lib/errseq.c
+@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
+  * errseq_sample() - Grab current errseq_t value.
+  * @eseq: Pointer to errseq_t to be sampled.
+  *
+- * This function allows callers to sample an errseq_t value, marking it as
+- * "seen" if required.
++ * This function allows callers to initialise their errseq_t variable.
++ * If the error has been "seen", new callers will not see an old error.
++ * If there is an unseen error in @eseq, the caller of this function will
++ * see it the next time it checks for an error.
+  *
++ * Context: Any context.
+  * Return: The current errseq value.
+  */
+ errseq_t errseq_sample(errseq_t *eseq)
+ {
+ 	errseq_t old = READ_ONCE(*eseq);
+-	errseq_t new = old;
+ 
+-	/*
+-	 * For the common case of no errors ever having been set, we can skip
+-	 * marking the SEEN bit. Once an error has been set, the value will
+-	 * never go back to zero.
+-	 */
+-	if (old != 0) {
+-		new |= ERRSEQ_SEEN;
+-		if (old != new)
+-			cmpxchg(eseq, old, new);
+-	}
+-	return new;
++	/* If nobody has seen this error yet, then we can be the first. */
++	if (!(old & ERRSEQ_SEEN))
++		old = 0;
++	return old;
+ }
+ EXPORT_SYMBOL(errseq_sample);
+ 
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 44f7eb408fdb..0331de0e9144 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -750,7 +750,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ 	swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ 			DMA_ATTR_SKIP_CPU_SYNC);
+ out_warn:
+-	if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
++	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+ 		dev_warn(dev,
+ 			"swiotlb: coherent allocation failed, size=%zu\n",
+ 			size);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 06d7c40af570..6491afbb5fd5 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -423,6 +423,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
+ 		return -ENOTTY;
+ 	if (substream->stream != dir)
+ 		return -EINVAL;
++	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
++		return -EBADFD;
+ 
+ 	if ((ch = substream->runtime->channels) > 128)
+ 		return -EINVAL;
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index f48a4cd24ffc..289ae6bb81d9 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 			}
+ 			return;
+ 		}
++		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+-				return;
++				goto out;
+ 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 		}
+-		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		while (1) {
+ 			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ 			if (count <= 0)
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 1063a4377502..eab7f594ebe7 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -296,6 +296,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ 		cable->pause |= stream;
+ 		loopback_timer_stop(dpcm);
+ 		spin_unlock(&cable->lock);
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			loopback_active_notify(dpcm);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+@@ -304,6 +306,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ 		cable->pause &= ~stream;
+ 		loopback_timer_start(dpcm);
+ 		spin_unlock(&cable->lock);
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			loopback_active_notify(dpcm);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -827,9 +831,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].rate_shift;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -861,9 +867,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].notify;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -875,12 +883,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol,
+ 	int change = 0;
+ 
+ 	val = ucontrol->value.integer.value[0] ? 1 : 0;
++	mutex_lock(&loopback->cable_lock);
+ 	if (val != loopback->setup[kcontrol->id.subdevice]
+ 				[kcontrol->id.device].notify) {
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			[kcontrol->id.device].notify = val;
+ 		change = 1;
+ 	}
++	mutex_unlock(&loopback->cable_lock);
+ 	return change;
+ }
+ 
+@@ -888,13 +898,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol,
+ 			       struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+-	struct loopback_cable *cable = loopback->cables
+-			[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
++	struct loopback_cable *cable;
++
+ 	unsigned int val = 0;
+ 
+-	if (cable != NULL)
+-		val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+-									1 : 0;
++	mutex_lock(&loopback->cable_lock);
++	cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
++	if (cable != NULL) {
++		unsigned int running = cable->running ^ cable->pause;
++
++		val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
++	}
++	mutex_unlock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] = val;
+ 	return 0;
+ }
+@@ -937,9 +952,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].rate;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -959,9 +976,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].channels;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 4a1dc145327b..cb9acfe60f6a 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -773,8 +773,6 @@ static void amdtp_stream_first_callback(struct fw_iso_context *context,
+ 	u32 cycle;
+ 	unsigned int packets;
+ 
+-	s->max_payload_length = amdtp_stream_get_max_payload(s);
+-
+ 	/*
+ 	 * For in-stream, first packet has come.
+ 	 * For out-stream, prepared to transmit first packet
+@@ -879,6 +877,9 @@ int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
+ 
+ 	amdtp_stream_update(s);
+ 
++	if (s->direction == AMDTP_IN_STREAM)
++		s->max_payload_length = amdtp_stream_get_max_payload(s);
++
+ 	if (s->flags & CIP_NO_HEADER)
+ 		s->tag = TAG_NO_CIP_HEADER;
+ 	else
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8c238e51bb5a..2dd34dd77447 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3832,7 +3832,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
+ 	}
+ }
+ 
+-#if IS_REACHABLE(INPUT)
++#if IS_REACHABLE(CONFIG_INPUT)
+ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ 				   struct hda_jack_callback *event)
+ {
+diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
+index 2cccbba64418..f304be71c278 100644
+--- a/tools/power/acpi/Makefile.config
++++ b/tools/power/acpi/Makefile.config
+@@ -56,6 +56,7 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+ # to compile vs uClibc, that can be done here as well.
+ CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+ CROSS_COMPILE ?= $(CROSS)
++LD = $(CC)
+ HOSTCC = gcc
+ 
+ # check if compiler option is supported
+diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
+index f9508e1a4058..6b63b6bf2661 100755
+--- a/tools/testing/selftests/firmware/fw_filesystem.sh
++++ b/tools/testing/selftests/firmware/fw_filesystem.sh
+@@ -46,9 +46,11 @@ test_finish()
+ 		echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+ 	fi
+ 	if [ "$OLD_FWPATH" = "" ]; then
+-		OLD_FWPATH=" "
++		# A zero-length write won't work; write a null byte
++		printf '\000' >/sys/module/firmware_class/parameters/path
++	else
++		echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
+ 	fi
+-	echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
+ 	rm -f "$FW"
+ 	rmdir "$FWPATH"
+ }


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-05-02 16:15 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-05-02 16:15 UTC (permalink / raw
  To: gentoo-commits

commit:     7cde86eb95c44a9bfb0eab9ae50e3ac566563f2c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed May  2 16:15:30 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed May  2 16:15:30 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7cde86eb

Linux patch 4.16.7

 0000_README             |    4 +
 1006_linux-4.16.7.patch | 4737 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4741 insertions(+)

diff --git a/0000_README b/0000_README
index d4182dc..1139362 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-4.16.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.6
 
+Patch:  1006_linux-4.16.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-4.16.7.patch b/1006_linux-4.16.7.patch
new file mode 100644
index 0000000..4dec6c8
--- /dev/null
+++ b/1006_linux-4.16.7.patch
@@ -0,0 +1,4737 @@
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index d6b3ff51a14f..36187fc32ab2 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
+ ARM 64-bit FP registers have the following id bit patterns:
+   0x4030 0000 0012 0 <regno:12>
+ 
++ARM firmware pseudo-registers have the following bit pattern:
++  0x4030 0000 0014 <regno:16>
++
+ 
+ arm64 registers are mapped using the lower 32 bits. The upper 16 of
+ that is the register group type, or coprocessor number:
+@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
+ arm64 system registers have the following id bit patterns:
+   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+ 
++arm64 firmware pseudo-registers have the following bit pattern:
++  0x6030 0000 0014 <regno:16>
++
+ 
+ MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
+ the register group type:
+@@ -2510,7 +2516,8 @@ Possible features:
+ 	  and execute guest code when KVM_RUN is called.
+ 	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+ 	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
+-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
++	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
++          backward compatible with v0.2) for the CPU.
+ 	  Depends on KVM_CAP_ARM_PSCI_0_2.
+ 	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
+ 	  Depends on KVM_CAP_ARM_PMU_V3.
+diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
+new file mode 100644
+index 000000000000..aafdab887b04
+--- /dev/null
++++ b/Documentation/virtual/kvm/arm/psci.txt
+@@ -0,0 +1,30 @@
++KVM implements the PSCI (Power State Coordination Interface)
++specification in order to provide services such as CPU on/off, reset
++and power-off to the guest.
++
++The PSCI specification is regularly updated to provide new features,
++and KVM implements these updates if they make sense from a virtualization
++point of view.
++
++This means that a guest booted on two different versions of KVM can
++observe two different "firmware" revisions. This could cause issues if
++a given guest is tied to a particular PSCI revision (unlikely), or if
++a migration causes a different PSCI version to be exposed out of the
++blue to an unsuspecting guest.
++
++In order to remedy this situation, KVM exposes a set of "firmware
++pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
++interface. These registers can be saved/restored by userspace, and set
++to a convenient value if required.
++
++The following register is defined:
++
++* KVM_REG_ARM_PSCI_VERSION:
++
++  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
++    (and thus has already been initialized)
++  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
++    highest PSCI version implemented by KVM and compatible with v0.2)
++  - Allows any PSCI version implemented by KVM and compatible with
++    v0.2 to be set with SET_ONE_REG
++  - Affects the whole VM (even if the register view is per-vcpu)
+diff --git a/Makefile b/Makefile
+index 41f07b2b7905..1c5d5d8c45e2 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
+index 8bbb6f85d161..4785fbcc41ed 100644
+--- a/arch/arm/boot/dts/gemini-nas4220b.dts
++++ b/arch/arm/boot/dts/gemini-nas4220b.dts
+@@ -134,37 +134,37 @@
+ 						function = "gmii";
+ 						groups = "gmii_gmac0_grp";
+ 					};
+-					/* Settings come from OpenWRT */
++					/* Settings come from OpenWRT, pins on SL3516 */
+ 					conf0 {
+-						pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV";
++						pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
+ 						skew-delay = <0>;
+ 					};
+ 					conf1 {
+-						pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC";
++						pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
+ 						skew-delay = <15>;
+ 					};
+ 					conf2 {
+-						pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN";
++						pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
+ 						skew-delay = <7>;
+ 					};
+ 					conf3 {
+-						pins = "V7 GMAC0 TXC";
++						pins = "U8 GMAC0 TXC";
+ 						skew-delay = <11>;
+ 					};
+ 					conf4 {
+-						pins = "P10 GMAC1 TXC";
++						pins = "V11 GMAC1 TXC";
+ 						skew-delay = <10>;
+ 					};
+ 					conf5 {
+ 						/* The data lines all have default skew */
+-						pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1",
+-						       "P9 GMAC0 RXD2", "R9 GMAC0 RXD3",
+-						       "U7 GMAC0 TXD0", "T7 GMAC0 TXD1",
+-						       "R7 GMAC0 TXD2", "P7 GMAC0 TXD3",
+-						       "R11 GMAC1 RXD0", "P11 GMAC1 RXD1",
+-						       "V12 GMAC1 RXD2", "U12 GMAC1 RXD3",
+-						       "R10 GMAC1 TXD0", "T10 GMAC1 TXD1",
+-						       "U10 GMAC1 TXD2", "V10 GMAC1 TXD3";
++						pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
++						       "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
++						       "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
++						       "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
++						       "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
++						       "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
++						       "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
++						       "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
+ 						skew-delay = <7>;
+ 					};
+ 					/* Set up drive strength on GMAC0 to 16 mA */
+diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
+index 2620ce790db0..371fca4e1ab7 100644
+--- a/arch/arm/configs/socfpga_defconfig
++++ b/arch/arm/configs/socfpga_defconfig
+@@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
+ CONFIG_MTD_NAND=y
+ CONFIG_MTD_NAND_DENALI_DT=y
+ CONFIG_MTD_SPI_NOR=y
++# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+ CONFIG_SPI_CADENCE_QUADSPI=y
+ CONFIG_OF_OVERLAY=y
+ CONFIG_OF_CONFIGFS=y
+diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
+index 248b930563e5..8b908d23c58a 100644
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -77,6 +77,9 @@ struct kvm_arch {
+ 	/* Interrupt controller */
+ 	struct vgic_dist	vgic;
+ 	int max_vcpus;
++
++	/* Mandated version of PSCI */
++	u32 psci_version;
+ };
+ 
+ #define KVM_NR_MEM_OBJS     40
+diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
+index 6edd177bb1c7..47dfc99f5cd0 100644
+--- a/arch/arm/include/uapi/asm/kvm.h
++++ b/arch/arm/include/uapi/asm/kvm.h
+@@ -186,6 +186,12 @@ struct kvm_arch_memory_slot {
+ #define KVM_REG_ARM_VFP_FPINST		0x1009
+ #define KVM_REG_ARM_VFP_FPINST2		0x100A
+ 
++/* KVM-as-firmware specific pseudo-registers */
++#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
++#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM | KVM_REG_SIZE_U64 | \
++					 KVM_REG_ARM_FW | ((r) & 0xffff))
++#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
++
+ /* Device Control API: ARM VGIC */
+ #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+ #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
+index 1e0784ebbfd6..a18f33edc471 100644
+--- a/arch/arm/kvm/guest.c
++++ b/arch/arm/kvm/guest.c
+@@ -22,6 +22,7 @@
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
++#include <kvm/arm_psci.h>
+ #include <asm/cputype.h>
+ #include <linux/uaccess.h>
+ #include <asm/kvm.h>
+@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
+ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+ {
+ 	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
++		+ kvm_arm_get_fw_num_regs(vcpu)
+ 		+ NUM_TIMER_REGS;
+ }
+ 
+@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+ 		uindices++;
+ 	}
+ 
++	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
++	if (ret)
++		return ret;
++	uindices += kvm_arm_get_fw_num_regs(vcpu);
++
+ 	ret = copy_timer_indices(vcpu, uindices);
+ 	if (ret)
+ 		return ret;
+@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ 		return get_core_reg(vcpu, reg);
+ 
++	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
++		return kvm_arm_get_fw_reg(vcpu, reg);
++
+ 	if (is_timer_reg(reg->id))
+ 		return get_timer_reg(vcpu, reg);
+ 
+@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ 		return set_core_reg(vcpu, reg);
+ 
++	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
++		return kvm_arm_set_fw_reg(vcpu, reg);
++
+ 	if (is_timer_reg(reg->id))
+ 		return set_timer_reg(vcpu, reg);
+ 
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index 596f8e414a4c..b9e355bd3b78 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -75,6 +75,9 @@ struct kvm_arch {
+ 
+ 	/* Interrupt controller */
+ 	struct vgic_dist	vgic;
++
++	/* Mandated version of PSCI */
++	u32 psci_version;
+ };
+ 
+ #define KVM_NR_MEM_OBJS     40
+diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
+index 9abbf3044654..04b3256f8e6d 100644
+--- a/arch/arm64/include/uapi/asm/kvm.h
++++ b/arch/arm64/include/uapi/asm/kvm.h
+@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
+ #define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
+ #define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
+ 
++/* KVM-as-firmware specific pseudo-registers */
++#define KVM_REG_ARM_FW			(0x0014 << KVM_REG_ARM_COPROC_SHIFT)
++#define KVM_REG_ARM_FW_REG(r)		(KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
++					 KVM_REG_ARM_FW | ((r) & 0xffff))
++#define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
++
+ /* Device Control API: ARM VGIC */
+ #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+ #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 959e50d2588c..56a0260ceb11 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -25,6 +25,7 @@
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
++#include <kvm/arm_psci.h>
+ #include <asm/cputype.h>
+ #include <linux/uaccess.h>
+ #include <asm/kvm.h>
+@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
+ {
+ 	return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+-                + NUM_TIMER_REGS;
++		+ kvm_arm_get_fw_num_regs(vcpu)	+ NUM_TIMER_REGS;
+ }
+ 
+ /**
+@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+ 		uindices++;
+ 	}
+ 
++	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
++	if (ret)
++		return ret;
++	uindices += kvm_arm_get_fw_num_regs(vcpu);
++
+ 	ret = copy_timer_indices(vcpu, uindices);
+ 	if (ret)
+ 		return ret;
+@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ 		return get_core_reg(vcpu, reg);
+ 
++	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
++		return kvm_arm_get_fw_reg(vcpu, reg);
++
+ 	if (is_timer_reg(reg->id))
+ 		return get_timer_reg(vcpu, reg);
+ 
+@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
+ 		return set_core_reg(vcpu, reg);
+ 
++	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
++		return kvm_arm_set_fw_reg(vcpu, reg);
++
+ 	if (is_timer_reg(reg->id))
+ 		return set_timer_reg(vcpu, reg);
+ 
+diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
+index fe6fc63251fe..38c5b4764bfe 100644
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
+ 					if (pfn != ULONG_MAX) {
+ 						*phys_addr =
+ 							(pfn << PAGE_SHIFT);
+-						handled = 1;
+ 					}
+ 				}
+ 			}
+@@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
+ 			 * kernel/exception-64s.h
+ 			 */
+ 			if (get_paca()->in_mce < MAX_MCE_DEPTH)
+-				if (!mce_find_instr_ea_and_pfn(regs, addr,
+-								phys_addr))
+-					handled = 1;
++				mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
+ 		}
+ 		found = 1;
+ 	}
+@@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
+ 		const struct mce_ierror_table itable[])
+ {
+ 	struct mce_error_info mce_err = { 0 };
+-	uint64_t addr, phys_addr;
++	uint64_t addr, phys_addr = ULONG_MAX;
+ 	uint64_t srr1 = regs->msr;
+ 	long handled;
+ 
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index fe8c61149fb8..0cd9031b6b54 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -143,6 +143,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ 			start, start + size, rc);
+ 		return -EFAULT;
+ 	}
++	flush_inval_dcache_range(start, start + size);
+ 
+ 	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ }
+@@ -169,6 +170,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+ 
+ 	/* Remove htab bolted mappings for this section of memory */
+ 	start = (unsigned long)__va(start);
++	flush_inval_dcache_range(start, start + size);
+ 	ret = remove_section_mapping(start, start + size);
+ 
+ 	/* Ensure all vmalloc mappings are flushed in case they also
+diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
+index 0a253b64ac5f..e7b621f619b2 100644
+--- a/arch/powerpc/platforms/powernv/npu-dma.c
++++ b/arch/powerpc/platforms/powernv/npu-dma.c
+@@ -33,6 +33,13 @@
+ 
+ #define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
+ 
++/*
++ * When an address shootdown range exceeds this threshold we invalidate the
++ * entire TLB on the GPU for the given PID rather than each specific address in
++ * the range.
++ */
++#define ATSD_THRESHOLD (2*1024*1024)
++
+ /*
+  * Other types of TCE cache invalidation are not functional in the
+  * hardware.
+@@ -627,11 +634,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
+ 	struct npu_context *npu_context = mn_to_npu_context(mn);
+ 	unsigned long address;
+ 
+-	for (address = start; address < end; address += PAGE_SIZE)
+-		mmio_invalidate(npu_context, 1, address, false);
++	if (end - start > ATSD_THRESHOLD) {
++		/*
++		 * Just invalidate the entire PID if the address range is too
++		 * large.
++		 */
++		mmio_invalidate(npu_context, 0, 0, true);
++	} else {
++		for (address = start; address < end; address += PAGE_SIZE)
++			mmio_invalidate(npu_context, 1, address, false);
+ 
+-	/* Do the flush only on the final addess == end */
+-	mmio_invalidate(npu_context, 1, address, true);
++		/* Do the flush only on the final addess == end */
++		mmio_invalidate(npu_context, 1, address, true);
++	}
+ }
+ 
+ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
+diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
+index f8868864f373..aa2a5139462e 100644
+--- a/arch/powerpc/platforms/powernv/opal-rtc.c
++++ b/arch/powerpc/platforms/powernv/opal-rtc.c
+@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
+ 
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+-		if (rc == OPAL_BUSY_EVENT)
++		if (rc == OPAL_BUSY_EVENT) {
++			mdelay(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
+-		else if (rc == OPAL_BUSY)
+-			mdelay(10);
++		} else if (rc == OPAL_BUSY) {
++			mdelay(OPAL_BUSY_DELAY_MS);
++		}
+ 	}
+ 	if (rc != OPAL_SUCCESS)
+ 		return 0;
+diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
+index 722951908b0a..4f6676fe4bcc 100644
+--- a/arch/sparc/include/uapi/asm/oradax.h
++++ b/arch/sparc/include/uapi/asm/oradax.h
+@@ -3,7 +3,7 @@
+  *
+  * This program is free software: you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 3 of the License, or
++ * the Free Software Foundation, either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
+index 809134c644a6..90ab9a795b49 100644
+--- a/arch/x86/include/uapi/asm/msgbuf.h
++++ b/arch/x86/include/uapi/asm/msgbuf.h
+@@ -1 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef __ASM_X64_MSGBUF_H
++#define __ASM_X64_MSGBUF_H
++
++#if !defined(__x86_64__) || !defined(__ILP32__)
+ #include <asm-generic/msgbuf.h>
++#else
++/*
++ * The msqid64_ds structure for x86 architecture with x32 ABI.
++ *
++ * On x86-32 and x86-64 we can just use the generic definition, but
++ * x32 uses the same binary layout as x86_64, which is differnet
++ * from other 32-bit architectures.
++ */
++
++struct msqid64_ds {
++	struct ipc64_perm msg_perm;
++	__kernel_time_t msg_stime;	/* last msgsnd time */
++	__kernel_time_t msg_rtime;	/* last msgrcv time */
++	__kernel_time_t msg_ctime;	/* last change time */
++	__kernel_ulong_t msg_cbytes;	/* current number of bytes on queue */
++	__kernel_ulong_t msg_qnum;	/* number of messages in queue */
++	__kernel_ulong_t msg_qbytes;	/* max number of bytes on queue */
++	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
++	__kernel_pid_t msg_lrpid;	/* last receive pid */
++	__kernel_ulong_t __unused4;
++	__kernel_ulong_t __unused5;
++};
++
++#endif
++
++#endif /* __ASM_GENERIC_MSGBUF_H */
+diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
+index 83c05fc2de38..644421f3823b 100644
+--- a/arch/x86/include/uapi/asm/shmbuf.h
++++ b/arch/x86/include/uapi/asm/shmbuf.h
+@@ -1 +1,43 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++#ifndef __ASM_X86_SHMBUF_H
++#define __ASM_X86_SHMBUF_H
++
++#if !defined(__x86_64__) || !defined(__ILP32__)
+ #include <asm-generic/shmbuf.h>
++#else
++/*
++ * The shmid64_ds structure for x86 architecture with x32 ABI.
++ *
++ * On x86-32 and x86-64 we can just use the generic definition, but
++ * x32 uses the same binary layout as x86_64, which is differnet
++ * from other 32-bit architectures.
++ */
++
++struct shmid64_ds {
++	struct ipc64_perm	shm_perm;	/* operation perms */
++	size_t			shm_segsz;	/* size of segment (bytes) */
++	__kernel_time_t		shm_atime;	/* last attach time */
++	__kernel_time_t		shm_dtime;	/* last detach time */
++	__kernel_time_t		shm_ctime;	/* last change time */
++	__kernel_pid_t		shm_cpid;	/* pid of creator */
++	__kernel_pid_t		shm_lpid;	/* pid of last operator */
++	__kernel_ulong_t	shm_nattch;	/* no. of current attaches */
++	__kernel_ulong_t	__unused4;
++	__kernel_ulong_t	__unused5;
++};
++
++struct shminfo64 {
++	__kernel_ulong_t	shmmax;
++	__kernel_ulong_t	shmmin;
++	__kernel_ulong_t	shmmni;
++	__kernel_ulong_t	shmseg;
++	__kernel_ulong_t	shmall;
++	__kernel_ulong_t	__unused1;
++	__kernel_ulong_t	__unused2;
++	__kernel_ulong_t	__unused3;
++	__kernel_ulong_t	__unused4;
++};
++
++#endif
++
++#endif /* __ASM_X86_SHMBUF_H */
+diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
+index 10c4fc2c91f8..77e201301528 100644
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -564,14 +564,12 @@ static int __reload_late(void *info)
+ 	apply_microcode_local(&err);
+ 	spin_unlock(&update_lock);
+ 
++	/* siblings return UCODE_OK because their engine got updated already */
+ 	if (err > UCODE_NFOUND) {
+ 		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+-		return -1;
+-	/* siblings return UCODE_OK because their engine got updated already */
++		ret = -1;
+ 	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+ 		ret = 1;
+-	} else {
+-		return ret;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 32b8e5724f96..1c2cfa0644aa 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -485,7 +485,6 @@ static void show_saved_mc(void)
+  */
+ static void save_mc_for_early(u8 *mc, unsigned int size)
+ {
+-#ifdef CONFIG_HOTPLUG_CPU
+ 	/* Synchronization during CPU hotplug. */
+ 	static DEFINE_MUTEX(x86_cpu_microcode_mutex);
+ 
+@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
+ 	show_saved_mc();
+ 
+ 	mutex_unlock(&x86_cpu_microcode_mutex);
+-#endif
+ }
+ 
+ static bool load_builtin_intel_microcode(struct cpio_data *cp)
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index ff99e2b6fc54..12599e55e040 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1536,6 +1536,8 @@ static inline void mwait_play_dead(void)
+ 	void *mwait_ptr;
+ 	int i;
+ 
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++		return;
+ 	if (!this_cpu_has(X86_FEATURE_MWAIT))
+ 		return;
+ 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index aeca22d91101..3193b2663bed 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -4911,8 +4911,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
+ 	bool new_queue = false;
+ 	bool bfqq_already_existing = false, split = false;
+ 
+-	if (!rq->elv.icq)
++	/*
++	 * Even if we don't have an icq attached, we should still clear
++	 * the scheduler pointers, as they might point to previously
++	 * allocated bic/bfqq structs.
++	 */
++	if (!rq->elv.icq) {
++		rq->elv.priv[0] = rq->elv.priv[1] = NULL;
+ 		return;
++	}
++
+ 	bic = icq_to_bic(rq->elv.icq);
+ 
+ 	spin_lock_irq(&bfqd->lock);
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 3b489527c8f2..b459d277d170 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -129,6 +129,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+ 	rq->part = NULL;
+ 	seqcount_init(&rq->gstate_seq);
+ 	u64_stats_init(&rq->aborted_gstate_sync);
++	/*
++	 * See comment of blk_mq_init_request
++	 */
++	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
+ }
+ EXPORT_SYMBOL(blk_rq_init);
+ 
+@@ -825,7 +829,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ 
+ 	while (true) {
+ 		bool success = false;
+-		int ret;
+ 
+ 		rcu_read_lock();
+ 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
+@@ -857,14 +860,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
+ 		 */
+ 		smp_rmb();
+ 
+-		ret = wait_event_interruptible(q->mq_freeze_wq,
+-				(atomic_read(&q->mq_freeze_depth) == 0 &&
+-				 (preempt || !blk_queue_preempt_only(q))) ||
+-				blk_queue_dying(q));
++		wait_event(q->mq_freeze_wq,
++			   (atomic_read(&q->mq_freeze_depth) == 0 &&
++			    (preempt || !blk_queue_preempt_only(q))) ||
++			   blk_queue_dying(q));
+ 		if (blk_queue_dying(q))
+ 			return -ENODEV;
+-		if (ret)
+-			return ret;
+ 	}
+ }
+ 
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 56e0c3699f9e..96de7aa4f62a 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2076,6 +2076,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ 
+ 	seqcount_init(&rq->gstate_seq);
+ 	u64_stats_init(&rq->aborted_gstate_sync);
++	/*
++	 * start gstate with gen 1 instead of 0, otherwise it will be equal
++	 * to aborted_gstate, and be identified timed out by
++	 * blk_mq_terminate_expired.
++	 */
++	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
++
+ 	return 0;
+ }
+ 
+diff --git a/crypto/drbg.c b/crypto/drbg.c
+index 4faa2781c964..466a112a4446 100644
+--- a/crypto/drbg.c
++++ b/crypto/drbg.c
+@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
+ 	if (!drbg)
+ 		return;
+ 	kzfree(drbg->Vbuf);
++	drbg->Vbuf = NULL;
+ 	drbg->V = NULL;
+ 	kzfree(drbg->Cbuf);
++	drbg->Cbuf = NULL;
+ 	drbg->C = NULL;
+ 	kzfree(drbg->scratchpadbuf);
+ 	drbg->scratchpadbuf = NULL;
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index 594c228d2f02..4a3ac31c07d0 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct amba_device *dev = to_amba_device(_dev);
++	ssize_t len;
+ 
+-	if (!dev->driver_override)
+-		return 0;
+-
+-	return sprintf(buf, "%s\n", dev->driver_override);
++	device_lock(_dev);
++	len = sprintf(buf, "%s\n", dev->driver_override);
++	device_unlock(_dev);
++	return len;
+ }
+ 
+ static ssize_t driver_override_store(struct device *_dev,
+@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
+ 				     const char *buf, size_t count)
+ {
+ 	struct amba_device *dev = to_amba_device(_dev);
+-	char *driver_override, *old = dev->driver_override, *cp;
++	char *driver_override, *old, *cp;
+ 
+-	if (count > PATH_MAX)
++	/* We need to keep extra room for a newline */
++	if (count >= (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+ 	driver_override = kstrndup(buf, count, GFP_KERNEL);
+@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
+ 	if (cp)
+ 		*cp = '\0';
+ 
++	device_lock(_dev);
++	old = dev->driver_override;
+ 	if (strlen(driver_override)) {
+ 		dev->driver_override = driver_override;
+ 	} else {
+ 	       kfree(driver_override);
+ 	       dev->driver_override = NULL;
+ 	}
++	device_unlock(_dev);
+ 
+ 	kfree(old);
+ 
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 764b63a5aade..e578eee31589 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
+ 			else
+ 				return_error = BR_DEAD_REPLY;
+ 			mutex_unlock(&context->context_mgr_node_lock);
++			if (target_node && target_proc == proc) {
++				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
++						  proc->pid, thread->pid);
++				return_error = BR_FAILED_REPLY;
++				return_error_param = -EINVAL;
++				return_error_line = __LINE__;
++				goto err_invalid_target_handle;
++			}
+ 		}
+ 		if (!target_node) {
+ 			/*
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 38729baed6ee..8f4e11842c60 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -261,6 +261,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/workqueue.h>
+ #include <linux/irq.h>
++#include <linux/ratelimit.h>
+ #include <linux/syscalls.h>
+ #include <linux/completion.h>
+ #include <linux/uuid.h>
+@@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
+ static void process_random_ready_list(void);
+ static void _get_random_bytes(void *buf, int nbytes);
+ 
++static struct ratelimit_state unseeded_warning =
++	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
++static struct ratelimit_state urandom_warning =
++	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
++
++static int ratelimit_disable __read_mostly;
++
++module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
++MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
++
+ /**********************************************************************
+  *
+  * OS independent entropy store.   Here are the functions which handle
+@@ -787,6 +798,39 @@ static void crng_initialize(struct crng_state *crng)
+ 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ }
+ 
++#ifdef CONFIG_NUMA
++static void do_numa_crng_init(struct work_struct *work)
++{
++	int i;
++	struct crng_state *crng;
++	struct crng_state **pool;
++
++	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
++	for_each_online_node(i) {
++		crng = kmalloc_node(sizeof(struct crng_state),
++				    GFP_KERNEL | __GFP_NOFAIL, i);
++		spin_lock_init(&crng->lock);
++		crng_initialize(crng);
++		pool[i] = crng;
++	}
++	mb();
++	if (cmpxchg(&crng_node_pool, NULL, pool)) {
++		for_each_node(i)
++			kfree(pool[i]);
++		kfree(pool);
++	}
++}
++
++static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
++
++static void numa_crng_init(void)
++{
++	schedule_work(&numa_crng_init_work);
++}
++#else
++static void numa_crng_init(void) {}
++#endif
++
+ /*
+  * crng_fast_load() can be called by code in the interrupt service
+  * path.  So we can't afford to dilly-dally.
+@@ -893,10 +937,23 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ 	spin_unlock_irqrestore(&crng->lock, flags);
+ 	if (crng == &primary_crng && crng_init < 2) {
+ 		invalidate_batched_entropy();
++		numa_crng_init();
+ 		crng_init = 2;
+ 		process_random_ready_list();
+ 		wake_up_interruptible(&crng_init_wait);
+ 		pr_notice("random: crng init done\n");
++		if (unseeded_warning.missed) {
++			pr_notice("random: %d get_random_xx warning(s) missed "
++				  "due to ratelimiting\n",
++				  unseeded_warning.missed);
++			unseeded_warning.missed = 0;
++		}
++		if (urandom_warning.missed) {
++			pr_notice("random: %d urandom warning(s) missed "
++				  "due to ratelimiting\n",
++				  urandom_warning.missed);
++			urandom_warning.missed = 0;
++		}
+ 	}
+ }
+ 
+@@ -1540,8 +1597,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ 	print_once = true;
+ #endif
+-	pr_notice("random: %s called from %pS with crng_init=%d\n",
+-		  func_name, caller, crng_init);
++	if (__ratelimit(&unseeded_warning))
++		pr_notice("random: %s called from %pS with crng_init=%d\n",
++			  func_name, caller, crng_init);
+ }
+ 
+ /*
+@@ -1731,29 +1789,14 @@ static void init_std_data(struct entropy_store *r)
+  */
+ static int rand_initialize(void)
+ {
+-#ifdef CONFIG_NUMA
+-	int i;
+-	struct crng_state *crng;
+-	struct crng_state **pool;
+-#endif
+-
+ 	init_std_data(&input_pool);
+ 	init_std_data(&blocking_pool);
+ 	crng_initialize(&primary_crng);
+ 	crng_global_init_time = jiffies;
+-
+-#ifdef CONFIG_NUMA
+-	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+-	for_each_online_node(i) {
+-		crng = kmalloc_node(sizeof(struct crng_state),
+-				    GFP_KERNEL | __GFP_NOFAIL, i);
+-		spin_lock_init(&crng->lock);
+-		crng_initialize(crng);
+-		pool[i] = crng;
++	if (ratelimit_disable) {
++		urandom_warning.interval = 0;
++		unseeded_warning.interval = 0;
+ 	}
+-	mb();
+-	crng_node_pool = pool;
+-#endif
+ 	return 0;
+ }
+ early_initcall(rand_initialize);
+@@ -1821,9 +1864,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+ 
+ 	if (!crng_ready() && maxwarn > 0) {
+ 		maxwarn--;
+-		printk(KERN_NOTICE "random: %s: uninitialized urandom read "
+-		       "(%zd bytes read)\n",
+-		       current->comm, nbytes);
++		if (__ratelimit(&urandom_warning))
++			printk(KERN_NOTICE "random: %s: uninitialized "
++			       "urandom read (%zd bytes read)\n",
++			       current->comm, nbytes);
+ 		spin_lock_irqsave(&primary_crng.lock, flags);
+ 		crng_init_cnt = 0;
+ 		spin_unlock_irqrestore(&primary_crng.lock, flags);
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 468f06134012..21085515814f 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
+ 	}
+ }
+ 
+-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
++static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
+ 				     int pages)
+ {
+ 	struct port_buffer *buf;
+@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+ 		return buf;
+ 	}
+ 
+-	if (is_rproc_serial(vq->vdev)) {
++	if (is_rproc_serial(vdev)) {
+ 		/*
+ 		 * Allocate DMA memory from ancestor. When a virtio
+ 		 * device is created by remoteproc, the DMA memory is
+ 		 * associated with the grandparent device:
+ 		 * vdev => rproc => platform-dev.
+ 		 */
+-		if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
++		if (!vdev->dev.parent || !vdev->dev.parent->parent)
+ 			goto free_buf;
+-		buf->dev = vq->vdev->dev.parent->parent;
++		buf->dev = vdev->dev.parent->parent;
+ 
+ 		/* Increase device refcnt to avoid freeing it */
+ 		get_device(buf->dev);
+@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+ 
+ 	count = min((size_t)(32 * 1024), count);
+ 
+-	buf = alloc_buf(port->out_vq, count, 0);
++	buf = alloc_buf(port->portdev->vdev, count, 0);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
+ 	if (ret < 0)
+ 		goto error_out;
+ 
+-	buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
++	buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
+ 	if (!buf) {
+ 		ret = -ENOMEM;
+ 		goto error_out;
+@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+ 
+ 	nr_added_bufs = 0;
+ 	do {
+-		buf = alloc_buf(vq, PAGE_SIZE, 0);
++		buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+ 		if (!buf)
+ 			break;
+ 
+@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
+ {
+ 	char debugfs_name[16];
+ 	struct port *port;
+-	struct port_buffer *buf;
+ 	dev_t devt;
+ 	unsigned int nr_added_bufs;
+ 	int err;
+@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
+ 	return 0;
+ 
+ free_inbufs:
+-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+-		free_buf(buf, true);
+ free_device:
+ 	device_destroy(pdrvdata.class, port->dev->devt);
+ free_cdev:
+@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
+ 
+ static void remove_port_data(struct port *port)
+ {
+-	struct port_buffer *buf;
+-
+ 	spin_lock_irq(&port->inbuf_lock);
+ 	/* Remove unused data this port might have received. */
+ 	discard_port_data(port);
+ 	spin_unlock_irq(&port->inbuf_lock);
+ 
+-	/* Remove buffers we queued up for the Host to send us data in. */
+-	do {
+-		spin_lock_irq(&port->inbuf_lock);
+-		buf = virtqueue_detach_unused_buf(port->in_vq);
+-		spin_unlock_irq(&port->inbuf_lock);
+-		if (buf)
+-			free_buf(buf, true);
+-	} while (buf);
+-
+ 	spin_lock_irq(&port->outvq_lock);
+ 	reclaim_consumed_buffers(port);
+ 	spin_unlock_irq(&port->outvq_lock);
+-
+-	/* Free pending buffers from the out-queue. */
+-	do {
+-		spin_lock_irq(&port->outvq_lock);
+-		buf = virtqueue_detach_unused_buf(port->out_vq);
+-		spin_unlock_irq(&port->outvq_lock);
+-		if (buf)
+-			free_buf(buf, true);
+-	} while (buf);
+ }
+ 
+ /*
+@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
+ 	spin_unlock(&portdev->c_ivq_lock);
+ }
+ 
++static void flush_bufs(struct virtqueue *vq, bool can_sleep)
++{
++	struct port_buffer *buf;
++	unsigned int len;
++
++	while ((buf = virtqueue_get_buf(vq, &len)))
++		free_buf(buf, can_sleep);
++}
++
+ static void out_intr(struct virtqueue *vq)
+ {
+ 	struct port *port;
+ 
+ 	port = find_port_by_vq(vq->vdev->priv, vq);
+-	if (!port)
++	if (!port) {
++		flush_bufs(vq, false);
+ 		return;
++	}
+ 
+ 	wake_up_interruptible(&port->waitqueue);
+ }
+@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
+ 	unsigned long flags;
+ 
+ 	port = find_port_by_vq(vq->vdev->priv, vq);
+-	if (!port)
++	if (!port) {
++		flush_bufs(vq, false);
+ 		return;
++	}
+ 
+ 	spin_lock_irqsave(&port->inbuf_lock, flags);
+ 	port->inbuf = get_inbuf(port);
+@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
+ 
+ static void remove_vqs(struct ports_device *portdev)
+ {
++	struct virtqueue *vq;
++
++	virtio_device_for_each_vq(portdev->vdev, vq) {
++		struct port_buffer *buf;
++
++		flush_bufs(vq, true);
++		while ((buf = virtqueue_detach_unused_buf(vq)))
++			free_buf(buf, true);
++	}
+ 	portdev->vdev->config->del_vqs(portdev->vdev);
+ 	kfree(portdev->in_vqs);
+ 	kfree(portdev->out_vqs);
+ }
+ 
+-static void remove_controlq_data(struct ports_device *portdev)
++static void virtcons_remove(struct virtio_device *vdev)
+ {
+-	struct port_buffer *buf;
+-	unsigned int len;
++	struct ports_device *portdev;
++	struct port *port, *port2;
+ 
+-	if (!use_multiport(portdev))
+-		return;
++	portdev = vdev->priv;
+ 
+-	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+-		free_buf(buf, true);
++	spin_lock_irq(&pdrvdata_lock);
++	list_del(&portdev->list);
++	spin_unlock_irq(&pdrvdata_lock);
+ 
+-	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+-		free_buf(buf, true);
++	/* Disable interrupts for vqs */
++	vdev->config->reset(vdev);
++	/* Finish up work that's lined up */
++	if (use_multiport(portdev))
++		cancel_work_sync(&portdev->control_work);
++	else
++		cancel_work_sync(&portdev->config_work);
++
++	list_for_each_entry_safe(port, port2, &portdev->ports, list)
++		unplug_port(port);
++
++	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
++
++	/*
++	 * When yanking out a device, we immediately lose the
++	 * (device-side) queues.  So there's no point in keeping the
++	 * guest side around till we drop our final reference.  This
++	 * also means that any ports which are in an open state will
++	 * have to just stop using the port, as the vqs are going
++	 * away.
++	 */
++	remove_vqs(portdev);
++	kfree(portdev);
+ }
+ 
+ /*
+@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 
+ 	spin_lock_init(&portdev->ports_lock);
+ 	INIT_LIST_HEAD(&portdev->ports);
++	INIT_LIST_HEAD(&portdev->list);
+ 
+ 	virtio_device_ready(portdev->vdev);
+ 
+@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 		if (!nr_added_bufs) {
+ 			dev_err(&vdev->dev,
+ 				"Error allocating buffers for control queue\n");
+-			err = -ENOMEM;
+-			goto free_vqs;
++			/*
++			 * The host might want to notify mgmt sw about device
++			 * add failure.
++			 */
++			__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
++					   VIRTIO_CONSOLE_DEVICE_READY, 0);
++			/* Device was functional: we need full cleanup. */
++			virtcons_remove(vdev);
++			return -ENOMEM;
+ 		}
+ 	} else {
+ 		/*
+@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 
+ 	return 0;
+ 
+-free_vqs:
+-	/* The host might want to notify mgmt sw about device add failure */
+-	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+-			   VIRTIO_CONSOLE_DEVICE_READY, 0);
+-	remove_vqs(portdev);
+ free_chrdev:
+ 	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+ free:
+@@ -2132,43 +2155,6 @@ static int virtcons_probe(struct virtio_device *vdev)
+ 	return err;
+ }
+ 
+-static void virtcons_remove(struct virtio_device *vdev)
+-{
+-	struct ports_device *portdev;
+-	struct port *port, *port2;
+-
+-	portdev = vdev->priv;
+-
+-	spin_lock_irq(&pdrvdata_lock);
+-	list_del(&portdev->list);
+-	spin_unlock_irq(&pdrvdata_lock);
+-
+-	/* Disable interrupts for vqs */
+-	vdev->config->reset(vdev);
+-	/* Finish up work that's lined up */
+-	if (use_multiport(portdev))
+-		cancel_work_sync(&portdev->control_work);
+-	else
+-		cancel_work_sync(&portdev->config_work);
+-
+-	list_for_each_entry_safe(port, port2, &portdev->ports, list)
+-		unplug_port(port);
+-
+-	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+-
+-	/*
+-	 * When yanking out a device, we immediately lose the
+-	 * (device-side) queues.  So there's no point in keeping the
+-	 * guest side around till we drop our final reference.  This
+-	 * also means that any ports which are in an open state will
+-	 * have to just stop using the port, as the vqs are going
+-	 * away.
+-	 */
+-	remove_controlq_data(portdev);
+-	remove_vqs(portdev);
+-	kfree(portdev);
+-}
+-
+ static struct virtio_device_id id_table[] = {
+ 	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
+ 	{ 0 },
+@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
+ 	 */
+ 	if (use_multiport(portdev))
+ 		virtqueue_disable_cb(portdev->c_ivq);
+-	remove_controlq_data(portdev);
+ 
+ 	list_for_each_entry(port, &portdev->ports, list) {
+ 		virtqueue_disable_cb(port->in_vq);
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 29cdec198657..422e1fc38b43 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
+ 
+ 	if (!spin_trylock(&gpstates->gpstate_lock))
+ 		return;
++	/*
++	 * If the timer has migrated to the different cpu then bring
++	 * it back to one of the policy->cpus
++	 */
++	if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
++		gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
++		add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
++		spin_unlock(&gpstates->gpstate_lock);
++		return;
++	}
+ 
+ 	/*
+ 	 * If PMCR was last updated was using fast_swtich then
+@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
+ 	if (gpstate_idx != gpstates->last_lpstate_idx)
+ 		queue_gpstate_timer(gpstates);
+ 
++	set_pstate(&freq_data);
+ 	spin_unlock(&gpstates->gpstate_lock);
+-
+-	/* Timer may get migrated to a different cpu on cpu hot unplug */
+-	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+ }
+ 
+ /*
+diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
+index eb0da6572720..e0459002eb71 100644
+--- a/drivers/crypto/ccp/sp-dev.c
++++ b/drivers/crypto/ccp/sp-dev.c
+@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
+ 		goto unlock;
+ 
+ 	list_for_each_entry(i, &sp_units, entry) {
+-		if (i->psp_data)
++		if (i->psp_data && i->get_psp_master_device) {
++			ret = i->get_psp_master_device();
+ 			break;
++		}
+ 	}
+ 
+-	if (i->get_psp_master_device)
+-		ret = i->get_psp_master_device();
+ unlock:
+ 	write_unlock_irqrestore(&sp_unit_lock, flags);
+ 	return ret;
+diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
+index 14f14efdf0d5..06d212a3d49d 100644
+--- a/drivers/fpga/altera-ps-spi.c
++++ b/drivers/fpga/altera-ps-spi.c
+@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
+ 
+ 	conf->data = of_id->data;
+ 	conf->spi = spi;
+-	conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
++	conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
+ 	if (IS_ERR(conf->config)) {
+ 		dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
+ 			PTR_ERR(conf->config));
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 4e694ae9f308..45cc4d572897 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
+ static const u32 vgpr_init_regs[] =
+ {
+ 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
+-	mmCOMPUTE_RESOURCE_LIMITS, 0,
++	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
+ 	mmCOMPUTE_NUM_THREAD_X, 256*4,
+ 	mmCOMPUTE_NUM_THREAD_Y, 1,
+ 	mmCOMPUTE_NUM_THREAD_Z, 1,
++	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
+ 	mmCOMPUTE_PGM_RSRC2, 20,
+ 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
+ static const u32 sgpr1_init_regs[] =
+ {
+ 	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
+-	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
++	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
+ 	mmCOMPUTE_NUM_THREAD_X, 256*5,
+ 	mmCOMPUTE_NUM_THREAD_Y, 1,
+ 	mmCOMPUTE_NUM_THREAD_Z, 1,
++	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ 	mmCOMPUTE_PGM_RSRC2, 20,
+ 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
+ 	mmCOMPUTE_NUM_THREAD_X, 256*5,
+ 	mmCOMPUTE_NUM_THREAD_Y, 1,
+ 	mmCOMPUTE_NUM_THREAD_Z, 1,
++	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
+ 	mmCOMPUTE_PGM_RSRC2, 20,
+ 	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
+ 	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 8a6e6fbc78cd..2e94881d4f7f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4506,6 +4506,7 @@ static int dm_update_crtcs_state(struct dc *dc,
+ 		struct amdgpu_dm_connector *aconnector = NULL;
+ 		struct drm_connector_state *new_con_state = NULL;
+ 		struct dm_connector_state *dm_conn_state = NULL;
++		struct drm_plane_state *new_plane_state = NULL;
+ 
+ 		new_stream = NULL;
+ 
+@@ -4513,6 +4514,13 @@ static int dm_update_crtcs_state(struct dc *dc,
+ 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ 		acrtc = to_amdgpu_crtc(crtc);
+ 
++		new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
++
++		if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
++			ret = -EINVAL;
++			goto fail;
++		}
++
+ 		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ 
+ 		/* TODO This hack should go away */
+@@ -4685,7 +4693,7 @@ static int dm_update_planes_state(struct dc *dc,
+ 			if (!dm_old_crtc_state->stream)
+ 				continue;
+ 
+-			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
++			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ 					plane->base.id, old_plane_crtc->base.id);
+ 
+ 			if (!dc_remove_plane_from_context(
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+index 422055080df4..54a25fb048fb 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+@@ -400,14 +400,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+ {
+ 	int src;
+ 	struct irq_list_head *lh;
++	unsigned long irq_table_flags;
+ 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+-
+ 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+-
++		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ 		/* The handler was removed from the table,
+ 		 * it means it is safe to flush all the 'work'
+ 		 * (because no code can schedule a new one). */
+ 		lh = &adev->dm.irq_handler_list_low_tab[src];
++		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ 		flush_work(&lh->work);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 93421dad21bd..160933c16461 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -157,6 +157,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
+ 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ 	struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ 
++	if (amdgpu_dm_connector->edid) {
++		kfree(amdgpu_dm_connector->edid);
++		amdgpu_dm_connector->edid = NULL;
++	}
++
+ 	drm_encoder_cleanup(&amdgpu_encoder->base);
+ 	kfree(amdgpu_encoder);
+ 	drm_connector_cleanup(connector);
+@@ -183,28 +188,22 @@ static int dm_connector_update_modes(struct drm_connector *connector,
+ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+ {
+ 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+-	struct edid *edid;
+ 	struct dc_sink *dc_sink;
+ 	struct dc_sink_init_data init_params = {
+ 			.link = aconnector->dc_link,
+ 			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ 
++	/* FIXME none of this is safe. we shouldn't touch aconnector here in
++	 * atomic_check
++	 */
++
+ 	/*
+ 	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
+ 	 */
+ 	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
+ 		return;
+ 
+-	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+-
+-	if (!edid) {
+-		drm_mode_connector_update_edid_property(
+-			&aconnector->base,
+-			NULL);
+-		return;
+-	}
+-
+-	aconnector->edid = edid;
++	ASSERT(aconnector->edid);
+ 
+ 	dc_sink = dc_link_add_remote_sink(
+ 		aconnector->dc_link,
+@@ -217,9 +216,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+ 
+ 	amdgpu_dm_add_sink_to_freesync_module(
+ 			connector, aconnector->edid);
+-
+-	drm_mode_connector_update_edid_property(
+-					&aconnector->base, aconnector->edid);
+ }
+ 
+ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+@@ -426,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 		dc_sink_release(aconnector->dc_sink);
+ 		aconnector->dc_sink = NULL;
+ 	}
+-	if (aconnector->edid) {
+-		kfree(aconnector->edid);
+-		aconnector->edid = NULL;
+-	}
+-
+-	drm_mode_connector_update_edid_property(
+-			&aconnector->base,
+-			NULL);
+ 
+ 	aconnector->mst_connected = false;
+ }
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 4f751a9d71a3..2368ad0b3f4d 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -4450,6 +4450,7 @@ drm_reset_display_info(struct drm_connector *connector)
+ 	info->max_tmds_clock = 0;
+ 	info->dvi_dual = false;
+ 	info->has_hdmi_infoframe = false;
++	memset(&info->hdmi, 0, sizeof(info->hdmi));
+ 
+ 	info->non_desktop = 0;
+ }
+@@ -4461,17 +4462,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
+ 
+ 	u32 quirks = edid_get_quirks(edid);
+ 
++	drm_reset_display_info(connector);
++
+ 	info->width_mm = edid->width_cm * 10;
+ 	info->height_mm = edid->height_cm * 10;
+ 
+-	/* driver figures it out in this case */
+-	info->bpc = 0;
+-	info->color_formats = 0;
+-	info->cea_rev = 0;
+-	info->max_tmds_clock = 0;
+-	info->dvi_dual = false;
+-	info->has_hdmi_infoframe = false;
+-
+ 	info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ 
+ 	DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index 1704c8897afd..fd58647fbff3 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -1946,10 +1946,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+ 		}
+ 	}
+ 
+-	/* According to BSpec, "The CD clock frequency must be at least twice
++	/*
++	 * According to BSpec, "The CD clock frequency must be at least twice
+ 	 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
++	 *
++	 * FIXME: Check the actual, not default, BCLK being used.
++	 *
++	 * FIXME: This does not depend on ->has_audio because the higher CDCLK
++	 * is required for audio probe, also when there are no audio capable
++	 * displays connected at probe time. This leads to unnecessarily high
++	 * CDCLK when audio is not required.
++	 *
++	 * FIXME: This limit is only applied when there are displays connected
++	 * at probe time. If we probe without displays, we'll still end up using
++	 * the platform minimum CDCLK, failing audio probe.
+ 	 */
+-	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
++	if (INTEL_GEN(dev_priv) >= 9)
+ 		min_cdclk = max(2 * 96000, min_cdclk);
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index da48af11eb6b..0cf33034a8ba 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -801,7 +801,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
+ 		return;
+ 
+ 	intel_fbdev_sync(ifbdev);
+-	if (ifbdev->vma)
++	if (ifbdev->vma || ifbdev->helper.deferred_setup)
+ 		drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
+index d758da6156a8..9faee4875ddf 100644
+--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
++++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
+@@ -624,19 +624,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
+ 
+ 	DRM_DEBUG_KMS("Enabling DC6\n");
+ 
+-	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
++	/* Wa Display #1183: skl,kbl,cfl */
++	if (IS_GEN9_BC(dev_priv))
++		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
++			   SKL_SELECT_ALTERNATE_DC_EXIT);
+ 
++	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ }
+ 
+ void skl_disable_dc6(struct drm_i915_private *dev_priv)
+ {
+ 	DRM_DEBUG_KMS("Disabling DC6\n");
+ 
+-	/* Wa Display #1183: skl,kbl,cfl */
+-	if (IS_GEN9_BC(dev_priv))
+-		I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+-			   SKL_SELECT_ALTERNATE_DC_EXIT);
+-
+ 	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ }
+ 
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 9eb96fb2c147..26a2da1f712d 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -291,7 +291,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
+ 	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
+ 	if (ret == -ENOSPC) {
+ 		spin_unlock(&vgdev->ctrlq.qlock);
+-		wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
++		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
+ 		spin_lock(&vgdev->ctrlq.qlock);
+ 		goto retry;
+ 	} else {
+@@ -366,7 +366,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
+ 	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
+ 	if (ret == -ENOSPC) {
+ 		spin_unlock(&vgdev->cursorq.qlock);
+-		wait_event(vgdev->cursorq.ack_queue, vq->num_free);
++		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
+ 		spin_lock(&vgdev->cursorq.qlock);
+ 		goto retry;
+ 	} else {
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 5e1b68cbcd0a..e1b603ca0170 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -45,6 +45,7 @@
+ #define I82802AB	0x00ad
+ #define I82802AC	0x00ac
+ #define PF38F4476	0x881c
++#define M28F00AP30	0x8963
+ /* STMicroelectronics chips */
+ #define M50LPW080       0x002F
+ #define M50FLW080A	0x0080
+@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ 		extp->MinorVersion = '1';
+ }
+ 
++static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
++{
++	/*
++	 * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t
++	 * Erase Supend for their small Erase Blocks(0x8000)
++	 */
++	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
++		return 1;
++	return 0;
++}
++
+ static inline struct cfi_pri_intelext *
+ read_pri_intelext(struct map_info *map, __u16 adr)
+ {
+@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+ 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
+ 			goto sleep;
+ 
++		/* Do not allow suspend iff read/write to EB address */
++		if ((adr & chip->in_progress_block_mask) ==
++		    chip->in_progress_block_addr)
++			goto sleep;
++
++		/* do not suspend small EBs, buggy Micron Chips */
++		if (cfi_is_micron_28F00AP30(cfi, chip) &&
++		    (chip->in_progress_block_mask == ~(0x8000-1)))
++			goto sleep;
+ 
+ 		/* Erase suspend */
+-		map_write(map, CMD(0xB0), adr);
++		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+ 
+ 		/* If the flash has finished erasing, then 'erase suspend'
+ 		 * appears to make some (28F320) flash devices switch to
+ 		 * 'read' mode.  Make sure that we switch to 'read status'
+ 		 * mode so we get the right data. --rmk
+ 		 */
+-		map_write(map, CMD(0x70), adr);
++		map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ 		chip->oldstate = FL_ERASING;
+ 		chip->state = FL_ERASE_SUSPENDING;
+ 		chip->erase_suspended = 1;
+ 		for (;;) {
+-			status = map_read(map, adr);
++			status = map_read(map, chip->in_progress_block_addr);
+ 			if (map_word_andequal(map, status, status_OK, status_OK))
+ 			        break;
+ 
+@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+ 		   sending the 0x70 (Read Status) command to an erasing
+ 		   chip and expecting it to be ignored, that's what we
+ 		   do. */
+-		map_write(map, CMD(0xd0), adr);
+-		map_write(map, CMD(0x70), adr);
++		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
++		map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ 		chip->oldstate = FL_READY;
+ 		chip->state = FL_ERASING;
+ 		break;
+@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ 	map_write(map, CMD(0xD0), adr);
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
++	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(len - 1);
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, len,
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index 56aa6b75213d..d524a64ed754 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ 			goto sleep;
+ 
+-		/* We could check to see if we're trying to access the sector
+-		 * that is currently being erased. However, no user will try
+-		 * anything like that so we just wait for the timeout. */
++		/* Do not allow suspend iff read/write to EB address */
++		if ((adr & chip->in_progress_block_mask) ==
++		    chip->in_progress_block_addr)
++			goto sleep;
+ 
+ 		/* Erase suspend */
+ 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
+@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
+ 	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(map->size - 1);
+ 
+ 	INVALIDATE_CACHE_UDELAY(map, chip,
+ 				adr, map->size,
+@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
+ 	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(len - 1);
+ 
+ 	INVALIDATE_CACHE_UDELAY(map, chip,
+ 				adr, len,
+diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
+index 2196f2a233d6..795f868fe1f7 100644
+--- a/drivers/mtd/nand/marvell_nand.c
++++ b/drivers/mtd/nand/marvell_nand.c
+@@ -2277,29 +2277,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ 	/*
+ 	 * The legacy "num-cs" property indicates the number of CS on the only
+ 	 * chip connected to the controller (legacy bindings does not support
+-	 * more than one chip). CS are only incremented one by one while the RB
+-	 * pin is always the #0.
++	 * more than one chip). The CS and RB pins are always the #0.
+ 	 *
+ 	 * When not using legacy bindings, a couple of "reg" and "nand-rb"
+ 	 * properties must be filled. For each chip, expressed as a subnode,
+ 	 * "reg" points to the CS lines and "nand-rb" to the RB line.
+ 	 */
+-	if (pdata) {
++	if (pdata || nfc->caps->legacy_of_bindings) {
+ 		nsels = 1;
+-	} else if (nfc->caps->legacy_of_bindings &&
+-		   !of_get_property(np, "num-cs", &nsels)) {
+-		dev_err(dev, "missing num-cs property\n");
+-		return -EINVAL;
+-	} else if (!of_get_property(np, "reg", &nsels)) {
+-		dev_err(dev, "missing reg property\n");
+-		return -EINVAL;
+-	}
+-
+-	if (!pdata)
+-		nsels /= sizeof(u32);
+-	if (!nsels) {
+-		dev_err(dev, "invalid reg property size\n");
+-		return -EINVAL;
++	} else {
++		nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
++		if (nsels <= 0) {
++			dev_err(dev, "missing/invalid reg property\n");
++			return -EINVAL;
++		}
+ 	}
+ 
+ 	/* Alloc the nand chip structure */
+diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
+index c5bee00b7f5e..76761b841f1f 100644
+--- a/drivers/mtd/nand/tango_nand.c
++++ b/drivers/mtd/nand/tango_nand.c
+@@ -643,7 +643,7 @@ static int tango_nand_probe(struct platform_device *pdev)
+ 
+ 	writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+ 
+-	clk = clk_get(&pdev->dev, NULL);
++	clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+ 
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+index 4b8e9183489a..5872f31eaa60 100644
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ 	void __iomem *reg_base = cqspi->iobase;
+ 	void __iomem *ahb_base = cqspi->ahb_base;
+ 	unsigned int remaining = n_rx;
++	unsigned int mod_bytes = n_rx % 4;
+ 	unsigned int bytes_to_read = 0;
++	u8 *rxbuf_end = rxbuf + n_rx;
+ 	int ret = 0;
+ 
+ 	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ 		}
+ 
+ 		while (bytes_to_read != 0) {
++			unsigned int word_remain = round_down(remaining, 4);
++
+ 			bytes_to_read *= cqspi->fifo_width;
+ 			bytes_to_read = bytes_to_read > remaining ?
+ 					remaining : bytes_to_read;
+-			ioread32_rep(ahb_base, rxbuf,
+-				     DIV_ROUND_UP(bytes_to_read, 4));
++			bytes_to_read = round_down(bytes_to_read, 4);
++			/* Read 4 byte word chunks then single bytes */
++			if (bytes_to_read) {
++				ioread32_rep(ahb_base, rxbuf,
++					     (bytes_to_read / 4));
++			} else if (!word_remain && mod_bytes) {
++				unsigned int temp = ioread32(ahb_base);
++
++				bytes_to_read = mod_bytes;
++				memcpy(rxbuf, &temp, min((unsigned int)
++							 (rxbuf_end - rxbuf),
++							 bytes_to_read));
++			}
+ 			rxbuf += bytes_to_read;
+ 			remaining -= bytes_to_read;
+ 			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 84aa9d676375..6da20b9688f7 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
+ 	int offset;
+ 	const char *p, *q, *options = NULL;
+ 	int l;
+-	const struct earlycon_id *match;
++	const struct earlycon_id **p_match;
+ 	const void *fdt = initial_boot_params;
+ 
+ 	offset = fdt_path_offset(fdt, "/chosen");
+@@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
+ 		return 0;
+ 	}
+ 
+-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
++	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
++	     p_match++) {
++		const struct earlycon_id *match = *p_match;
++
+ 		if (!match->compatible[0])
+ 			continue;
+ 
+diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
+index b04d37b3c5de..9abf549631b4 100644
+--- a/drivers/pci/host/pci-aardvark.c
++++ b/drivers/pci/host/pci-aardvark.c
+@@ -29,6 +29,7 @@
+ #define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT	5
+ #define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE		(0 << 11)
+ #define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT	12
++#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ		0x2
+ #define PCIE_CORE_LINK_CTRL_STAT_REG				0xd0
+ #define     PCIE_CORE_LINK_L0S_ENTRY				BIT(0)
+ #define     PCIE_CORE_LINK_TRAINING				BIT(5)
+@@ -100,7 +101,8 @@
+ #define PCIE_ISR1_MASK_REG			(CONTROL_BASE_ADDR + 0x4C)
+ #define     PCIE_ISR1_POWER_STATE_CHANGE	BIT(4)
+ #define     PCIE_ISR1_FLUSH			BIT(5)
+-#define     PCIE_ISR1_ALL_MASK			GENMASK(5, 4)
++#define     PCIE_ISR1_INTX_ASSERT(val)		BIT(8 + (val))
++#define     PCIE_ISR1_ALL_MASK			GENMASK(11, 4)
+ #define PCIE_MSI_ADDR_LOW_REG			(CONTROL_BASE_ADDR + 0x50)
+ #define PCIE_MSI_ADDR_HIGH_REG			(CONTROL_BASE_ADDR + 0x54)
+ #define PCIE_MSI_STATUS_REG			(CONTROL_BASE_ADDR + 0x58)
+@@ -172,8 +174,6 @@
+ #define PCIE_CONFIG_WR_TYPE0			0xa
+ #define PCIE_CONFIG_WR_TYPE1			0xb
+ 
+-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
+-#define PCIE_BDF(dev)				(dev << 4)
+ #define PCIE_CONF_BUS(bus)			(((bus) & 0xff) << 20)
+ #define PCIE_CONF_DEV(dev)			(((dev) & 0x1f) << 15)
+ #define PCIE_CONF_FUNC(fun)			(((fun) & 0x7)	<< 12)
+@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+ 	reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
+ 		(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
+ 		PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
+-		PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
++		(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
++		 PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
+ 	advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
+ 
+ 	/* Program PCIe Control 2 to disable strict ordering */
+@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 	u32 reg;
+ 	int ret;
+ 
+-	if (PCI_SLOT(devfn) != 0) {
++	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
+ 		*val = 0xffffffff;
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 	}
+@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
+ 	advk_writel(pcie, reg, PIO_CTRL);
+ 
+ 	/* Program the address registers */
+-	reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
++	reg = PCIE_CONF_ADDR(bus->number, devfn, where);
+ 	advk_writel(pcie, reg, PIO_ADDR_LS);
+ 	advk_writel(pcie, 0, PIO_ADDR_MS);
+ 
+@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ 	int offset;
+ 	int ret;
+ 
+-	if (PCI_SLOT(devfn) != 0)
++	if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
+ 		return PCIBIOS_DEVICE_NOT_FOUND;
+ 
+ 	if (where % size)
+@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
+ 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 	u32 mask;
+ 
+-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+-	mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
+-	advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
++	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
++	mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
++	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ }
+ 
+ static void advk_pcie_irq_unmask(struct irq_data *d)
+@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
+ 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ 	u32 mask;
+ 
+-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+-	mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
+-	advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
++	mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
++	mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
++	advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
+ }
+ 
+ static int advk_pcie_irq_map(struct irq_domain *h,
+@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
+ 
+ static void advk_pcie_handle_int(struct advk_pcie *pcie)
+ {
+-	u32 val, mask, status;
++	u32 isr0_val, isr0_mask, isr0_status;
++	u32 isr1_val, isr1_mask, isr1_status;
+ 	int i, virq;
+ 
+-	val = advk_readl(pcie, PCIE_ISR0_REG);
+-	mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+-	status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
++	isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
++	isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
++	isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
++
++	isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
++	isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
++	isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
+ 
+-	if (!status) {
+-		advk_writel(pcie, val, PCIE_ISR0_REG);
++	if (!isr0_status && !isr1_status) {
++		advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
++		advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
+ 		return;
+ 	}
+ 
+ 	/* Process MSI interrupts */
+-	if (status & PCIE_ISR0_MSI_INT_PENDING)
++	if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
+ 		advk_pcie_handle_msi(pcie);
+ 
+ 	/* Process legacy interrupts */
+ 	for (i = 0; i < PCI_NUM_INTX; i++) {
+-		if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
++		if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
+ 			continue;
+ 
+-		advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
+-			    PCIE_ISR0_REG);
++		advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
++			    PCIE_ISR1_REG);
+ 
+ 		virq = irq_find_mapping(pcie->irq_domain, i);
+ 		generic_handle_irq(virq);
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 3bed6beda051..eede34e5ada2 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -945,10 +945,11 @@ static int pci_pm_freeze(struct device *dev)
+ 	 * devices should not be touched during freeze/thaw transitions,
+ 	 * however.
+ 	 */
+-	if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
++	if (!dev_pm_smart_suspend_and_suspended(dev)) {
+ 		pm_runtime_resume(dev);
++		pci_dev->state_saved = false;
++	}
+ 
+-	pci_dev->state_saved = false;
+ 	if (pm->freeze) {
+ 		int error;
+ 
+diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
+index 304e891e35fc..60f2250fd96b 100644
+--- a/drivers/rtc/rtc-opal.c
++++ b/drivers/rtc/rtc-opal.c
+@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+ 
+ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+-	long rc = OPAL_BUSY;
++	s64 rc = OPAL_BUSY;
+ 	int retries = 10;
+ 	u32 y_m_d;
+ 	u64 h_m_s_ms;
+@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ 
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+-		if (rc == OPAL_BUSY_EVENT)
++		if (rc == OPAL_BUSY_EVENT) {
++			msleep(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
+-		else if (retries-- && (rc == OPAL_HARDWARE
+-				       || rc == OPAL_INTERNAL_ERROR))
+-			msleep(10);
+-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+-			break;
++		} else if (rc == OPAL_BUSY) {
++			msleep(OPAL_BUSY_DELAY_MS);
++		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
++			if (retries--) {
++				msleep(10); /* Wait 10ms before retry */
++				rc = OPAL_BUSY; /* go around again */
++			}
++		}
+ 	}
+ 
+ 	if (rc != OPAL_SUCCESS)
+@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ 
+ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+-	long rc = OPAL_BUSY;
++	s64 rc = OPAL_BUSY;
+ 	int retries = 10;
+ 	u32 y_m_d = 0;
+ 	u64 h_m_s_ms = 0;
+ 
+ 	tm_to_opal(tm, &y_m_d, &h_m_s_ms);
++
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_rtc_write(y_m_d, h_m_s_ms);
+-		if (rc == OPAL_BUSY_EVENT)
++		if (rc == OPAL_BUSY_EVENT) {
++			msleep(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
+-		else if (retries-- && (rc == OPAL_HARDWARE
+-				       || rc == OPAL_INTERNAL_ERROR))
+-			msleep(10);
+-		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
+-			break;
++		} else if (rc == OPAL_BUSY) {
++			msleep(OPAL_BUSY_DELAY_MS);
++		} else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
++			if (retries--) {
++				msleep(10); /* Wait 10ms before retry */
++				rc = OPAL_BUSY; /* go around again */
++			}
++		}
+ 	}
+ 
+ 	return rc == OPAL_SUCCESS ? 0 : -EIO;
+diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
+index c30420c517b1..e96b85579f21 100644
+--- a/drivers/s390/cio/vfio_ccw_fsm.c
++++ b/drivers/s390/cio/vfio_ccw_fsm.c
+@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
+ 	int ccode;
+ 	__u8 lpm;
+ 	unsigned long flags;
++	int ret;
+ 
+ 	sch = private->sch;
+ 
+ 	spin_lock_irqsave(sch->lock, flags);
+ 	private->state = VFIO_CCW_STATE_BUSY;
+-	spin_unlock_irqrestore(sch->lock, flags);
+ 
+ 	orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
+ 
+@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
+ 		 * Initialize device status information
+ 		 */
+ 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
+-		return 0;
++		ret = 0;
++		break;
+ 	case 1:		/* Status pending */
+ 	case 2:		/* Busy */
+-		return -EBUSY;
++		ret = -EBUSY;
++		break;
+ 	case 3:		/* Device/path not operational */
+ 	{
+ 		lpm = orb->cmd.lpm;
+@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
+ 			sch->lpm = 0;
+ 
+ 		if (cio_update_schib(sch))
+-			return -ENODEV;
+-
+-		return sch->lpm ? -EACCES : -ENODEV;
++			ret = -ENODEV;
++		else
++			ret = sch->lpm ? -EACCES : -ENODEV;
++		break;
+ 	}
+ 	default:
+-		return ccode;
++		ret = ccode;
+ 	}
++	spin_unlock_irqrestore(sch->lock, flags);
++	return ret;
+ }
+ 
+ static void fsm_notoper(struct vfio_ccw_private *private,
+diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
+index c44d7c7ffc92..1754f55e2fac 100644
+--- a/drivers/sbus/char/oradax.c
++++ b/drivers/sbus/char/oradax.c
+@@ -3,7 +3,7 @@
+  *
+  * This program is free software: you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 3 of the License, or
++ * the Free Software Foundation, either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * This program is distributed in the hope that it will be useful,
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 1fa84d6a0f8b..d19b41bcebea 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
+ 				break;	/* standby */
+ 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
+ 				break;	/* unavailable */
++			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
++				break;	/* sanitize in progress */
+ 			/*
+ 			 * Issue command to spin up drive when not ready
+ 			 */
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 89cf4498f535..973a497739f0 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
+  *
+  * Check that all zones of the device are equal. The last zone can however
+  * be smaller. The zone size must also be a power of two number of LBAs.
++ *
++ * Returns the zone size in bytes upon success or an error code upon failure.
+  */
+-static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
++static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ {
+ 	u64 zone_blocks = 0;
+ 	sector_t block = 0;
+@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ 	int ret;
+ 	u8 same;
+ 
+-	sdkp->zone_blocks = 0;
+-
+ 	/* Get a buffer */
+ 	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ 	if (!buf)
+@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ 
+ 		/* Parse zone descriptors */
+ 		while (rec < buf + buf_len) {
+-			zone_blocks = get_unaligned_be64(&rec[8]);
+-			if (sdkp->zone_blocks == 0) {
+-				sdkp->zone_blocks = zone_blocks;
+-			} else if (zone_blocks != sdkp->zone_blocks &&
+-				   (block + zone_blocks < sdkp->capacity
+-				    || zone_blocks > sdkp->zone_blocks)) {
+-				zone_blocks = 0;
++			u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
++
++			if (zone_blocks == 0) {
++				zone_blocks = this_zone_blocks;
++			} else if (this_zone_blocks != zone_blocks &&
++				   (block + this_zone_blocks < sdkp->capacity
++				    || this_zone_blocks > zone_blocks)) {
++				this_zone_blocks = 0;
+ 				goto out;
+ 			}
+-			block += zone_blocks;
++			block += this_zone_blocks;
+ 			rec += 64;
+ 		}
+ 
+@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ 
+ 	} while (block < sdkp->capacity);
+ 
+-	zone_blocks = sdkp->zone_blocks;
+-
+ out:
+ 	if (!zone_blocks) {
+ 		if (sdkp->first_scan)
+@@ -488,8 +487,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ 				  "Zone size too large\n");
+ 		ret = -ENODEV;
+ 	} else {
+-		sdkp->zone_blocks = zone_blocks;
+-		sdkp->zone_shift = ilog2(zone_blocks);
++		ret = zone_blocks;
+ 	}
+ 
+ out_free:
+@@ -500,21 +498,21 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+ 
+ /**
+  * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
+- * @sdkp: The disk of the bitmap
++ * @nr_zones: Number of zones to allocate space for.
++ * @numa_node: NUMA node to allocate the memory from.
+  */
+-static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
++static inline unsigned long *
++sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
+ {
+-	struct request_queue *q = sdkp->disk->queue;
+-
+-	return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
+-			    * sizeof(unsigned long),
+-			    GFP_KERNEL, q->node);
++	return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
++			    GFP_KERNEL, numa_node);
+ }
+ 
+ /**
+  * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
+  * @sdkp: disk used
+  * @buf: report reply buffer
++ * @zone_shift: logarithm base 2 of the number of blocks in a zone
+  * @seq_zone_bitamp: bitmap of sequential zones to set
+  *
+  * Parse reported zone descriptors in @buf to identify sequential zones and
+@@ -524,7 +522,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+  * Return the LBA after the last zone reported.
+  */
+ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
+-				     unsigned int buflen,
++				     unsigned int buflen, u32 zone_shift,
+ 				     unsigned long *seq_zones_bitmap)
+ {
+ 	sector_t lba, next_lba = sdkp->capacity;
+@@ -543,7 +541,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ 		if (type != ZBC_ZONE_TYPE_CONV &&
+ 		    cond != ZBC_ZONE_COND_READONLY &&
+ 		    cond != ZBC_ZONE_COND_OFFLINE)
+-			set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
++			set_bit(lba >> zone_shift, seq_zones_bitmap);
+ 		next_lba = lba + get_unaligned_be64(&rec[8]);
+ 		rec += 64;
+ 	}
+@@ -552,12 +550,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ }
+ 
+ /**
+- * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
++ * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
+  * @sdkp: target disk
++ * @zone_shift: logarithm base 2 of the number of blocks in a zone
++ * @nr_zones: number of zones to set up a seq zone bitmap for
+  *
+  * Allocate a zone bitmap and initialize it by identifying sequential zones.
+  */
+-static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
++static unsigned long *
++sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
++			      u32 nr_zones)
+ {
+ 	struct request_queue *q = sdkp->disk->queue;
+ 	unsigned long *seq_zones_bitmap;
+@@ -565,9 +567,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+ 	unsigned char *buf;
+ 	int ret = -ENOMEM;
+ 
+-	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
++	seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
+ 	if (!seq_zones_bitmap)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 
+ 	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+ 	if (!buf)
+@@ -578,7 +580,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+ 		if (ret)
+ 			goto out;
+ 		lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+-					   seq_zones_bitmap);
++					   zone_shift, seq_zones_bitmap);
+ 	}
+ 
+ 	if (lba != sdkp->capacity) {
+@@ -590,12 +592,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+ 	kfree(buf);
+ 	if (ret) {
+ 		kfree(seq_zones_bitmap);
+-		return ret;
++		return ERR_PTR(ret);
+ 	}
+-
+-	q->seq_zones_bitmap = seq_zones_bitmap;
+-
+-	return 0;
++	return seq_zones_bitmap;
+ }
+ 
+ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
+@@ -611,44 +610,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
+ 	q->nr_zones = 0;
+ }
+ 
+-static int sd_zbc_setup(struct scsi_disk *sdkp)
++static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
+ {
+ 	struct request_queue *q = sdkp->disk->queue;
++	u32 zone_shift = ilog2(zone_blocks);
++	u32 nr_zones;
+ 	int ret;
+ 
+-	/* READ16/WRITE16 is mandatory for ZBC disks */
+-	sdkp->device->use_16_for_rw = 1;
+-	sdkp->device->use_10_for_rw = 0;
+-
+ 	/* chunk_sectors indicates the zone size */
+-	blk_queue_chunk_sectors(sdkp->disk->queue,
+-			logical_to_sectors(sdkp->device, sdkp->zone_blocks));
+-	sdkp->nr_zones =
+-		round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
++	blk_queue_chunk_sectors(q,
++			logical_to_sectors(sdkp->device, zone_blocks));
++	nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
+ 
+ 	/*
+ 	 * Initialize the device request queue information if the number
+ 	 * of zones changed.
+ 	 */
+-	if (sdkp->nr_zones != q->nr_zones) {
+-
+-		sd_zbc_cleanup(sdkp);
+-
+-		q->nr_zones = sdkp->nr_zones;
+-		if (sdkp->nr_zones) {
+-			q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
+-			if (!q->seq_zones_wlock) {
++	if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
++		unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
++		size_t zone_bitmap_size;
++
++		if (nr_zones) {
++			seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
++								   q->node);
++			if (!seq_zones_wlock) {
+ 				ret = -ENOMEM;
+ 				goto err;
+ 			}
+ 
+-			ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
+-			if (ret) {
+-				sd_zbc_cleanup(sdkp);
++			seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
++							zone_shift, nr_zones);
++			if (IS_ERR(seq_zones_bitmap)) {
++				ret = PTR_ERR(seq_zones_bitmap);
++				kfree(seq_zones_wlock);
+ 				goto err;
+ 			}
+ 		}
+-
++		zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
++			sizeof(unsigned long);
++		blk_mq_freeze_queue(q);
++		if (q->nr_zones != nr_zones) {
++			/* READ16/WRITE16 is mandatory for ZBC disks */
++			sdkp->device->use_16_for_rw = 1;
++			sdkp->device->use_10_for_rw = 0;
++
++			sdkp->zone_blocks = zone_blocks;
++			sdkp->zone_shift = zone_shift;
++			sdkp->nr_zones = nr_zones;
++			q->nr_zones = nr_zones;
++			swap(q->seq_zones_wlock, seq_zones_wlock);
++			swap(q->seq_zones_bitmap, seq_zones_bitmap);
++		} else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
++				  zone_bitmap_size) != 0) {
++			memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
++			       zone_bitmap_size);
++		}
++		blk_mq_unfreeze_queue(q);
++		kfree(seq_zones_wlock);
++		kfree(seq_zones_bitmap);
+ 	}
+ 
+ 	return 0;
+@@ -660,6 +679,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
+ 
+ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+ {
++	int64_t zone_blocks;
+ 	int ret;
+ 
+ 	if (!sd_is_zoned(sdkp))
+@@ -696,12 +716,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+ 	 * Check zone size: only devices with a constant zone size (except
+ 	 * an eventual last runt zone) that is a power of 2 are supported.
+ 	 */
+-	ret = sd_zbc_check_zone_size(sdkp);
+-	if (ret)
++	zone_blocks = sd_zbc_check_zone_size(sdkp);
++	ret = -EFBIG;
++	if (zone_blocks != (u32)zone_blocks)
++		goto err;
++	ret = zone_blocks;
++	if (ret < 0)
+ 		goto err;
+ 
+ 	/* The drive satisfies the kernel restrictions: set it up */
+-	ret = sd_zbc_setup(sdkp);
++	ret = sd_zbc_setup(sdkp, zone_blocks);
+ 	if (ret)
+ 		goto err;
+ 
+diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
+index 884419c37e84..457ea1f8db30 100644
+--- a/drivers/slimbus/messaging.c
++++ b/drivers/slimbus/messaging.c
+@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
+ 		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
+ 	};
+ 
+-	clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
++	code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+ 
+ 	return sizetocode[code - 1];
+ }
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 3b3e1f6632d7..1dbe27c9946c 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -121,6 +121,9 @@ struct gsm_dlci {
+ 	struct mutex mutex;
+ 
+ 	/* Link layer */
++	int mode;
++#define DLCI_MODE_ABM		0	/* Normal Asynchronous Balanced Mode */
++#define DLCI_MODE_ADM		1	/* Asynchronous Disconnected Mode */
+ 	spinlock_t lock;	/* Protects the internal state */
+ 	struct timer_list t1;	/* Retransmit timer for SABM and UA */
+ 	int retries;
+@@ -1364,7 +1367,13 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
+ 	ctrl->data = data;
+ 	ctrl->len = clen;
+ 	gsm->pending_cmd = ctrl;
+-	gsm->cretries = gsm->n2;
++
++	/* If DLCI0 is in ADM mode skip retries, it won't respond */
++	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
++		gsm->cretries = 1;
++	else
++		gsm->cretries = gsm->n2;
++
+ 	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ 	gsm_control_transmit(gsm, ctrl);
+ 	spin_unlock_irqrestore(&gsm->control_lock, flags);
+@@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
+ 			if (debug & 8)
+ 				pr_info("DLCI %d opening in ADM mode.\n",
+ 					dlci->addr);
++			dlci->mode = DLCI_MODE_ADM;
+ 			gsm_dlci_open(dlci);
+ 		} else {
+ 			gsm_dlci_close(dlci);
+@@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+ static int gsm_carrier_raised(struct tty_port *port)
+ {
+ 	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
++	struct gsm_mux *gsm = dlci->gsm;
++
+ 	/* Not yet open so no carrier info */
+ 	if (dlci->state != DLCI_OPEN)
+ 		return 0;
+ 	if (debug & 2)
+ 		return 1;
++
++	/*
++	 * Basic mode with control channel in ADM mode may not respond
++	 * to CMD_MSC at all and modem_rx is empty.
++	 */
++	if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
++	    !dlci->modem_rx)
++		return 1;
++
+ 	return dlci->modem_rx & TIOCM_CD;
+ }
+ 
+diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
+index a24278380fec..22683393a0f2 100644
+--- a/drivers/tty/serial/earlycon.c
++++ b/drivers/tty/serial/earlycon.c
+@@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
+  */
+ int __init setup_earlycon(char *buf)
+ {
+-	const struct earlycon_id *match;
++	const struct earlycon_id **p_match;
+ 
+ 	if (!buf || !buf[0])
+ 		return -EINVAL;
+@@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
+ 	if (early_con.flags & CON_ENABLED)
+ 		return -EALREADY;
+ 
+-	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
++	for (p_match = __earlycon_table; p_match < __earlycon_table_end;
++	     p_match++) {
++		const struct earlycon_id *match = *p_match;
+ 		size_t len = strlen(match->name);
+ 
+ 		if (strncmp(buf, match->name, len))
+diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
+index a100e98259d7..03d26aabb0c4 100644
+--- a/drivers/tty/serial/mvebu-uart.c
++++ b/drivers/tty/serial/mvebu-uart.c
+@@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
+ 		termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
+ 		termios->c_cflag &= CREAD | CBAUD;
+ 		termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+-		termios->c_lflag = old->c_lflag;
+ 	}
+ 
+ 	spin_unlock_irqrestore(&port->lock, flags);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 63114ea35ec1..7c838b90a31d 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
+ 
+ 	kref_init(&tty->kref);
+ 	tty->magic = TTY_MAGIC;
+-	tty_ldisc_init(tty);
++	if (tty_ldisc_init(tty)) {
++		kfree(tty);
++		return NULL;
++	}
+ 	tty->session = NULL;
+ 	tty->pgrp = NULL;
+ 	mutex_init(&tty->legacy_mutex);
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 050f4d650891..fb7329ab2b37 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ 			return ERR_CAST(ldops);
+ 	}
+ 
+-	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
+-	if (ld == NULL) {
+-		put_ldops(ldops);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
++	/*
++	 * There is no way to handle allocation failure of only 16 bytes.
++	 * Let's simplify error handling and save more memory.
++	 */
++	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
+ 	ld->ops = ldops;
+ 	ld->tty = tty;
+ 
+@@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
+ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+ {
+ 	/* There is an outstanding reference here so this is safe */
+-	old = tty_ldisc_get(tty, old->ops->num);
+-	WARN_ON(IS_ERR(old));
+-	tty->ldisc = old;
+-	tty_set_termios_ldisc(tty, old->ops->num);
+-	if (tty_ldisc_open(tty, old) < 0) {
+-		tty_ldisc_put(old);
++	if (tty_ldisc_failto(tty, old->ops->num) < 0) {
++		const char *name = tty_name(tty);
++
++		pr_warn("Falling back ldisc for %s.\n", name);
+ 		/* The traditional behaviour is to fall back to N_TTY, we
+ 		   want to avoid falling back to N_NULL unless we have no
+ 		   choice to avoid the risk of breaking anything */
+ 		if (tty_ldisc_failto(tty, N_TTY) < 0 &&
+ 		    tty_ldisc_failto(tty, N_NULL) < 0)
+-			panic("Couldn't open N_NULL ldisc for %s.",
+-			      tty_name(tty));
++			panic("Couldn't open N_NULL ldisc for %s.", name);
+ 	}
+ }
+ 
+@@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
+  *	the tty structure is not completely set up when this call is made.
+  */
+ 
+-void tty_ldisc_init(struct tty_struct *tty)
++int tty_ldisc_init(struct tty_struct *tty)
+ {
+ 	struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
+ 	if (IS_ERR(ld))
+-		panic("n_tty: init_tty");
++		return PTR_ERR(ld);
+ 	tty->ldisc = ld;
++	return 0;
+ }
+ 
+ /**
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index fc32391a34d5..15736b462c55 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2365,6 +2365,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
+ 
+ 	spin_lock_irqsave (&hcd_root_hub_lock, flags);
+ 	if (hcd->rh_registered) {
++		pm_wakeup_event(&hcd->self.root_hub->dev, 0);
+ 		set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
+ 		queue_work(pm_wq, &hcd->wakeup_work);
+ 	}
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index c5c1f6cf3228..83c58a20d16f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
+ 		unsigned int portnum)
+ {
+ 	struct usb_hub *hub;
++	struct usb_port *port_dev;
+ 
+ 	if (!hdev)
+ 		return;
+ 
+ 	hub = usb_hub_to_struct_hub(hdev);
+ 	if (hub) {
++		port_dev = hub->ports[portnum - 1];
++		if (port_dev && port_dev->child)
++			pm_wakeup_event(&port_dev->child->dev, 0);
++
+ 		set_bit(portnum, hub->wakeup_bits);
+ 		kick_hub_wq(hub);
+ 	}
+@@ -3430,8 +3435,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 
+ 	/* Skip the initial Clear-Suspend step for a remote wakeup */
+ 	status = hub_port_status(hub, port1, &portstatus, &portchange);
+-	if (status == 0 && !port_is_suspended(hub, portstatus))
++	if (status == 0 && !port_is_suspended(hub, portstatus)) {
++		if (portchange & USB_PORT_STAT_C_SUSPEND)
++			pm_wakeup_event(&udev->dev, 0);
+ 		goto SuspendCleared;
++	}
+ 
+ 	/* see 7.1.7.7; affects power usage, but not budgeting */
+ 	if (hub_is_superspeed(hub->hdev))
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 54b019e267c5..9f5f78b7bb55 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -40,6 +40,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+ 
++	/* HP v222w 16GB Mini USB Drive */
++	{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index 75f0b92694ba..50203e77c925 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
+ 
+ void xhci_dbc_tty_unregister_driver(void)
+ {
+-	tty_unregister_driver(dbc_tty_driver);
+-	put_tty_driver(dbc_tty_driver);
+-	dbc_tty_driver = NULL;
++	if (dbc_tty_driver) {
++		tty_unregister_driver(dbc_tty_driver);
++		put_tty_driver(dbc_tty_driver);
++		dbc_tty_driver = NULL;
++	}
+ }
+ 
+ static void dbc_rx_push(unsigned long _port)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index d9f831b67e57..93ce34bce7b5 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ 		xhci->quirks |= XHCI_AMD_PLL_FIX;
+ 
+-	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++		(pdev->device == 0x15e0 ||
++		 pdev->device == 0x15e1 ||
++		 pdev->device == 0x43bb))
+ 		xhci->quirks |= XHCI_SUSPEND_DELAY;
+ 
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 6652e2d5bd2e..c435df29cdb8 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -419,7 +419,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
+ static struct platform_driver usb_xhci_driver = {
+ 	.probe	= xhci_plat_probe,
+ 	.remove	= xhci_plat_remove,
+-	.shutdown	= usb_hcd_platform_shutdown,
+ 	.driver	= {
+ 		.name = "xhci-hcd",
+ 		.pm = &xhci_plat_pm_ops,
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index a646820f5a78..533f127c30ad 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
+ 		- Fundamental Software dongle.
+ 		- Google USB serial devices
+ 		- HP4x calculators
++		- Libtransistor USB console
+ 		- a number of Motorola phones
+ 		- Motorola Tetra devices
+ 		- Novatel Wireless GPS receivers
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index de1e759dd512..eb6c26cbe579 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
++	{ USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
+ 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ 	{ } /* Terminating Entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 87202ad5a50d..7ea221d42dba 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ 		return ftdi_jtag_probe(serial);
+ 
+ 	if (udev->product &&
+-		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
++		(!strcmp(udev->product, "Arrow USB Blaster") ||
++		 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ 		 !strcmp(udev->product, "SNAP Connect E10")))
+ 		return ftdi_jtag_probe(serial);
+ 
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 4ef79e29cb26..40864c2bd9dc 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
+ 					0x01) }
+ DEVICE(google, GOOGLE_IDS);
+ 
++/* Libtransistor USB console */
++#define LIBTRANSISTOR_IDS()			\
++	{ USB_DEVICE(0x1209, 0x8b00) }
++DEVICE(libtransistor, LIBTRANSISTOR_IDS);
++
+ /* ViVOpay USB Serial Driver */
+ #define VIVOPAY_IDS()			\
+ 	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
+@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&funsoft_device,
+ 	&flashloader_device,
+ 	&google_device,
++	&libtransistor_device,
+ 	&vivopay_device,
+ 	&moto_modem_device,
+ 	&motorola_tetra_device,
+@@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
+ 	FUNSOFT_IDS(),
+ 	FLASHLOADER_IDS(),
+ 	GOOGLE_IDS(),
++	LIBTRANSISTOR_IDS(),
+ 	VIVOPAY_IDS(),
+ 	MOTO_IDS(),
+ 	MOTOROLA_TETRA_IDS(),
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 79046fe66426..8d95b3a168d2 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -28,7 +28,7 @@
+  * difficult to estimate the time it takes for the system to process the command
+  * before it is actually passed to the PPM.
+  */
+-#define UCSI_TIMEOUT_MS		1000
++#define UCSI_TIMEOUT_MS		5000
+ 
+ /*
+  * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index c31c8402a0c5..d41d0cdeec0f 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ 	if (!bid)
+ 		return -ENODEV;
+ 
++	/* device_attach() callers should hold parent lock for USB */
++	if (bid->udev->dev.parent)
++		device_lock(bid->udev->dev.parent);
+ 	ret = device_attach(&bid->udev->dev);
++	if (bid->udev->dev.parent)
++		device_unlock(bid->udev->dev.parent);
+ 	if (ret < 0) {
+ 		dev_err(&bid->udev->dev, "rebind failed\n");
+ 		return ret;
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index 473fb8a87289..bf8afe9b5883 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -243,7 +243,7 @@ enum usbip_side {
+ #define	VUDC_EVENT_ERROR_USB	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+ #define	VUDC_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+ 
+-#define	VDEV_EVENT_REMOVED	(USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
++#define	VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+ #define	VDEV_EVENT_DOWN		(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+ #define	VDEV_EVENT_ERROR_TCP	(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+ #define	VDEV_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
+index 5b4c0864ad92..5d88917c9631 100644
+--- a/drivers/usb/usbip/usbip_event.c
++++ b/drivers/usb/usbip/usbip_event.c
+@@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
+ 			unset_event(ud, USBIP_EH_UNUSABLE);
+ 		}
+ 
+-		/* Stop the error handler. */
+-		if (ud->event & USBIP_EH_BYE)
+-			usbip_dbg_eh("removed %p\n", ud);
+-
+ 		wake_up(&ud->eh_waitq);
+ 	}
+ }
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index 20e3d4609583..d11f3f8dad40 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		usbip_dbg_vhci_rh(" ClearHubFeature\n");
+ 		break;
+ 	case ClearPortFeature:
++		if (rhport < 0)
++			goto error;
+ 		switch (wValue) {
+ 		case USB_PORT_FEAT_SUSPEND:
+ 			if (hcd->speed == HCD_USB3) {
+@@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				goto error;
+ 			}
+ 
++			if (rhport < 0)
++				goto error;
++
+ 			vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+ 			usbip_dbg_vhci_rh(
+ 				" SetPortFeature: USB_PORT_FEAT_POWER\n");
++			if (rhport < 0)
++				goto error;
+ 			if (hcd->speed == HCD_USB3)
+ 				vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
+ 			else
+@@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		case USB_PORT_FEAT_BH_PORT_RESET:
+ 			usbip_dbg_vhci_rh(
+ 				" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
++			if (rhport < 0)
++				goto error;
+ 			/* Applicable only for USB3.0 hub */
+ 			if (hcd->speed != HCD_USB3) {
+ 				pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
+@@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		case USB_PORT_FEAT_RESET:
+ 			usbip_dbg_vhci_rh(
+ 				" SetPortFeature: USB_PORT_FEAT_RESET\n");
++			if (rhport < 0)
++				goto error;
+ 			/* if it's already enabled, disable */
+ 			if (hcd->speed == HCD_USB3) {
+ 				vhci_hcd->port_status[rhport] = 0;
+@@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 		default:
+ 			usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
+ 					  wValue);
++			if (rhport < 0)
++				goto error;
+ 			if (hcd->speed == HCD_USB3) {
+ 				if ((vhci_hcd->port_status[rhport] &
+ 				     USB_SS_PORT_STAT_POWER) != 0) {
+diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
+index 190dbf8cfcb5..7411a535fda2 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.c
++++ b/drivers/virt/vboxguest/vboxguest_core.c
+@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
+ 	}
+ 
+ out:
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	kfree(pages);
+ }
+ 
+@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
+ 
+ 	rc = vbg_req_perform(gdev, req);
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 
+ 	if (rc < 0) {
+ 		vbg_err("%s error: %d\n", __func__, rc);
+@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
+ 	ret = vbg_status_code_to_errno(rc);
+ 
+ out_free:
+-	kfree(req2);
+-	kfree(req1);
++	vbg_req_free(req2, sizeof(*req2));
++	vbg_req_free(req1, sizeof(*req1));
+ 	return ret;
+ }
+ 
+@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
+ 	if (rc == VERR_NOT_IMPLEMENTED)	/* Compatibility with older hosts. */
+ 		rc = VINF_SUCCESS;
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 
+ 	return vbg_status_code_to_errno(rc);
+ }
+@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
+ 	rc = vbg_req_perform(gdev, req);
+ 	do_div(req->interval_ns, 1000000); /* ns -> ms */
+ 	gdev->heartbeat_interval_ms = req->interval_ns;
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 
+ 	return vbg_status_code_to_errno(rc);
+ }
+@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	/*
+-	 * Preallocate the request to use it from the timer callback because:
+-	 *    1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
+-	 *       and the timer callback runs at DISPATCH_LEVEL;
+-	 *    2) avoid repeated allocations.
+-	 */
+ 	gdev->guest_heartbeat_req = vbg_req_alloc(
+ 					sizeof(*gdev->guest_heartbeat_req),
+ 					VMMDEVREQ_GUEST_HEARTBEAT);
+@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
+ {
+ 	del_timer_sync(&gdev->heartbeat_timer);
+ 	vbg_heartbeat_host_config(gdev, false);
+-	kfree(gdev->guest_heartbeat_req);
+-
++	vbg_req_free(gdev->guest_heartbeat_req,
++		     sizeof(*gdev->guest_heartbeat_req));
+ }
+ 
+ /**
+@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
+ 	if (rc < 0)
+ 		vbg_err("%s error, rc: %d\n", __func__, rc);
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	return vbg_status_code_to_errno(rc);
+ }
+ 
+@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
+ 
+ out:
+ 	mutex_unlock(&gdev->session_mutex);
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 
+ 	return ret;
+ }
+@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
+ 	if (rc < 0)
+ 		vbg_err("%s error, rc: %d\n", __func__, rc);
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	return vbg_status_code_to_errno(rc);
+ }
+ 
+@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
+ 
+ out:
+ 	mutex_unlock(&gdev->session_mutex);
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 
+ 	return ret;
+ }
+@@ -749,7 +743,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
+ 	}
+ 
+ out:
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	return ret;
+ }
+ 
+@@ -847,11 +841,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
+ 	return 0;
+ 
+ err_free_reqs:
+-	kfree(gdev->mouse_status_req);
+-	kfree(gdev->ack_events_req);
+-	kfree(gdev->cancel_req);
+-	kfree(gdev->mem_balloon.change_req);
+-	kfree(gdev->mem_balloon.get_req);
++	vbg_req_free(gdev->mouse_status_req,
++		     sizeof(*gdev->mouse_status_req));
++	vbg_req_free(gdev->ack_events_req,
++		     sizeof(*gdev->ack_events_req));
++	vbg_req_free(gdev->cancel_req,
++		     sizeof(*gdev->cancel_req));
++	vbg_req_free(gdev->mem_balloon.change_req,
++		     sizeof(*gdev->mem_balloon.change_req));
++	vbg_req_free(gdev->mem_balloon.get_req,
++		     sizeof(*gdev->mem_balloon.get_req));
+ 	return ret;
+ }
+ 
+@@ -872,11 +871,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
+ 	vbg_reset_host_capabilities(gdev);
+ 	vbg_core_set_mouse_status(gdev, 0);
+ 
+-	kfree(gdev->mouse_status_req);
+-	kfree(gdev->ack_events_req);
+-	kfree(gdev->cancel_req);
+-	kfree(gdev->mem_balloon.change_req);
+-	kfree(gdev->mem_balloon.get_req);
++	vbg_req_free(gdev->mouse_status_req,
++		     sizeof(*gdev->mouse_status_req));
++	vbg_req_free(gdev->ack_events_req,
++		     sizeof(*gdev->ack_events_req));
++	vbg_req_free(gdev->cancel_req,
++		     sizeof(*gdev->cancel_req));
++	vbg_req_free(gdev->mem_balloon.change_req,
++		     sizeof(*gdev->mem_balloon.change_req));
++	vbg_req_free(gdev->mem_balloon.get_req,
++		     sizeof(*gdev->mem_balloon.get_req));
+ }
+ 
+ /**
+@@ -1415,7 +1419,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+ 	req->flags = dump->u.in.flags;
+ 	dump->hdr.rc = vbg_req_perform(gdev, req);
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	return 0;
+ }
+ 
+@@ -1513,7 +1517,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
+ 	if (rc < 0)
+ 		vbg_err("%s error, rc: %d\n", __func__, rc);
+ 
+-	kfree(req);
++	vbg_req_free(req, sizeof(*req));
+ 	return vbg_status_code_to_errno(rc);
+ }
+ 
+diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
+index 6c784bf4fa6d..7ad9ec45bfa9 100644
+--- a/drivers/virt/vboxguest/vboxguest_core.h
++++ b/drivers/virt/vboxguest/vboxguest_core.h
+@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
+ 
+ void vbg_linux_mouse_event(struct vbg_dev *gdev);
+ 
++/* Private (non exported) functions form vboxguest_utils.c */
++void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
++void vbg_req_free(void *req, size_t len);
++int vbg_req_perform(struct vbg_dev *gdev, void *req);
++int vbg_hgcm_call32(
++	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
++	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
++	int *vbox_status);
++
+ #endif
+diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
+index 82e280d38cc2..398d22693234 100644
+--- a/drivers/virt/vboxguest/vboxguest_linux.c
++++ b/drivers/virt/vboxguest/vboxguest_linux.c
+@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ 	struct vbg_session *session = filp->private_data;
+ 	size_t returned_size, size;
+ 	struct vbg_ioctl_hdr hdr;
++	bool is_vmmdev_req;
+ 	int ret = 0;
+ 	void *buf;
+ 
+@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ 	if (size > SZ_16M)
+ 		return -E2BIG;
+ 
+-	/* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
+-	buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
++	/*
++	 * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
++	 * the need for a bounce-buffer and another copy later on.
++	 */
++	is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
++			 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
++
++	if (is_vmmdev_req)
++		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
++	else
++		buf = kmalloc(size, GFP_KERNEL);
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ 		ret = -EFAULT;
+ 
+ out:
+-	kfree(buf);
++	if (is_vmmdev_req)
++		vbg_req_free(buf, size);
++	else
++		kfree(buf);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
+index 0f0dab8023cf..bf4474214b4d 100644
+--- a/drivers/virt/vboxguest/vboxguest_utils.c
++++ b/drivers/virt/vboxguest/vboxguest_utils.c
+@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
+ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+ {
+ 	struct vmmdev_request_header *req;
++	int order = get_order(PAGE_ALIGN(len));
+ 
+-	req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
++	req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
+ 	if (!req)
+ 		return NULL;
+ 
+@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+ 	return req;
+ }
+ 
++void vbg_req_free(void *req, size_t len)
++{
++	if (!req)
++		return;
++
++	free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
++}
++
+ /* Note this function returns a VBox status code, not a negative errno!! */
+ int vbg_req_perform(struct vbg_dev *gdev, void *req)
+ {
+@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
+ 		rc = hgcm_connect->header.result;
+ 	}
+ 
+-	kfree(hgcm_connect);
++	vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
+ 
+ 	*vbox_status = rc;
+ 	return 0;
+@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+ 	if (rc >= 0)
+ 		rc = hgcm_disconnect->header.result;
+ 
+-	kfree(hgcm_disconnect);
++	vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
+ 
+ 	*vbox_status = rc;
+ 	return 0;
+@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ 	}
+ 
+ 	if (!leak_it)
+-		kfree(call);
++		vbg_req_free(call, size);
+ 
+ free_bounce_bufs:
+ 	if (bounce_bufs) {
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index 9ceebf30eb22..a82f91d75f29 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -453,6 +453,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
+ 		server->sign = true;
+ 	}
+ 
++	if (cifs_rdma_enabled(server) && server->sign)
++		cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index dfd6fb02b7a3..1c1940d90c96 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ 	wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
+ 	wsize = min_t(unsigned int, wsize, server->max_write);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (server->rdma)
+-		wsize = min_t(unsigned int,
++	if (server->rdma) {
++		if (server->sign)
++			wsize = min_t(unsigned int,
++				wsize, server->smbd_conn->max_fragmented_send_size);
++		else
++			wsize = min_t(unsigned int,
+ 				wsize, server->smbd_conn->max_readwrite_size);
++	}
+ #endif
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ 		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+@@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ 	rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
+ 	rsize = min_t(unsigned int, rsize, server->max_read);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+-	if (server->rdma)
+-		rsize = min_t(unsigned int,
++	if (server->rdma) {
++		if (server->sign)
++			rsize = min_t(unsigned int,
++				rsize, server->smbd_conn->max_fragmented_recv_size);
++		else
++			rsize = min_t(unsigned int,
+ 				rsize, server->smbd_conn->max_readwrite_size);
++	}
+ #endif
+ 
+ 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index af62c75b17c4..8ae6a089489c 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2479,7 +2479,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
+ 	 * If we want to do a RDMA write, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of read request
+ 	 */
+-	if (server->rdma && rdata &&
++	if (server->rdma && rdata && !server->sign &&
+ 		rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
+ 
+ 		struct smbd_buffer_descriptor_v1 *v1;
+@@ -2857,7 +2857,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
+ 	 * If we want to do a server RDMA read, fill in and append
+ 	 * smbd_buffer_descriptor_v1 to the end of write request
+ 	 */
+-	if (server->rdma && wdata->bytes >=
++	if (server->rdma && !server->sign && wdata->bytes >=
+ 		server->smbd_conn->rdma_readwrite_threshold) {
+ 
+ 		struct smbd_buffer_descriptor_v1 *v1;
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 34be5c5d027f..608ce9abd240 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+ 	int start, i, j;
+ 	int max_iov_size =
+ 		info->max_send_size - sizeof(struct smbd_data_transfer);
+-	struct kvec iov[SMBDIRECT_MAX_SGE];
++	struct kvec *iov;
+ 	int rc;
+ 
+ 	info->smbd_send_pending++;
+@@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+ 	}
+ 
+ 	/*
+-	 * This usually means a configuration error
+-	 * We use RDMA read/write for packet size > rdma_readwrite_threshold
+-	 * as long as it's properly configured we should never get into this
+-	 * situation
+-	 */
+-	if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
+-		log_write(ERR, "maximum send segment %x exceeding %x\n",
+-			 rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
+-		rc = -EINVAL;
+-		goto done;
+-	}
+-
+-	/*
+-	 * Remove the RFC1002 length defined in MS-SMB2 section 2.1
+-	 * It is used only for TCP transport
++	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
++	 * It is used only for TCP transport in the iov[0]
+ 	 * In future we may want to add a transport layer under protocol
+ 	 * layer so this will only be issued to TCP transport
+ 	 */
+-	iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
+-	iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
+-	buflen += iov[0].iov_len;
++
++	if (rqst->rq_iov[0].iov_len != 4) {
++		log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
++		return -EINVAL;
++	}
++	iov = &rqst->rq_iov[1];
+ 
+ 	/* total up iov array first */
+-	for (i = 1; i < rqst->rq_nvec; i++) {
+-		iov[i].iov_base = rqst->rq_iov[i].iov_base;
+-		iov[i].iov_len = rqst->rq_iov[i].iov_len;
++	for (i = 0; i < rqst->rq_nvec-1; i++) {
+ 		buflen += iov[i].iov_len;
+ 	}
+ 
+@@ -2194,14 +2182,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+ 						goto done;
+ 				}
+ 				i++;
+-				if (i == rqst->rq_nvec)
++				if (i == rqst->rq_nvec-1)
+ 					break;
+ 			}
+ 			start = i;
+ 			buflen = 0;
+ 		} else {
+ 			i++;
+-			if (i == rqst->rq_nvec) {
++			if (i == rqst->rq_nvec-1) {
+ 				/* send out all remaining vecs */
+ 				remaining_data_length -= buflen;
+ 				log_write(INFO,
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 665661464067..1b5cd3b8617c 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		goto out;
+ 
+ #ifdef CONFIG_CIFS_SMB311
+-	if (ses->status == CifsNew)
++	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ 		smb311_update_preauth_hash(ses, rqst->rq_iov+1,
+ 					   rqst->rq_nvec-1);
+ #endif
+@@ -797,7 +797,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 		*resp_buf_type = CIFS_SMALL_BUFFER;
+ 
+ #ifdef CONFIG_CIFS_SMB311
+-	if (ses->status == CifsNew) {
++	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ 		struct kvec iov = {
+ 			.iov_base = buf + 4,
+ 			.iov_len = get_rfc1002_length(buf)
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f82c4966f4ce..508b905d744d 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	ext4_grpblk_t offset;
+ 	ext4_grpblk_t next_zero_bit;
++	ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
+ 	ext4_fsblk_t blk;
+ 	ext4_fsblk_t group_first_block;
+ 
+@@ -338,20 +339,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+ 	/* check whether block bitmap block number is set */
+ 	blk = ext4_block_bitmap(sb, desc);
+ 	offset = blk - group_first_block;
+-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
++	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
++	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ 		/* bad block bitmap */
+ 		return blk;
+ 
+ 	/* check whether the inode bitmap block number is set */
+ 	blk = ext4_inode_bitmap(sb, desc);
+ 	offset = blk - group_first_block;
+-	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
++	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
++	    !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ 		/* bad block bitmap */
+ 		return blk;
+ 
+ 	/* check whether the inode table block number is set */
+ 	blk = ext4_inode_table(sb, desc);
+ 	offset = blk - group_first_block;
++	if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
++	    EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
++		return blk;
+ 	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
+ 			EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
+ 			EXT4_B2C(sbi, offset));
+@@ -417,6 +423,7 @@ struct buffer_head *
+ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ {
+ 	struct ext4_group_desc *desc;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct buffer_head *bh;
+ 	ext4_fsblk_t bitmap_blk;
+ 	int err;
+@@ -425,6 +432,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ 	if (!desc)
+ 		return ERR_PTR(-EFSCORRUPTED);
+ 	bitmap_blk = ext4_block_bitmap(sb, desc);
++	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
++	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
++		ext4_error(sb, "Invalid block bitmap block %llu in "
++			   "block_group %u", bitmap_blk, block_group);
++		return ERR_PTR(-EFSCORRUPTED);
++	}
+ 	bh = sb_getblk(sb, bitmap_blk);
+ 	if (unlikely(!bh)) {
+ 		ext4_error(sb, "Cannot get buffer for block bitmap - "
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 054416e9d827..a7ca193a7480 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -5334,8 +5334,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ 	stop = le32_to_cpu(extent->ee_block);
+ 
+        /*
+-	 * In case of left shift, Don't start shifting extents until we make
+-	 * sure the hole is big enough to accommodate the shift.
++	* For left shifts, make sure the hole on the left is big enough to
++	* accommodate the shift.  For right shifts, make sure the last extent
++	* won't be shifted beyond EXT_MAX_BLOCKS.
+ 	*/
+ 	if (SHIFT == SHIFT_LEFT) {
+ 		path = ext4_find_extent(inode, start - 1, &path,
+@@ -5355,9 +5356,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+ 
+ 		if ((start == ex_start && shift > ex_start) ||
+ 		    (shift > start - ex_end)) {
+-			ext4_ext_drop_refs(path);
+-			kfree(path);
+-			return -EINVAL;
++			ret = -EINVAL;
++			goto out;
++		}
++	} else {
++		if (shift > EXT_MAX_BLOCKS -
++		    (stop + ext4_ext_get_actual_len(extent))) {
++			ret = -EINVAL;
++			goto out;
+ 		}
+ 	}
+ 
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 3fa93665b4a3..df92e3ec9913 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -122,6 +122,7 @@ static struct buffer_head *
+ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ {
+ 	struct ext4_group_desc *desc;
++	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct buffer_head *bh = NULL;
+ 	ext4_fsblk_t bitmap_blk;
+ 	int err;
+@@ -131,6 +132,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ 		return ERR_PTR(-EFSCORRUPTED);
+ 
+ 	bitmap_blk = ext4_inode_bitmap(sb, desc);
++	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
++	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
++		ext4_error(sb, "Invalid inode bitmap blk %llu in "
++			   "block_group %u", bitmap_blk, block_group);
++		return ERR_PTR(-EFSCORRUPTED);
++	}
+ 	bh = sb_getblk(sb, bitmap_blk);
+ 	if (unlikely(!bh)) {
+ 		ext4_error(sb, "Cannot read inode bitmap - "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 192c5ad09d71..b8dace7abe09 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -5868,5 +5868,6 @@ static void __exit ext4_exit_fs(void)
+ MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
+ MODULE_DESCRIPTION("Fourth Extended Filesystem");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: crc32c");
+ module_init(ext4_init_fs)
+ module_exit(ext4_exit_fs)
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ac311037d7a5..8aa453784402 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
+ 	 */
+ 	ret = start_this_handle(journal, handle, GFP_NOFS);
+ 	if (ret < 0) {
++		handle->h_journal = journal;
+ 		jbd2_journal_free_reserved(handle);
+ 		return ret;
+ 	}
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 1ab0e520d6fc..e17de55c2542 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -179,7 +179,7 @@
+ #endif
+ 
+ #ifdef CONFIG_SERIAL_EARLYCON
+-#define EARLYCON_TABLE() STRUCT_ALIGN();			\
++#define EARLYCON_TABLE() . = ALIGN(8);				\
+ 			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
+ 			 KEEP(*(__earlycon_table))		\
+ 			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
+diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
+index e518e4e3dfb5..4b1548129fa2 100644
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+ 	 * Our PSCI implementation stays the same across versions from
+ 	 * v0.2 onward, only adding the few mandatory functions (such
+ 	 * as FEATURES with 1.0) that are required by newer
+-	 * revisions. It is thus safe to return the latest.
++	 * revisions. It is thus safe to return the latest, unless
++	 * userspace has instructed us otherwise.
+ 	 */
+-	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
++	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
++		if (vcpu->kvm->arch.psci_version)
++			return vcpu->kvm->arch.psci_version;
++
+ 		return KVM_ARM_PSCI_LATEST;
++	}
+ 
+ 	return KVM_ARM_PSCI_0_1;
+ }
+@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+ 
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+ 
++struct kvm_one_reg;
++
++int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
++int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
++int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
++int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
++
+ #endif /* __KVM_ARM_PSCI_H__ */
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index ed63f3b69c12..c9e601dce06f 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -605,6 +605,11 @@ struct request_queue {
+ 	 * initialized by the low level device driver (e.g. scsi/sd.c).
+ 	 * Stacking drivers (device mappers) may or may not initialize
+ 	 * these fields.
++	 *
++	 * Reads of this information must be protected with blk_queue_enter() /
++	 * blk_queue_exit(). Modifying this information is only allowed while
++	 * no requests are being processed. See also blk_mq_freeze_queue() and
++	 * blk_mq_unfreeze_queue().
+ 	 */
+ 	unsigned int		nr_zones;
+ 	unsigned long		*seq_zones_bitmap;
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index b63fa457febd..3529683f691e 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -85,6 +85,7 @@ struct flchip {
+ 	unsigned int write_suspended:1;
+ 	unsigned int erase_suspended:1;
+ 	unsigned long in_progress_block_addr;
++	unsigned long in_progress_block_mask;
+ 
+ 	struct mutex mutex;
+ 	wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index b32df49a3bd5..c4219b9cbb70 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -351,10 +351,10 @@ struct earlycon_id {
+ 	char	name[16];
+ 	char	compatible[128];
+ 	int	(*setup)(struct earlycon_device *, const char *options);
+-} __aligned(32);
++};
+ 
+-extern const struct earlycon_id __earlycon_table[];
+-extern const struct earlycon_id __earlycon_table_end[];
++extern const struct earlycon_id *__earlycon_table[];
++extern const struct earlycon_id *__earlycon_table_end[];
+ 
+ #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
+ #define EARLYCON_USED_OR_UNUSED	__used
+@@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
+ #define EARLYCON_USED_OR_UNUSED	__maybe_unused
+ #endif
+ 
+-#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
+-	static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name)	\
+-	     EARLYCON_USED_OR_UNUSED __section(__earlycon_table)	\
++#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id)		\
++	static const struct earlycon_id unique_id			\
++	     EARLYCON_USED_OR_UNUSED __initconst			\
+ 		= { .name = __stringify(_name),				\
+ 		    .compatible = compat,				\
+-		    .setup = fn  }
++		    .setup = fn  };					\
++	static const struct earlycon_id EARLYCON_USED_OR_UNUSED		\
++		__section(__earlycon_table)				\
++		* const __PASTE(__p, unique_id) = &unique_id
++
++#define OF_EARLYCON_DECLARE(_name, compat, fn)				\
++	_OF_EARLYCON_DECLARE(_name, compat, fn,				\
++			     __UNIQUE_ID(__earlycon_##_name))
+ 
+ #define EARLYCON_DECLARE(_name, fn)	OF_EARLYCON_DECLARE(_name, "", fn)
+ 
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 47f8af22f216..1dd587ba6d88 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
+ extern int tty_set_ldisc(struct tty_struct *tty, int disc);
+ extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
+ extern void tty_ldisc_release(struct tty_struct *tty);
+-extern void tty_ldisc_init(struct tty_struct *tty);
++extern int __must_check tty_ldisc_init(struct tty_struct *tty);
+ extern void tty_ldisc_deinit(struct tty_struct *tty);
+ extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
+ 				 char *f, int count);
+diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
+index c71def6b310f..a240ed2a0372 100644
+--- a/include/linux/vbox_utils.h
++++ b/include/linux/vbox_utils.h
+@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
+ #define vbg_debug pr_debug
+ #endif
+ 
+-/**
+- * Allocate memory for generic request and initialize the request header.
+- *
+- * Return: the allocated memory
+- * @len:		Size of memory block required for the request.
+- * @req_type:		The generic request type.
+- */
+-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+-
+-/**
+- * Perform a generic request.
+- *
+- * Return: VBox status code
+- * @gdev:		The Guest extension device.
+- * @req:		Pointer to the request structure.
+- */
+-int vbg_req_perform(struct vbg_dev *gdev, void *req);
+-
+ int vbg_hgcm_connect(struct vbg_dev *gdev,
+ 		     struct vmmdev_hgcm_service_location *loc,
+ 		     u32 *client_id, int *vbox_status);
+@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
+ 		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
+ 		  u32 parm_count, int *vbox_status);
+ 
+-int vbg_hgcm_call32(
+-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+-	int *vbox_status);
+-
+ /**
+  * Convert a VirtualBox status code to a standard Linux kernel return value.
+  * Return: 0 or negative errno value.
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 988c7355bc22..fa1b5da2804e 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
+ int virtio_device_restore(struct virtio_device *dev);
+ #endif
+ 
++#define virtio_device_for_each_vq(vdev, vq) \
++	list_for_each_entry(vq, &vdev->vqs, list)
++
+ /**
+  * virtio_driver - operations for a virtio I/O driver
+  * @driver: underlying device driver (populate name and owner).
+diff --git a/include/sound/control.h b/include/sound/control.h
+index ca13a44ae9d4..6011a58d3e20 100644
+--- a/include/sound/control.h
++++ b/include/sound/control.h
+@@ -23,6 +23,7 @@
+  */
+ 
+ #include <linux/wait.h>
++#include <linux/nospec.h>
+ #include <sound/asound.h>
+ 
+ #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
+@@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
+ 
+ static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+ {
+-	return id->numid - kctl->id.numid;
++	unsigned int ioff = id->numid - kctl->id.numid;
++	return array_index_nospec(ioff, kctl->count);
+ }
+ 
+ static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+ {
+-	return id->index - kctl->id.index;
++	unsigned int ioff = id->index - kctl->id.index;
++	return array_index_nospec(ioff, kctl->count);
+ }
+ 
+ static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+diff --git a/kernel/module.c b/kernel/module.c
+index e42764acedb4..bbb45c038321 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
+ {
+ 	struct module_sect_attr *sattr =
+ 		container_of(mattr, struct module_sect_attr, mattr);
+-	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
++	return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
++		       (void *)sattr->address : NULL);
+ }
+ 
+ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 29a5733eff83..741eadbeba58 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -797,12 +797,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+ 		goto out;
+ 	}
+ 
+-	hrtimer_set_expires(&ts->sched_timer, tick);
+-
+-	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+-		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+-	else
++	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
++		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
++	} else {
++		hrtimer_set_expires(&ts->sched_timer, tick);
+ 		tick_program_event(tick, 1);
++	}
++
+ out:
+ 	/*
+ 	 * Update the estimated sleep length until the next timer
+diff --git a/lib/kobject.c b/lib/kobject.c
+index afd5a3fc6123..d20a97a7e168 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -232,14 +232,12 @@ static int kobject_add_internal(struct kobject *kobj)
+ 
+ 		/* be noisy on error issues */
+ 		if (error == -EEXIST)
+-			WARN(1, "%s failed for %s with "
+-			     "-EEXIST, don't try to register things with "
+-			     "the same name in the same directory.\n",
+-			     __func__, kobject_name(kobj));
++			pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
++			       __func__, kobject_name(kobj));
+ 		else
+-			WARN(1, "%s failed for %s (error: %d parent: %s)\n",
+-			     __func__, kobject_name(kobj), error,
+-			     parent ? kobject_name(parent) : "'none'");
++			pr_err("%s failed for %s (error: %d parent: %s)\n",
++			       __func__, kobject_name(kobj), error,
++			       parent ? kobject_name(parent) : "'none'");
+ 	} else
+ 		kobj->state_in_sysfs = 1;
+ 
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 8a4d3758030b..02572130a77a 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2531,6 +2531,11 @@ static int try_write(struct ceph_connection *con)
+ 	int ret = 1;
+ 
+ 	dout("try_write start %p state %lu\n", con, con->state);
++	if (con->state != CON_STATE_PREOPEN &&
++	    con->state != CON_STATE_CONNECTING &&
++	    con->state != CON_STATE_NEGOTIATING &&
++	    con->state != CON_STATE_OPEN)
++		return 0;
+ 
+ more:
+ 	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
+@@ -2556,6 +2561,8 @@ static int try_write(struct ceph_connection *con)
+ 	}
+ 
+ more_kvec:
++	BUG_ON(!con->sock);
++
+ 	/* kvec data queued? */
+ 	if (con->out_kvec_left) {
+ 		ret = write_partial_kvec(con);
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index 1547107f4854..4887443f52dd 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
+ 	__open_session(monc);
+ }
+ 
++static void un_backoff(struct ceph_mon_client *monc)
++{
++	monc->hunt_mult /= 2; /* reduce by 50% */
++	if (monc->hunt_mult < 1)
++		monc->hunt_mult = 1;
++	dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
++}
++
+ /*
+  * Reschedule delayed work timer.
+  */
+@@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
+ 		if (!monc->hunting) {
+ 			ceph_con_keepalive(&monc->con);
+ 			__validate_auth(monc);
++			un_backoff(monc);
+ 		}
+ 
+ 		if (is_auth &&
+@@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
+ 		dout("%s found mon%d\n", __func__, monc->cur_mon);
+ 		monc->hunting = false;
+ 		monc->had_a_connection = true;
+-		monc->hunt_mult /= 2; /* reduce by 50% */
+-		if (monc->hunt_mult < 1)
+-			monc->hunt_mult = 1;
++		un_backoff(monc);
++		__schedule_delayed(monc);
+ 	}
+ }
+ 
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index b719d0bd833e..06d7c40af570 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -27,10 +27,11 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
+ 				      s32 __user *src)
+ {
+ 	snd_pcm_sframes_t delay;
++	int err;
+ 
+-	delay = snd_pcm_delay(substream);
+-	if (delay < 0)
+-		return delay;
++	err = snd_pcm_delay(substream, &delay);
++	if (err)
++		return err;
+ 	if (put_user(delay, src))
+ 		return -EFAULT;
+ 	return 0;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index d18b3982548b..5ea0c1a3bbe6 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2687,7 +2687,8 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
+ 	return err;
+ }
+ 		
+-static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
++static int snd_pcm_delay(struct snd_pcm_substream *substream,
++			 snd_pcm_sframes_t *delay)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	int err;
+@@ -2703,7 +2704,9 @@ static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
+ 		n += runtime->delay;
+ 	}
+ 	snd_pcm_stream_unlock_irq(substream);
+-	return err < 0 ? err : n;
++	if (!err)
++		*delay = n;
++	return err;
+ }
+ 		
+ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
+@@ -2746,6 +2749,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
+ 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
+ 	sync_ptr.s.status.tstamp = status->tstamp;
+ 	sync_ptr.s.status.suspended_state = status->suspended_state;
++	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+ 	snd_pcm_stream_unlock_irq(substream);
+ 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+ 		return -EFAULT;
+@@ -2911,11 +2915,13 @@ static int snd_pcm_common_ioctl(struct file *file,
+ 		return snd_pcm_hwsync(substream);
+ 	case SNDRV_PCM_IOCTL_DELAY:
+ 	{
+-		snd_pcm_sframes_t delay = snd_pcm_delay(substream);
++		snd_pcm_sframes_t delay;
+ 		snd_pcm_sframes_t __user *res = arg;
++		int err;
+ 
+-		if (delay < 0)
+-			return delay;
++		err = snd_pcm_delay(substream, &delay);
++		if (err)
++			return err;
+ 		if (put_user(delay, res))
+ 			return -EFAULT;
+ 		return 0;
+@@ -3003,13 +3009,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
+ 	case SNDRV_PCM_IOCTL_DROP:
+ 		return snd_pcm_drop(substream);
+ 	case SNDRV_PCM_IOCTL_DELAY:
+-	{
+-		result = snd_pcm_delay(substream);
+-		if (result < 0)
+-			return result;
+-		*frames = result;
+-		return 0;
+-	}
++		return snd_pcm_delay(substream, frames);
+ 	default:
+ 		return -EINVAL;
+ 	}
+diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
+index c3908862bc8b..86ca584c27b2 100644
+--- a/sound/core/seq/oss/seq_oss_event.c
++++ b/sound/core/seq/oss/seq_oss_event.c
+@@ -26,6 +26,7 @@
+ #include <sound/seq_oss_legacy.h>
+ #include "seq_oss_readq.h"
+ #include "seq_oss_writeq.h"
++#include <linux/nospec.h>
+ 
+ 
+ /*
+@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
+ {
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (!snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	info = &dp->synths[dev];
+ 	switch (info->arg.event_passing) {
+ 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
+ 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
+@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
+ 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
+ 		}
+ 
++		ch = array_index_nospec(ch, info->nr_voices);
+ 		if (note == 255 && info->ch[ch].note >= 0) {
+ 			/* volume control */
+ 			int type;
+@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ {
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (!snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	info = &dp->synths[dev];
+ 	switch (info->arg.event_passing) {
+ 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
+ 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
+@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
+ 		}
+ 
++		ch = array_index_nospec(ch, info->nr_voices);
+ 		if (info->ch[ch].note >= 0) {
+ 			note = info->ch[ch].note;
+ 			info->ch[ch].vel = 0;
+@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ static int
+ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	if (!snd_seq_oss_synth_info(dp, dev))
+ 		return -ENXIO;
+ 	
+ 	ev->type = type;
+@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
+ static int
+ set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	if (!snd_seq_oss_synth_info(dp, dev))
+ 		return -ENXIO;
+ 	
+ 	ev->type = type;
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index b30b2139e3f0..9debd1b8fd28 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -29,6 +29,7 @@
+ #include "../seq_lock.h"
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+ 
+ 
+ /*
+@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	if (dev < 0 || dev >= dp->max_mididev)
+ 		return NULL;
++	dev = array_index_nospec(dev, dp->max_mididev);
+ 	return get_mdev(dev);
+ }
+ 
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index cd0e0ebbfdb1..278ebb993122 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -26,6 +26,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+ 
+ /*
+  * constants
+@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
+ 	dp->max_synthdev = 0;
+ }
+ 
+-/*
+- * check if the specified device is MIDI mapped device
+- */
+-static int
+-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
++static struct seq_oss_synthinfo *
++get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	if (dev < 0 || dev >= dp->max_synthdev)
+-		return 0;
+-	if (dp->synths[dev].is_midi)
+-		return 1;
+-	return 0;
++		return NULL;
++	dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
++	return &dp->synths[dev];
+ }
+ 
+ /*
+@@ -359,14 +356,20 @@ static struct seq_oss_synth *
+ get_synthdev(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	struct seq_oss_synth *rec;
+-	if (dev < 0 || dev >= dp->max_synthdev)
+-		return NULL;
+-	if (! dp->synths[dev].opened)
++	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
++
++	if (!info)
+ 		return NULL;
+-	if (dp->synths[dev].is_midi)
+-		return &midi_synth_dev;
+-	if ((rec = get_sdev(dev)) == NULL)
++	if (!info->opened)
+ 		return NULL;
++	if (info->is_midi) {
++		rec = &midi_synth_dev;
++		snd_use_lock_use(&rec->use_lock);
++	} else {
++		rec = get_sdev(dev);
++		if (!rec)
++			return NULL;
++	}
+ 	if (! rec->opened) {
+ 		snd_use_lock_free(&rec->use_lock);
+ 		return NULL;
+@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
+ 	struct seq_oss_synth *rec;
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
+-		return;
+-	info = &dp->synths[dev];
+-	if (! info->opened)
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info || !info->opened)
+ 		return;
+ 	if (info->sysex)
+ 		info->sysex->len = 0; /* reset sysex */
+@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 			    const char __user *buf, int p, int c)
+ {
+ 	struct seq_oss_synth *rec;
++	struct seq_oss_synthinfo *info;
+ 	int rc;
+ 
+-	if (dev < 0 || dev >= dp->max_synthdev)
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	if (is_midi_dev(dp, dev))
++	if (info->is_midi)
+ 		return 0;
+ 	if ((rec = get_synthdev(dp, dev)) == NULL)
+ 		return -ENXIO;
+@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 	if (rec->oper.load_patch == NULL)
+ 		rc = -ENXIO;
+ 	else
+-		rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
++		rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
+ 	snd_use_lock_free(&rec->use_lock);
+ 	return rc;
+ }
+ 
+ /*
+- * check if the device is valid synth device
++ * check if the device is valid synth device and return the synth info
+  */
+-int
+-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
++struct seq_oss_synthinfo *
++snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	struct seq_oss_synth *rec;
++
+ 	rec = get_synthdev(dp, dev);
+ 	if (rec) {
+ 		snd_use_lock_free(&rec->use_lock);
+-		return 1;
++		return get_synthinfo_nospec(dp, dev);
+ 	}
+-	return 0;
++	return NULL;
+ }
+ 
+ 
+@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ 	int i, send;
+ 	unsigned char *dest;
+ 	struct seq_oss_synth_sysex *sysex;
++	struct seq_oss_synthinfo *info;
+ 
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	sysex = dp->synths[dev].sysex;
++	sysex = info->sysex;
+ 	if (sysex == NULL) {
+ 		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
+ 		if (sysex == NULL)
+ 			return -ENOMEM;
+-		dp->synths[dev].sysex = sysex;
++		info->sysex = sysex;
+ 	}
+ 
+ 	send = 0;
+@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ int
+ snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
++
++	if (!info)
+ 		return -EINVAL;
+-	snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
+-			      dp->synths[dev].arg.addr.port);
++	snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
++			      info->arg.addr.port);
+ 	return 0;
+ }
+ 
+@@ -568,16 +576,18 @@ int
+ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
+ {
+ 	struct seq_oss_synth *rec;
++	struct seq_oss_synthinfo *info;
+ 	int rc;
+ 
+-	if (is_midi_dev(dp, dev))
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info || info->is_midi)
+ 		return -ENXIO;
+ 	if ((rec = get_synthdev(dp, dev)) == NULL)
+ 		return -ENXIO;
+ 	if (rec->oper.ioctl == NULL)
+ 		rc = -ENXIO;
+ 	else
+-		rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
++		rc = rec->oper.ioctl(&info->arg, cmd, addr);
+ 	snd_use_lock_free(&rec->use_lock);
+ 	return rc;
+ }
+@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
+ int
+ snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
++	struct seq_oss_synthinfo *info;
++
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info || info->is_midi)
+ 		return -ENXIO;
+ 	ev->type = SNDRV_SEQ_EVENT_OSS;
+ 	memcpy(ev->data.raw8.d, data, 8);
+diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
+index 74ac55f166b6..a63f9e22974d 100644
+--- a/sound/core/seq/oss/seq_oss_synth.h
++++ b/sound/core/seq/oss/seq_oss_synth.h
+@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
+ void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
+ int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 				 const char __user *buf, int p, int c);
+-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
++struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
++						 int dev);
+ int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ 			    struct snd_seq_event *ev);
+ int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
+diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
+index ddcc1a325a61..42920a243328 100644
+--- a/sound/drivers/opl3/opl3_synth.c
++++ b/sound/drivers/opl3/opl3_synth.c
+@@ -21,6 +21,7 @@
+ 
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ #include <sound/opl3.h>
+ #include <sound/asound_fm.h>
+ 
+@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
+ {
+ 	unsigned short reg_side;
+ 	unsigned char op_offset;
+-	unsigned char voice_offset;
++	unsigned char voice_offset, voice_op;
+ 
+ 	unsigned short opl3_reg;
+ 	unsigned char reg_val;
+@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
+ 		voice_offset = voice->voice - MAX_OPL2_VOICES;
+ 	}
+ 	/* Get register offset of operator */
+-	op_offset = snd_opl3_regmap[voice_offset][voice->op];
++	voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
++	voice_op = array_index_nospec(voice->op, 4);
++	op_offset = snd_opl3_regmap[voice_offset][voice_op];
+ 
+ 	reg_val = 0x00;
+ 	/* Set amplitude modulation (tremolo) effect */
+diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
+index 8573289c381e..928a255bfc35 100644
+--- a/sound/firewire/dice/dice-stream.c
++++ b/sound/firewire/dice/dice-stream.c
+@@ -435,7 +435,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
+ 		err = init_stream(dice, AMDTP_IN_STREAM, i);
+ 		if (err < 0) {
+ 			for (; i >= 0; i--)
+-				destroy_stream(dice, AMDTP_OUT_STREAM, i);
++				destroy_stream(dice, AMDTP_IN_STREAM, i);
+ 			goto end;
+ 		}
+ 	}
+diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
+index 4ddb4cdd054b..96bb01b6b751 100644
+--- a/sound/firewire/dice/dice.c
++++ b/sound/firewire/dice/dice.c
+@@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2");
+ #define OUI_WEISS		0x001c6a
+ #define OUI_LOUD		0x000ff2
+ #define OUI_FOCUSRITE		0x00130e
+-#define OUI_TCELECTRONIC	0x001486
++#define OUI_TCELECTRONIC	0x000166
+ 
+ #define DICE_CATEGORY_ID	0x04
+ #define WEISS_CATEGORY_ID	0x00
+diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
+index 7eb617175fde..a31a70dccecf 100644
+--- a/sound/pci/asihpi/hpimsginit.c
++++ b/sound/pci/asihpi/hpimsginit.c
+@@ -23,6 +23,7 @@
+ 
+ #include "hpi_internal.h"
+ #include "hpimsginit.h"
++#include <linux/nospec.h>
+ 
+ /* The actual message size for each object type */
+ static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
+@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
+ {
+ 	u16 size;
+ 
+-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
++	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
++		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
+ 		size = msg_size[object];
+-	else
++	} else {
+ 		size = sizeof(*phm);
++	}
+ 
+ 	memset(phm, 0, size);
+ 	phm->size = size;
+@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
+ {
+ 	u16 size;
+ 
+-	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
++	if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
++		object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
+ 		size = res_size[object];
+-	else
++	} else {
+ 		size = sizeof(*phr);
++	}
+ 
+ 	memset(phr, 0, sizeof(*phr));
+ 	phr->size = size;
+diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
+index 5badd08e1d69..b1a2a7ea4172 100644
+--- a/sound/pci/asihpi/hpioctl.c
++++ b/sound/pci/asihpi/hpioctl.c
+@@ -33,6 +33,7 @@
+ #include <linux/stringify.h>
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
++#include <linux/nospec.h>
+ 
+ #ifdef MODULE_FIRMWARE
+ MODULE_FIRMWARE("asihpi/dsp5000.bin");
+@@ -186,7 +187,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		struct hpi_adapter *pa = NULL;
+ 
+ 		if (hm->h.adapter_index < ARRAY_SIZE(adapters))
+-			pa = &adapters[hm->h.adapter_index];
++			pa = &adapters[array_index_nospec(hm->h.adapter_index,
++							  ARRAY_SIZE(adapters))];
+ 
+ 		if (!pa || !pa->adapter || !pa->adapter->type) {
+ 			hpi_init_response(&hr->r0, hm->h.object,
+diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
+index 57df06e76968..cc009a4a3d1d 100644
+--- a/sound/pci/hda/hda_hwdep.c
++++ b/sound/pci/hda/hda_hwdep.c
+@@ -21,6 +21,7 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/nospec.h>
+ #include <sound/core.h>
+ #include "hda_codec.h"
+ #include "hda_local.h"
+@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
+ 	
+ 	if (get_user(verb, &arg->verb))
+ 		return -EFAULT;
+-	res = get_wcaps(codec, verb >> 24);
++	/* open-code get_wcaps(verb>>24) with nospec */
++	verb >>= 24;
++	if (verb < codec->core.start_nid ||
++	    verb >= codec->core.start_nid + codec->core.num_nodes) {
++		res = 0;
++	} else {
++		verb -= codec->core.start_nid;
++		verb = array_index_nospec(verb, codec->core.num_nodes);
++		res = codec->wcaps[verb];
++	}
+ 	if (put_user(res, &arg->res))
+ 		return -EFAULT;
+ 	return 0;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index b4f1b6e88305..7d7eb1354eee 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1383,6 +1383,8 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
+ 		pcm = get_pcm_rec(spec, per_pin->pcm_idx);
+ 	else
+ 		return;
++	if (!pcm->pcm)
++		return;
+ 	if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use))
+ 		return;
+ 
+@@ -2151,8 +2153,13 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
+ 	int dev, err;
+ 	int pin_idx, pcm_idx;
+ 
+-
+ 	for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
++		if (!get_pcm_rec(spec, pcm_idx)->pcm) {
++			/* no PCM: mark this for skipping permanently */
++			set_bit(pcm_idx, &spec->pcm_bitmap);
++			continue;
++		}
++
+ 		err = generic_hdmi_build_jack(codec, pcm_idx);
+ 		if (err < 0)
+ 			return err;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fc77bf7a1544..8c238e51bb5a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -331,6 +331,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ 		/* fallthrough */
+ 	case 0x10ec0215:
+ 	case 0x10ec0233:
++	case 0x10ec0235:
+ 	case 0x10ec0236:
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+@@ -6575,6 +6576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++	SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+@@ -7160,8 +7162,11 @@ static int patch_alc269(struct hda_codec *codec)
+ 	case 0x10ec0298:
+ 		spec->codec_variant = ALC269_TYPE_ALC298;
+ 		break;
++	case 0x10ec0235:
+ 	case 0x10ec0255:
+ 		spec->codec_variant = ALC269_TYPE_ALC255;
++		spec->shutup = alc256_shutup;
++		spec->init_hook = alc256_init;
+ 		break;
+ 	case 0x10ec0236:
+ 	case 0x10ec0256:
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 4c59983158e0..11b5b5e0e058 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -137,6 +137,7 @@
+ #include <linux/pci.h>
+ #include <linux/math64.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -5698,40 +5699,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
+ 		struct snd_pcm_channel_info *info)
+ {
+ 	struct hdspm *hdspm = snd_pcm_substream_chip(substream);
++	unsigned int channel = info->channel;
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
++		if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: output channel out of range (%d)\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		if (hdspm->channel_map_out[info->channel] < 0) {
++		channel = array_index_nospec(channel, hdspm->max_channels_out);
++		if (hdspm->channel_map_out[channel] < 0) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: output channel %d mapped out\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		info->offset = hdspm->channel_map_out[info->channel] *
++		info->offset = hdspm->channel_map_out[channel] *
+ 			HDSPM_CHANNEL_BUFFER_BYTES;
+ 	} else {
+-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
++		if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: input channel out of range (%d)\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		if (hdspm->channel_map_in[info->channel] < 0) {
++		channel = array_index_nospec(channel, hdspm->max_channels_in);
++		if (hdspm->channel_map_in[channel] < 0) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: input channel %d mapped out\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		info->offset = hdspm->channel_map_in[info->channel] *
++		info->offset = hdspm->channel_map_in[channel] *
+ 			HDSPM_CHANNEL_BUFFER_BYTES;
+ 	}
+ 
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index df648b1d9217..edd765e22377 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -26,6 +26,7 @@
+ #include <linux/pci.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -2071,9 +2072,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
+ 	if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
+ 		return -EINVAL;
+ 
+-	if ((chn = rme9652->channel_map[info->channel]) < 0) {
++	chn = rme9652->channel_map[array_index_nospec(info->channel,
++						      RME9652_NCHANNELS)];
++	if (chn < 0)
+ 		return -EINVAL;
+-	}
+ 
+ 	info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
+ 	info->first = 0;
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index cef79a1a620b..81268760b7a9 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -144,6 +144,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
+ 
+ 	psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ 
++	/* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
++	if (ratio <= 256) {
++		pm = ratio;
++		fp = 1;
++		goto out;
++	}
++
+ 	/* Set the max fluctuation -- 0.1% of the max devisor */
+ 	savesub = (psr ? 1 : 8)  * 256 * maxfp / 1000;
+ 
+diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
+index 09db2aec12a3..b2f5d2fa354d 100644
+--- a/sound/soc/omap/omap-dmic.c
++++ b/sound/soc/omap/omap-dmic.c
+@@ -281,7 +281,7 @@ static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream,
+ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
+ 				 unsigned int freq)
+ {
+-	struct clk *parent_clk;
++	struct clk *parent_clk, *mux;
+ 	char *parent_clk_name;
+ 	int ret = 0;
+ 
+@@ -329,14 +329,21 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
+ 		return -ENODEV;
+ 	}
+ 
++	mux = clk_get_parent(dmic->fclk);
++	if (IS_ERR(mux)) {
++		dev_err(dmic->dev, "can't get fck mux parent\n");
++		clk_put(parent_clk);
++		return -ENODEV;
++	}
++
+ 	mutex_lock(&dmic->mutex);
+ 	if (dmic->active) {
+ 		/* disable clock while reparenting */
+ 		pm_runtime_put_sync(dmic->dev);
+-		ret = clk_set_parent(dmic->fclk, parent_clk);
++		ret = clk_set_parent(mux, parent_clk);
+ 		pm_runtime_get_sync(dmic->dev);
+ 	} else {
+-		ret = clk_set_parent(dmic->fclk, parent_clk);
++		ret = clk_set_parent(mux, parent_clk);
+ 	}
+ 	mutex_unlock(&dmic->mutex);
+ 
+@@ -349,6 +356,7 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
+ 	dmic->fclk_freq = freq;
+ 
+ err_busy:
++	clk_put(mux);
+ 	clk_put(parent_clk);
+ 
+ 	return ret;
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 9038b2e7df73..eaa03acd4686 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ /*
+  * Dell usb dock with ALC4020 codec had a firmware problem where it got
+  * screwed up when zero volume is passed; just skip it as a workaround
++ *
++ * Also the extension unit gives an access error, so skip it as well.
+  */
+ static const struct usbmix_name_map dell_alc4020_map[] = {
++	{ 4, NULL },	/* extension unit */
+ 	{ 16, NULL },
+ 	{ 19, NULL },
+ 	{ 0 }
+diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
+index d6d65537b0d9..6aad8308a0ac 100644
+--- a/tools/lib/str_error_r.c
++++ b/tools/lib/str_error_r.c
+@@ -22,6 +22,6 @@ char *str_error_r(int errnum, char *buf, size_t buflen)
+ {
+ 	int err = strerror_r(errnum, buf, buflen);
+ 	if (err)
+-		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
++		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
+ 	return buf;
+ }
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index 53572304843b..a6483b5576fd 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static unsigned int kvm_vmid_bits __read_mostly;
+-static DEFINE_SPINLOCK(kvm_vmid_lock);
++static DEFINE_RWLOCK(kvm_vmid_lock);
+ 
+ static bool vgic_present;
+ 
+@@ -470,11 +470,16 @@ static void update_vttbr(struct kvm *kvm)
+ {
+ 	phys_addr_t pgd_phys;
+ 	u64 vmid;
++	bool new_gen;
+ 
+-	if (!need_new_vmid_gen(kvm))
++	read_lock(&kvm_vmid_lock);
++	new_gen = need_new_vmid_gen(kvm);
++	read_unlock(&kvm_vmid_lock);
++
++	if (!new_gen)
+ 		return;
+ 
+-	spin_lock(&kvm_vmid_lock);
++	write_lock(&kvm_vmid_lock);
+ 
+ 	/*
+ 	 * We need to re-check the vmid_gen here to ensure that if another vcpu
+@@ -482,7 +487,7 @@ static void update_vttbr(struct kvm *kvm)
+ 	 * use the same vmid.
+ 	 */
+ 	if (!need_new_vmid_gen(kvm)) {
+-		spin_unlock(&kvm_vmid_lock);
++		write_unlock(&kvm_vmid_lock);
+ 		return;
+ 	}
+ 
+@@ -516,7 +521,7 @@ static void update_vttbr(struct kvm *kvm)
+ 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
+ 	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
+ 
+-	spin_unlock(&kvm_vmid_lock);
++	write_unlock(&kvm_vmid_lock);
+ }
+ 
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index 6919352cbf15..c4762bef13c6 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -18,6 +18,7 @@
+ #include <linux/arm-smccc.h>
+ #include <linux/preempt.h>
+ #include <linux/kvm_host.h>
++#include <linux/uaccess.h>
+ #include <linux/wait.h>
+ 
+ #include <asm/cputype.h>
+@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+ 	smccc_set_retval(vcpu, val, 0, 0, 0);
+ 	return 1;
+ }
++
++int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
++{
++	return 1;		/* PSCI version */
++}
++
++int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
++{
++	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
++		return -EFAULT;
++
++	return 0;
++}
++
++int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
++{
++	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
++		void __user *uaddr = (void __user *)(long)reg->addr;
++		u64 val;
++
++		val = kvm_psci_version(vcpu, vcpu->kvm);
++		if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
++			return -EFAULT;
++
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
++int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
++{
++	if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
++		void __user *uaddr = (void __user *)(long)reg->addr;
++		bool wants_02;
++		u64 val;
++
++		if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
++			return -EFAULT;
++
++		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
++
++		switch (val) {
++		case KVM_ARM_PSCI_0_1:
++			if (wants_02)
++				return -EINVAL;
++			vcpu->kvm->arch.psci_version = val;
++			return 0;
++		case KVM_ARM_PSCI_0_2:
++		case KVM_ARM_PSCI_1_0:
++			if (!wants_02)
++				return -EINVAL;
++			vcpu->kvm->arch.psci_version = val;
++			return 0;
++		}
++	}
++
++	return -EINVAL;
++}


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-30 10:30 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-30 10:30 UTC (permalink / raw
  To: gentoo-commits

commit:     54039fa3feac7ed37530054550520d924419b496
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 30 10:30:35 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Apr 30 10:30:35 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54039fa3

Linux patch 4.16.6

 0000_README             |    4 +
 1005_linux-4.16.6.patch | 3759 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3763 insertions(+)

diff --git a/0000_README b/0000_README
index 344c387..d4182dc 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.16.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.5
 
+Patch:  1005_linux-4.16.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.16.6.patch b/1005_linux-4.16.6.patch
new file mode 100644
index 0000000..e5b7089
--- /dev/null
+++ b/1005_linux-4.16.6.patch
@@ -0,0 +1,3759 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index a553d4e4a0fb..f778901c4297 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -1386,26 +1386,26 @@ mld_qrv - INTEGER
+ 	Default: 2 (as specified by RFC3810 9.1)
+ 	Minimum: 1 (as specified by RFC6636 4.5)
+ 
+-max_dst_opts_cnt - INTEGER
++max_dst_opts_number - INTEGER
+ 	Maximum number of non-padding TLVs allowed in a Destination
+ 	options extension header. If this value is less than zero
+ 	then unknown options are disallowed and the number of known
+ 	TLVs allowed is the absolute value of this number.
+ 	Default: 8
+ 
+-max_hbh_opts_cnt - INTEGER
++max_hbh_opts_number - INTEGER
+ 	Maximum number of non-padding TLVs allowed in a Hop-by-Hop
+ 	options extension header. If this value is less than zero
+ 	then unknown options are disallowed and the number of known
+ 	TLVs allowed is the absolute value of this number.
+ 	Default: 8
+ 
+-max dst_opts_len - INTEGER
++max_dst_opts_length - INTEGER
+ 	Maximum length allowed for a Destination options extension
+ 	header.
+ 	Default: INT_MAX (unlimited)
+ 
+-max hbh_opts_len - INTEGER
++max_hbh_length - INTEGER
+ 	Maximum length allowed for a Hop-by-Hop options extension
+ 	header.
+ 	Default: INT_MAX (unlimited)
+diff --git a/Makefile b/Makefile
+index 6678a90f355b..41f07b2b7905 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
+index 5ee27dc9a10c..feebb2944882 100644
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
+ CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
+ CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
+ CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
+-CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080);
++CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
+ CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
+ CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
+ CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
+@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
+ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
+ CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+ CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+-CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
++CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
+ CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
+ CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
+ CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
+@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
+ };
+ 
+ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
+-	CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL),
++	CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
+ 	CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
+ 	CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
+ 	CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
+@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
+ };
+ 
+ static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
+-	CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
++	CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
+ 	CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
+ 	CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
+ 	CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
+diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
+index d9d1f512f019..5007fac01bb5 100644
+--- a/arch/s390/kernel/uprobes.c
++++ b/arch/s390/kernel/uprobes.c
+@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
+ 	return orig;
+ }
+ 
++bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
++			     struct pt_regs *regs)
++{
++	if (ctx == RP_CHECK_CHAIN_CALL)
++		return user_stack_pointer(regs) <= ret->stack;
++	else
++		return user_stack_pointer(regs) < ret->stack;
++}
++
+ /* Instruction Emulation */
+ 
+ static void adjust_psw_addr(psw_t *psw, unsigned long len)
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 76fb96966f7b..2f2e737be0f8 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
+ 	return opregion;
+ }
+ 
++static bool dmi_is_desktop(void)
++{
++	const char *chassis_type;
++
++	chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
++	if (!chassis_type)
++		return false;
++
++	if (!strcmp(chassis_type, "3") || /*  3: Desktop */
++	    !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
++	    !strcmp(chassis_type, "5") || /*  5: Pizza Box */
++	    !strcmp(chassis_type, "6") || /*  6: Mini Tower */
++	    !strcmp(chassis_type, "7") || /*  7: Tower */
++	    !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
++		return true;
++
++	return false;
++}
++
+ int acpi_video_register(void)
+ {
+ 	int ret = 0;
+@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
+ 	 * win8 ready (where we also prefer the native backlight driver, so
+ 	 * normally the acpi_video code should not register there anyways).
+ 	 */
+-	if (only_lcd == -1)
+-		only_lcd = acpi_osi_is_win8();
++	if (only_lcd == -1) {
++		if (dmi_is_desktop() && acpi_osi_is_win8())
++			only_lcd = true;
++		else
++			only_lcd = false;
++	}
+ 
+ 	dmi_check_system(video_dmi_table);
+ 
+diff --git a/drivers/block/swim.c b/drivers/block/swim.c
+index 64e066eba72e..0e31884a9519 100644
+--- a/drivers/block/swim.c
++++ b/drivers/block/swim.c
+@@ -110,7 +110,7 @@ struct iwm {
+ /* Select values for swim_select and swim_readbit */
+ 
+ #define READ_DATA_0	0x074
+-#define TWOMEG_DRIVE	0x075
++#define ONEMEG_DRIVE	0x075
+ #define SINGLE_SIDED	0x076
+ #define DRIVE_PRESENT	0x077
+ #define DISK_IN		0x170
+@@ -118,9 +118,9 @@ struct iwm {
+ #define TRACK_ZERO	0x172
+ #define TACHO		0x173
+ #define READ_DATA_1	0x174
+-#define MFM_MODE	0x175
++#define GCR_MODE	0x175
+ #define SEEK_COMPLETE	0x176
+-#define ONEMEG_MEDIA	0x177
++#define TWOMEG_MEDIA	0x177
+ 
+ /* Bits in handshake register */
+ 
+@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
+ 		struct floppy_struct *g;
+ 		fs->disk_in = 1;
+ 		fs->write_protected = swim_readbit(base, WRITE_PROT);
+-		fs->type = swim_readbit(base, ONEMEG_MEDIA);
+ 
+ 		if (swim_track00(base))
+ 			printk(KERN_ERR
+@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
+ 
+ 		swim_track00(base);
+ 
++		fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
++			HD_MEDIA : DD_MEDIA;
++		fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
+ 		get_floppy_geometry(fs, 0, &g);
+ 		fs->total_secs = g->size;
+ 		fs->secpercyl = g->head * g->sect;
+@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 
+ 	swim_write(base, setup, S_IBM_DRIVE  | S_FCLK_DIV2);
+ 	udelay(10);
+-	swim_drive(base, INTERNAL_DRIVE);
++	swim_drive(base, fs->location);
+ 	swim_motor(base, ON);
+ 	swim_action(base, SETMFM);
+ 	if (fs->ejected)
+@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ 		goto out;
+ 	}
+ 
++	set_capacity(fs->disk, fs->total_secs);
++
+ 	if (mode & FMODE_NDELAY)
+ 		return 0;
+ 
+@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+ 		if (copy_to_user((void __user *) param, (void *) &floppy_type,
+ 				 sizeof(struct floppy_struct)))
+ 			return -EFAULT;
+-		break;
+-
+-	default:
+-		printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
+-		       cmd);
+-		return -ENOSYS;
++		return 0;
+ 	}
+-	return 0;
++	return -ENOTTY;
+ }
+ 
+ static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+ 	struct swim_priv *swd = data;
+ 	int drive = (*part & 3);
+ 
+-	if (drive > swd->floppy_count)
++	if (drive >= swd->floppy_count)
+ 		return NULL;
+ 
+ 	*part = 0;
+@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
+ 
+ 	swim_motor(base, OFF);
+ 
+-	if (swim_readbit(base, SINGLE_SIDED))
+-		fs->head_number = 1;
+-	else
+-		fs->head_number = 2;
++	fs->type = HD_MEDIA;
++	fs->head_number = 2;
++
+ 	fs->ref_count = 0;
+ 	fs->ejected = 1;
+ 
+@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
+ 	/* scan floppy drives */
+ 
+ 	swim_drive(base, INTERNAL_DRIVE);
+-	if (swim_readbit(base, DRIVE_PRESENT))
++	if (swim_readbit(base, DRIVE_PRESENT) &&
++	    !swim_readbit(base, ONEMEG_DRIVE))
+ 		swim_add_floppy(swd, INTERNAL_DRIVE);
+ 	swim_drive(base, EXTERNAL_DRIVE);
+-	if (swim_readbit(base, DRIVE_PRESENT))
++	if (swim_readbit(base, DRIVE_PRESENT) &&
++	    !swim_readbit(base, ONEMEG_DRIVE))
+ 		swim_add_floppy(swd, EXTERNAL_DRIVE);
+ 
+ 	/* register floppy drives */
+@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
+ 							      &swd->lock);
+ 		if (!swd->unit[drive].disk->queue) {
+ 			err = -ENOMEM;
+-			put_disk(swd->unit[drive].disk);
+ 			goto exit_put_disks;
+ 		}
+ 		blk_queue_bounce_limit(swd->unit[drive].disk->queue,
+@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
+ 		goto out;
+ 	}
+ 
+-	swim_base = ioremap(res->start, resource_size(res));
++	swim_base = (struct swim __iomem *)res->start;
+ 	if (!swim_base) {
+ 		ret = -ENOMEM;
+ 		goto out_release_io;
+@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
+ 	if (!get_swim_mode(swim_base)) {
+ 		printk(KERN_INFO "SWIM device not found !\n");
+ 		ret = -ENODEV;
+-		goto out_iounmap;
++		goto out_release_io;
+ 	}
+ 
+ 	/* set platform driver data */
+@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
+ 	swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
+ 	if (!swd) {
+ 		ret = -ENOMEM;
+-		goto out_iounmap;
++		goto out_release_io;
+ 	}
+ 	platform_set_drvdata(dev, swd);
+ 
+@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
+ 
+ out_kfree:
+ 	kfree(swd);
+-out_iounmap:
+-	iounmap(swim_base);
+ out_release_io:
+ 	release_mem_region(res->start, resource_size(res));
+ out:
+@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
+ 	for (drive = 0; drive < swd->floppy_count; drive++)
+ 		floppy_eject(&swd->unit[drive]);
+ 
+-	iounmap(swd->base);
+-
+ 	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ 	if (res)
+ 		release_mem_region(res->start, resource_size(res));
+diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
+index af51015d056e..469541c1e51e 100644
+--- a/drivers/block/swim3.c
++++ b/drivers/block/swim3.c
+@@ -148,7 +148,7 @@ struct swim3 {
+ #define MOTOR_ON	2
+ #define RELAX		3	/* also eject in progress */
+ #define READ_DATA_0	4
+-#define TWOMEG_DRIVE	5
++#define ONEMEG_DRIVE	5
+ #define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
+ #define DRIVE_PRESENT	7
+ #define DISK_IN		8
+@@ -156,9 +156,9 @@ struct swim3 {
+ #define TRACK_ZERO	10
+ #define TACHO		11
+ #define READ_DATA_1	12
+-#define MFM_MODE	13
++#define GCR_MODE	13
+ #define SEEK_COMPLETE	14
+-#define ONEMEG_MEDIA	15
++#define TWOMEG_MEDIA	15
+ 
+ /* Definitions of values used in writing and formatting */
+ #define DATA_ESCAPE	0x99
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index e36d160c458f..5f7d86509f2f 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2374,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+ 		return media_changed(cdi, 1);
+ 
+-	if ((unsigned int)arg >= cdi->capacity)
++	if (arg >= cdi->capacity)
+ 		return -EINVAL;
+ 
+ 	info = kmalloc(sizeof(*info), GFP_KERNEL);
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 248c04090dea..255db6fe15c8 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -369,20 +369,40 @@ static int tpm_validate_command(struct tpm_chip *chip,
+ 	return -EINVAL;
+ }
+ 
+-/**
+- * tmp_transmit - Internal kernel interface to transmit TPM commands.
+- *
+- * @chip: TPM chip to use
+- * @buf: TPM command buffer
+- * @bufsiz: length of the TPM command buffer
+- * @flags: tpm transmit flags - bitmap
+- *
+- * Return:
+- *     0 when the operation is successful.
+- *     A negative number for system errors (errno).
+- */
+-ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+-		     u8 *buf, size_t bufsiz, unsigned int flags)
++static int tpm_request_locality(struct tpm_chip *chip)
++{
++	int rc;
++
++	if (!chip->ops->request_locality)
++		return 0;
++
++	rc = chip->ops->request_locality(chip, 0);
++	if (rc < 0)
++		return rc;
++
++	chip->locality = rc;
++
++	return 0;
++}
++
++static void tpm_relinquish_locality(struct tpm_chip *chip)
++{
++	int rc;
++
++	if (!chip->ops->relinquish_locality)
++		return;
++
++	rc = chip->ops->relinquish_locality(chip, chip->locality);
++	if (rc)
++		dev_err(&chip->dev, "%s: : error %d\n", __func__, rc);
++
++	chip->locality = -1;
++}
++
++static ssize_t tpm_try_transmit(struct tpm_chip *chip,
++				struct tpm_space *space,
++				u8 *buf, size_t bufsiz,
++				unsigned int flags)
+ {
+ 	struct tpm_output_header *header = (void *)buf;
+ 	int rc;
+@@ -422,8 +442,6 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ 	if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ 		mutex_lock(&chip->tpm_mutex);
+ 
+-	if (chip->dev.parent)
+-		pm_runtime_get_sync(chip->dev.parent);
+ 
+ 	if (chip->ops->clk_enable != NULL)
+ 		chip->ops->clk_enable(chip, true);
+@@ -431,14 +449,15 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ 	/* Store the decision as chip->locality will be changed. */
+ 	need_locality = chip->locality == -1;
+ 
+-	if (!(flags & TPM_TRANSMIT_RAW) &&
+-	    need_locality && chip->ops->request_locality)  {
+-		rc = chip->ops->request_locality(chip, 0);
++	if (!(flags & TPM_TRANSMIT_RAW) && need_locality) {
++		rc = tpm_request_locality(chip);
+ 		if (rc < 0)
+ 			goto out_no_locality;
+-		chip->locality = rc;
+ 	}
+ 
++	if (chip->dev.parent)
++		pm_runtime_get_sync(chip->dev.parent);
++
+ 	rc = tpm2_prepare_space(chip, space, ordinal, buf);
+ 	if (rc)
+ 		goto out;
+@@ -499,27 +518,83 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
+ 	rc = tpm2_commit_space(chip, space, ordinal, buf, &len);
+ 
+ out:
+-	if (need_locality && chip->ops->relinquish_locality) {
+-		chip->ops->relinquish_locality(chip, chip->locality);
+-		chip->locality = -1;
+-	}
++	if (chip->dev.parent)
++		pm_runtime_put_sync(chip->dev.parent);
++
++	if (need_locality)
++		tpm_relinquish_locality(chip);
++
+ out_no_locality:
+ 	if (chip->ops->clk_enable != NULL)
+ 		chip->ops->clk_enable(chip, false);
+ 
+-	if (chip->dev.parent)
+-		pm_runtime_put_sync(chip->dev.parent);
+-
+ 	if (!(flags & TPM_TRANSMIT_UNLOCKED))
+ 		mutex_unlock(&chip->tpm_mutex);
+ 	return rc ? rc : len;
+ }
+ 
+ /**
+- * tmp_transmit_cmd - send a tpm command to the device
++ * tpm_transmit - Internal kernel interface to transmit TPM commands.
++ *
++ * @chip: TPM chip to use
++ * @space: tpm space
++ * @buf: TPM command buffer
++ * @bufsiz: length of the TPM command buffer
++ * @flags: tpm transmit flags - bitmap
++ *
++ * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY
++ * returns from the TPM and retransmits the command after a delay up
++ * to a maximum wait of TPM2_DURATION_LONG.
++ *
++ * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2
++ * only
++ *
++ * Return:
++ *     the length of the return when the operation is successful.
++ *     A negative number for system errors (errno).
++ */
++ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
++		     u8 *buf, size_t bufsiz, unsigned int flags)
++{
++	struct tpm_output_header *header = (struct tpm_output_header *)buf;
++	/* space for header and handles */
++	u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)];
++	unsigned int delay_msec = TPM2_DURATION_SHORT;
++	u32 rc = 0;
++	ssize_t ret;
++	const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE,
++				     bufsiz);
++
++	/*
++	 * Subtlety here: if we have a space, the handles will be
++	 * transformed, so when we restore the header we also have to
++	 * restore the handles.
++	 */
++	memcpy(save, buf, save_size);
++
++	for (;;) {
++		ret = tpm_try_transmit(chip, space, buf, bufsiz, flags);
++		if (ret < 0)
++			break;
++		rc = be32_to_cpu(header->return_code);
++		if (rc != TPM2_RC_RETRY)
++			break;
++		delay_msec *= 2;
++		if (delay_msec > TPM2_DURATION_LONG) {
++			dev_err(&chip->dev, "TPM is in retry loop\n");
++			break;
++		}
++		tpm_msleep(delay_msec);
++		memcpy(buf, save, save_size);
++	}
++	return ret;
++}
++/**
++ * tpm_transmit_cmd - send a tpm command to the device
+  *    The function extracts tpm out header return code
+  *
+  * @chip: TPM chip to use
++ * @space: tpm space
+  * @buf: TPM command buffer
+  * @bufsiz: length of the buffer
+  * @min_rsp_body_length: minimum expected length of response body
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index f895fba4e20d..d73f3fb81b42 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -108,6 +108,7 @@ enum tpm2_return_codes {
+ 	TPM2_RC_COMMAND_CODE    = 0x0143,
+ 	TPM2_RC_TESTING		= 0x090A, /* RC_WARN */
+ 	TPM2_RC_REFERENCE_H0	= 0x0910,
++	TPM2_RC_RETRY		= 0x0922,
+ };
+ 
+ enum tpm2_algorithms {
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 7b3c2a8aa9de..497edd9848cd 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -112,6 +112,25 @@ struct tpm2_crb_smc {
+ 	u32 smc_func_id;
+ };
+ 
++static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
++				unsigned long timeout)
++{
++	ktime_t start;
++	ktime_t stop;
++
++	start = ktime_get();
++	stop = ktime_add(start, ms_to_ktime(timeout));
++
++	do {
++		if ((ioread32(reg) & mask) == value)
++			return true;
++
++		usleep_range(50, 100);
++	} while (ktime_before(ktime_get(), stop));
++
++	return ((ioread32(reg) & mask) == value);
++}
++
+ /**
+  * crb_go_idle - request tpm crb device to go the idle state
+  *
+@@ -128,7 +147,7 @@ struct tpm2_crb_smc {
+  *
+  * Return: 0 always
+  */
+-static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
++static int crb_go_idle(struct device *dev, struct crb_priv *priv)
+ {
+ 	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+@@ -136,30 +155,17 @@ static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv)
+ 		return 0;
+ 
+ 	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+-	/* we don't really care when this settles */
+ 
++	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
++				 CRB_CTRL_REQ_GO_IDLE/* mask */,
++				 0, /* value */
++				 TPM2_TIMEOUT_C)) {
++		dev_warn(dev, "goIdle timed out\n");
++		return -ETIME;
++	}
+ 	return 0;
+ }
+ 
+-static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+-				unsigned long timeout)
+-{
+-	ktime_t start;
+-	ktime_t stop;
+-
+-	start = ktime_get();
+-	stop = ktime_add(start, ms_to_ktime(timeout));
+-
+-	do {
+-		if ((ioread32(reg) & mask) == value)
+-			return true;
+-
+-		usleep_range(50, 100);
+-	} while (ktime_before(ktime_get(), stop));
+-
+-	return false;
+-}
+-
+ /**
+  * crb_cmd_ready - request tpm crb device to enter ready state
+  *
+@@ -175,8 +181,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
+  *
+  * Return: 0 on success -ETIME on timeout;
+  */
+-static int __maybe_unused crb_cmd_ready(struct device *dev,
+-					struct crb_priv *priv)
++static int crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+ {
+ 	if ((priv->sm == ACPI_TPM2_START_METHOD) ||
+ 	    (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
+@@ -195,11 +200,11 @@ static int __maybe_unused crb_cmd_ready(struct device *dev,
+ 	return 0;
+ }
+ 
+-static int crb_request_locality(struct tpm_chip *chip, int loc)
++static int __crb_request_locality(struct device *dev,
++				  struct crb_priv *priv, int loc)
+ {
+-	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ 	u32 value = CRB_LOC_STATE_LOC_ASSIGNED |
+-		CRB_LOC_STATE_TPM_REG_VALID_STS;
++		    CRB_LOC_STATE_TPM_REG_VALID_STS;
+ 
+ 	if (!priv->regs_h)
+ 		return 0;
+@@ -207,21 +212,45 @@ static int crb_request_locality(struct tpm_chip *chip, int loc)
+ 	iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl);
+ 	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value,
+ 				 TPM2_TIMEOUT_C)) {
+-		dev_warn(&chip->dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
++		dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
+ 		return -ETIME;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static void crb_relinquish_locality(struct tpm_chip *chip, int loc)
++static int crb_request_locality(struct tpm_chip *chip, int loc)
+ {
+ 	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
+ 
++	return __crb_request_locality(&chip->dev, priv, loc);
++}
++
++static int __crb_relinquish_locality(struct device *dev,
++				     struct crb_priv *priv, int loc)
++{
++	u32 mask = CRB_LOC_STATE_LOC_ASSIGNED |
++		   CRB_LOC_STATE_TPM_REG_VALID_STS;
++	u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS;
++
+ 	if (!priv->regs_h)
+-		return;
++		return 0;
+ 
+ 	iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl);
++	if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value,
++				 TPM2_TIMEOUT_C)) {
++		dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n");
++		return -ETIME;
++	}
++
++	return 0;
++}
++
++static int crb_relinquish_locality(struct tpm_chip *chip, int loc)
++{
++	struct crb_priv *priv = dev_get_drvdata(&chip->dev);
++
++	return __crb_relinquish_locality(&chip->dev, priv, loc);
+ }
+ 
+ static u8 crb_status(struct tpm_chip *chip)
+@@ -475,6 +504,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ 	}
+ 
++	ret = __crb_request_locality(dev, priv, 0);
++	if (ret)
++		return ret;
++
+ 	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+ 				   sizeof(struct crb_regs_tail));
+ 	if (IS_ERR(priv->regs_t))
+@@ -531,6 +564,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ 
+ 	crb_go_idle(dev, priv);
+ 
++	__crb_relinquish_locality(dev, priv, 0);
++
+ 	return ret;
+ }
+ 
+@@ -588,10 +623,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	chip->acpi_dev_handle = device->handle;
+ 	chip->flags = TPM_CHIP_FLAG_TPM2;
+ 
+-	rc  = crb_cmd_ready(dev, priv);
++	rc = __crb_request_locality(dev, priv, 0);
+ 	if (rc)
+ 		return rc;
+ 
++	rc  = crb_cmd_ready(dev, priv);
++	if (rc)
++		goto out;
++
+ 	pm_runtime_get_noresume(dev);
+ 	pm_runtime_set_active(dev);
+ 	pm_runtime_enable(dev);
+@@ -601,12 +640,15 @@ static int crb_acpi_add(struct acpi_device *device)
+ 		crb_go_idle(dev, priv);
+ 		pm_runtime_put_noidle(dev);
+ 		pm_runtime_disable(dev);
+-		return rc;
++		goto out;
+ 	}
+ 
+-	pm_runtime_put(dev);
++	pm_runtime_put_sync(dev);
+ 
+-	return 0;
++out:
++	__crb_relinquish_locality(dev, priv, 0);
++
++	return rc;
+ }
+ 
+ static int crb_acpi_remove(struct acpi_device *device)
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index da074e3db19b..5a1f47b43947 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -143,11 +143,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ 	return false;
+ }
+ 
+-static void release_locality(struct tpm_chip *chip, int l)
++static int release_locality(struct tpm_chip *chip, int l)
+ {
+ 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+ 
+ 	tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
++
++	return 0;
+ }
+ 
+ static int request_locality(struct tpm_chip *chip, int l)
+diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+index a38db40ce990..b2447ee3b245 100644
+--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+@@ -1637,6 +1637,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
+ 	 * (and possibly on the platform). So far only i.MX6Q (v1.30a) and
+ 	 * i.MX6DL (v1.31a) have been identified as needing the workaround, with
+ 	 * 4 and 1 iterations respectively.
++	 * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
++	 * the workaround with a single iteration.
+ 	 */
+ 
+ 	switch (hdmi->version) {
+@@ -1644,6 +1646,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
+ 		count = 4;
+ 		break;
+ 	case 0x131a:
++	case 0x201a:
+ 		count = 1;
+ 		break;
+ 	default:
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 051a72eecb24..d2cc55e21374 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
+ #define PCI_DEVICE_ID_AMD_17H_DF_F3	0x1463
+ #endif
+ 
++#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
++#define PCI_DEVICE_ID_AMD_17H_RR_NB	0x15d0
++#endif
++
+ /* CPUID function 0x80000001, ebx */
+ #define CPUID_PKGTYPE_MASK	0xf0000000
+ #define CPUID_PKGTYPE_F		0x00000000
+@@ -72,6 +76,7 @@ struct k10temp_data {
+ 	struct pci_dev *pdev;
+ 	void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
+ 	int temp_offset;
++	u32 temp_adjust_mask;
+ };
+ 
+ struct tctl_offset {
+@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
+ 	{ 0x17, "AMD Ryzen 5 1600X", 20000 },
+ 	{ 0x17, "AMD Ryzen 7 1700X", 20000 },
+ 	{ 0x17, "AMD Ryzen 7 1800X", 20000 },
++	{ 0x17, "AMD Ryzen 7 2700X", 10000 },
+ 	{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
+ 	{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ 	{ 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
+@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
+ 
+ 	data->read_tempreg(data->pdev, &regval);
+ 	temp = (regval >> 21) * 125;
++	if (regval & data->temp_adjust_mask)
++		temp -= 49000;
+ 	if (temp > data->temp_offset)
+ 		temp -= data->temp_offset;
+ 	else
+@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
+ 	data->pdev = pdev;
+ 
+ 	if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
+-					  boot_cpu_data.x86_model == 0x70))
++					  boot_cpu_data.x86_model == 0x70)) {
+ 		data->read_tempreg = read_tempreg_nb_f15;
+-	else if (boot_cpu_data.x86 == 0x17)
++	} else if (boot_cpu_data.x86 == 0x17) {
++		data->temp_adjust_mask = 0x80000;
+ 		data->read_tempreg = read_tempreg_nb_f17;
+-	else
++	} else {
+ 		data->read_tempreg = read_tempreg_pci;
++	}
+ 
+ 	for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
+ 		const struct tctl_offset *entry = &tctl_offset_table[i];
+@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
++	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(pci, k10temp_id_table);
+diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
+index 439ee9c5f535..c59b5da85321 100644
+--- a/drivers/message/fusion/mptsas.c
++++ b/drivers/message/fusion/mptsas.c
+@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
+ 	.cmd_per_lun			= 7,
+ 	.use_clustering			= ENABLE_CLUSTERING,
+ 	.shost_attrs			= mptscsih_host_attrs,
++	.no_write_same			= 1,
+ };
+ 
+ static int mptsas_get_linkerrors(struct sas_phy *phy)
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b7b113018853..718e4914e3a0 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ 	} /* switch(bond_mode) */
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-	slave_dev->npinfo = bond->dev->npinfo;
+-	if (slave_dev->npinfo) {
++	if (bond->dev->npinfo) {
+ 		if (slave_enable_netpoll(new_slave)) {
+ 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+ 			res = -EBUSY;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 7ea72ef11a55..d272dc6984ac 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1321,6 +1321,10 @@
+ #define MDIO_VEND2_AN_STAT		0x8002
+ #endif
+ 
++#ifndef MDIO_VEND2_PMA_CDR_CONTROL
++#define MDIO_VEND2_PMA_CDR_CONTROL	0x8056
++#endif
++
+ #ifndef MDIO_CTRL1_SPEED1G
+ #define MDIO_CTRL1_SPEED1G		(MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+ #endif
+@@ -1369,6 +1373,10 @@
+ #define XGBE_AN_CL37_TX_CONFIG_MASK	0x08
+ #define XGBE_AN_CL37_MII_CTRL_8BIT	0x0100
+ 
++#define XGBE_PMA_CDR_TRACK_EN_MASK	0x01
++#define XGBE_PMA_CDR_TRACK_EN_OFF	0x00
++#define XGBE_PMA_CDR_TRACK_EN_ON	0x01
++
+ /* Bit setting and getting macros
+  *  The get macro will extract the current bit field value from within
+  *  the variable
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+index 7d128be61310..b91143947ed2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+ 				   "debugfs_create_file failed\n");
+ 	}
+ 
++	if (pdata->vdata->an_cdr_workaround) {
++		pfile = debugfs_create_bool("an_cdr_workaround", 0600,
++					    pdata->xgbe_debugfs,
++					    &pdata->debugfs_an_cdr_workaround);
++		if (!pfile)
++			netdev_err(pdata->netdev,
++				   "debugfs_create_bool failed\n");
++
++		pfile = debugfs_create_bool("an_cdr_track_early", 0600,
++					    pdata->xgbe_debugfs,
++					    &pdata->debugfs_an_cdr_track_early);
++		if (!pfile)
++			netdev_err(pdata->netdev,
++				   "debugfs_create_bool failed\n");
++	}
++
+ 	kfree(buf);
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index d91fa595be98..e31d9d1fb6a6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ 	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+ 
+ 	/* Call MDIO/PHY initialization routine */
++	pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
+ 	ret = pdata->phy_if.phy_init(pdata);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 072b9f664597..1b45cd73a258 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
+ 	xgbe_an73_set(pdata, false, false);
+ 	xgbe_an73_disable_interrupts(pdata);
+ 
++	pdata->an_start = 0;
++
+ 	netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
+ }
+ 
+ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
+ {
++	if (pdata->phy_if.phy_impl.an_pre)
++		pdata->phy_if.phy_impl.an_pre(pdata);
++
+ 	switch (pdata->an_mode) {
+ 	case XGBE_AN_MODE_CL73:
+ 	case XGBE_AN_MODE_CL73_REDRV:
+@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
+ 
+ static void xgbe_an_disable(struct xgbe_prv_data *pdata)
+ {
++	if (pdata->phy_if.phy_impl.an_post)
++		pdata->phy_if.phy_impl.an_post(pdata);
++
+ 	switch (pdata->an_mode) {
+ 	case XGBE_AN_MODE_CL73:
+ 	case XGBE_AN_MODE_CL73_REDRV:
+@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ 		XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ 			    reg);
+ 
+-		if (pdata->phy_if.phy_impl.kr_training_post)
+-			pdata->phy_if.phy_impl.kr_training_post(pdata);
+-
+ 		netif_dbg(pdata, link, pdata->netdev,
+ 			  "KR training initiated\n");
++
++		if (pdata->phy_if.phy_impl.kr_training_post)
++			pdata->phy_if.phy_impl.kr_training_post(pdata);
+ 	}
+ 
+ 	return XGBE_AN_PAGE_RECEIVED;
+@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ 			return XGBE_AN_NO_LINK;
+ 	}
+ 
+-	xgbe_an73_disable(pdata);
++	xgbe_an_disable(pdata);
+ 
+ 	xgbe_switch_mode(pdata);
+ 
+-	xgbe_an73_restart(pdata);
++	xgbe_an_restart(pdata);
+ 
+ 	return XGBE_AN_INCOMPAT_LINK;
+ }
+@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
+ 		pdata->an_result = pdata->an_state;
+ 		pdata->an_state = XGBE_AN_READY;
+ 
++		if (pdata->phy_if.phy_impl.an_post)
++			pdata->phy_if.phy_impl.an_post(pdata);
++
+ 		netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
+ 			  xgbe_state_as_string(pdata->an_result));
+ 	}
+@@ -903,6 +914,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
+ 		pdata->kx_state = XGBE_RX_BPA;
+ 		pdata->an_start = 0;
+ 
++		if (pdata->phy_if.phy_impl.an_post)
++			pdata->phy_if.phy_impl.an_post(pdata);
++
+ 		netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
+ 			  xgbe_state_as_string(pdata->an_result));
+ 	}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index eb23f9ba1a9a..82d1f416ee2a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
+ 	.irq_reissue_support		= 1,
+ 	.tx_desc_prefetch		= 5,
+ 	.rx_desc_prefetch		= 5,
++	.an_cdr_workaround		= 1,
+ };
+ 
+ static const struct xgbe_version_data xgbe_v2b = {
+@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
+ 	.irq_reissue_support		= 1,
+ 	.tx_desc_prefetch		= 5,
+ 	.rx_desc_prefetch		= 5,
++	.an_cdr_workaround		= 1,
+ };
+ 
+ static const struct pci_device_id xgbe_pci_table[] = {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 3304a291aa96..aac884314000 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -147,6 +147,14 @@
+ /* Rate-change complete wait/retry count */
+ #define XGBE_RATECHANGE_COUNT		500
+ 
++/* CDR delay values for KR support (in usec) */
++#define XGBE_CDR_DELAY_INIT		10000
++#define XGBE_CDR_DELAY_INC		10000
++#define XGBE_CDR_DELAY_MAX		100000
++
++/* RRC frequency during link status check */
++#define XGBE_RRC_FREQUENCY		10
++
+ enum xgbe_port_mode {
+ 	XGBE_PORT_MODE_RSVD = 0,
+ 	XGBE_PORT_MODE_BACKPLANE,
+@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
+ #define XGBE_SFP_BASE_VENDOR_SN			4
+ #define XGBE_SFP_BASE_VENDOR_SN_LEN		16
+ 
++#define XGBE_SFP_EXTD_OPT1			1
++#define XGBE_SFP_EXTD_OPT1_RX_LOS		BIT(1)
++#define XGBE_SFP_EXTD_OPT1_TX_FAULT		BIT(3)
++
+ #define XGBE_SFP_EXTD_DIAG			28
+ #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE		BIT(2)
+ 
+@@ -324,6 +336,7 @@ struct xgbe_phy_data {
+ 
+ 	unsigned int sfp_gpio_address;
+ 	unsigned int sfp_gpio_mask;
++	unsigned int sfp_gpio_inputs;
+ 	unsigned int sfp_gpio_rx_los;
+ 	unsigned int sfp_gpio_tx_fault;
+ 	unsigned int sfp_gpio_mod_absent;
+@@ -355,6 +368,10 @@ struct xgbe_phy_data {
+ 	unsigned int redrv_addr;
+ 	unsigned int redrv_lane;
+ 	unsigned int redrv_model;
++
++	/* KR AN support */
++	unsigned int phy_cdr_notrack;
++	unsigned int phy_cdr_delay;
+ };
+ 
+ /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
+@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
+ 	phy_data->sfp_phy_avail = 1;
+ }
+ 
++static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
++{
++	u8 *sfp_extd = phy_data->sfp_eeprom.extd;
++
++	if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
++		return false;
++
++	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
++		return false;
++
++	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
++		return true;
++
++	return false;
++}
++
++static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
++{
++	u8 *sfp_extd = phy_data->sfp_eeprom.extd;
++
++	if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
++		return false;
++
++	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
++		return false;
++
++	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
++		return true;
++
++	return false;
++}
++
++static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
++{
++	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
++		return false;
++
++	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
++		return true;
++
++	return false;
++}
++
+ static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
+ {
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ 	if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
+ 		return;
+ 
++	/* Update transceiver signals (eeprom extd/options) */
++	phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
++	phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
++
+ 	if (xgbe_phy_sfp_parse_quirks(pdata))
+ 		return;
+ 
+@@ -1184,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
+ {
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+-	unsigned int gpio_input;
+ 	u8 gpio_reg, gpio_ports[2];
+ 	int ret;
+ 
+@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
+ 		return;
+ 	}
+ 
+-	gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+-
+-	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
+-		/* No GPIO, just assume the module is present for now */
+-		phy_data->sfp_mod_absent = 0;
+-	} else {
+-		if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+-			phy_data->sfp_mod_absent = 0;
+-	}
+-
+-	if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
+-	    (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+-		phy_data->sfp_rx_los = 1;
++	phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
+ 
+-	if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
+-	    (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+-		phy_data->sfp_tx_fault = 1;
++	phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
+ }
+ 
+ static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
+@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ 		return 1;
+ 
+ 	/* No link, attempt a receiver reset cycle */
+-	if (phy_data->rrc_count++) {
++	if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ 		phy_data->rrc_count = 0;
+ 		xgbe_phy_rrc(pdata);
+ 	}
+@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
+ 	return true;
+ }
+ 
++static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
++{
++	struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++	if (!pdata->debugfs_an_cdr_workaround)
++		return;
++
++	if (!phy_data->phy_cdr_notrack)
++		return;
++
++	usleep_range(phy_data->phy_cdr_delay,
++		     phy_data->phy_cdr_delay + 500);
++
++	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
++			 XGBE_PMA_CDR_TRACK_EN_MASK,
++			 XGBE_PMA_CDR_TRACK_EN_ON);
++
++	phy_data->phy_cdr_notrack = 0;
++}
++
++static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
++{
++	struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++	if (!pdata->debugfs_an_cdr_workaround)
++		return;
++
++	if (phy_data->phy_cdr_notrack)
++		return;
++
++	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
++			 XGBE_PMA_CDR_TRACK_EN_MASK,
++			 XGBE_PMA_CDR_TRACK_EN_OFF);
++
++	xgbe_phy_rrc(pdata);
++
++	phy_data->phy_cdr_notrack = 1;
++}
++
++static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
++{
++	if (!pdata->debugfs_an_cdr_track_early)
++		xgbe_phy_cdr_track(pdata);
++}
++
++static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
++{
++	if (pdata->debugfs_an_cdr_track_early)
++		xgbe_phy_cdr_track(pdata);
++}
++
++static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
++{
++	struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++	switch (pdata->an_mode) {
++	case XGBE_AN_MODE_CL73:
++	case XGBE_AN_MODE_CL73_REDRV:
++		if (phy_data->cur_mode != XGBE_MODE_KR)
++			break;
++
++		xgbe_phy_cdr_track(pdata);
++
++		switch (pdata->an_result) {
++		case XGBE_AN_READY:
++		case XGBE_AN_COMPLETE:
++			break;
++		default:
++			if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
++				phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
++			else
++				phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
++			break;
++		}
++		break;
++	default:
++		break;
++	}
++}
++
++static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
++{
++	struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++	switch (pdata->an_mode) {
++	case XGBE_AN_MODE_CL73:
++	case XGBE_AN_MODE_CL73_REDRV:
++		if (phy_data->cur_mode != XGBE_MODE_KR)
++			break;
++
++		xgbe_phy_cdr_notrack(pdata);
++		break;
++	default:
++		break;
++	}
++}
++
+ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ {
+ 	struct xgbe_phy_data *phy_data = pdata->phy_data;
+@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ 	xgbe_phy_sfp_reset(phy_data);
+ 	xgbe_phy_sfp_mod_absent(pdata);
+ 
++	/* Reset CDR support */
++	xgbe_phy_cdr_track(pdata);
++
+ 	/* Power off the PHY */
+ 	xgbe_phy_power_off(pdata);
+ 
+@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+ 	/* Start in highest supported mode */
+ 	xgbe_phy_set_mode(pdata, phy_data->start_mode);
+ 
++	/* Reset CDR support */
++	xgbe_phy_cdr_track(pdata);
++
+ 	/* After starting the I2C controller, we can check for an SFP */
+ 	switch (phy_data->port_mode) {
+ 	case XGBE_PORT_MODE_SFP:
+@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ 		}
+ 	}
+ 
++	phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
++
+ 	/* Register for driving external PHYs */
+ 	mii = devm_mdiobus_alloc(pdata->dev);
+ 	if (!mii) {
+@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
+ 	phy_impl->an_advertising	= xgbe_phy_an_advertising;
+ 
+ 	phy_impl->an_outcome		= xgbe_phy_an_outcome;
++
++	phy_impl->an_pre		= xgbe_phy_an_pre;
++	phy_impl->an_post		= xgbe_phy_an_post;
++
++	phy_impl->kr_training_pre	= xgbe_phy_kr_training_pre;
++	phy_impl->kr_training_post	= xgbe_phy_kr_training_post;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index ad102c8bac7b..95d4b56448c6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -833,6 +833,7 @@ struct xgbe_hw_if {
+ /* This structure represents implementation specific routines for an
+  * implementation of a PHY. All routines are required unless noted below.
+  *   Optional routines:
++ *     an_pre, an_post
+  *     kr_training_pre, kr_training_post
+  */
+ struct xgbe_phy_impl_if {
+@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
+ 	/* Process results of auto-negotiation */
+ 	enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+ 
++	/* Pre/Post auto-negotiation support */
++	void (*an_pre)(struct xgbe_prv_data *);
++	void (*an_post)(struct xgbe_prv_data *);
++
+ 	/* Pre/Post KR training enablement support */
+ 	void (*kr_training_pre)(struct xgbe_prv_data *);
+ 	void (*kr_training_post)(struct xgbe_prv_data *);
+@@ -989,6 +994,7 @@ struct xgbe_version_data {
+ 	unsigned int irq_reissue_support;
+ 	unsigned int tx_desc_prefetch;
+ 	unsigned int rx_desc_prefetch;
++	unsigned int an_cdr_workaround;
+ };
+ 
+ struct xgbe_vxlan_data {
+@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
+ 	unsigned int debugfs_xprop_reg;
+ 
+ 	unsigned int debugfs_xi2c_reg;
++
++	bool debugfs_an_cdr_workaround;
++	bool debugfs_an_cdr_track_early;
+ };
+ 
+ /* Function prototypes*/
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index c96a92118b8b..32f6d2e24d66 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -951,9 +951,11 @@ void aq_nic_shutdown(struct aq_nic_s *self)
+ 
+ 	netif_device_detach(self->ndev);
+ 
+-	err = aq_nic_stop(self);
+-	if (err < 0)
+-		goto err_exit;
++	if (netif_running(self->ndev)) {
++		err = aq_nic_stop(self);
++		if (err < 0)
++			goto err_exit;
++	}
+ 	aq_nic_deinit(self);
+ 
+ err_exit:
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+index d3b847ec7465..c58b2c227260 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+@@ -48,6 +48,8 @@
+ #define FORCE_FLASHLESS 0
+ 
+ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
++static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
++				      enum hal_atl_utils_fw_state_e state);
+ 
+ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+ {
+@@ -247,6 +249,20 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
+ 
+ 	self->rbl_enabled = (boot_exit_code != 0);
+ 
++	/* FW 1.x may bootup in an invalid POWER state (WOL feature).
++	 * We should work around this by forcing its state back to DEINIT
++	 */
++	if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
++				    aq_hw_read_reg(self,
++						   HW_ATL_MPI_FW_VERSION))) {
++		int err = 0;
++
++		hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
++		AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
++			       HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
++			       10, 1000U);
++	}
++
+ 	if (self->rbl_enabled)
+ 		return hw_atl_utils_soft_reset_rbl(self);
+ 	else
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 1801582076be..9442605f4fd4 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1874,22 +1874,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
+ 	return retval;
+ }
+ 
+-static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen)
++static void bnxt_get_pkgver(struct net_device *dev)
+ {
++	struct bnxt *bp = netdev_priv(dev);
+ 	u16 index = 0;
+-	u32 datalen;
++	char *pkgver;
++	u32 pkglen;
++	u8 *pkgbuf;
++	int len;
+ 
+ 	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ 				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+-				 &index, NULL, &datalen) != 0)
+-		return NULL;
++				 &index, NULL, &pkglen) != 0)
++		return;
+ 
+-	memset(buf, 0, buflen);
+-	if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0)
+-		return NULL;
++	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
++	if (!pkgbuf) {
++		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
++			pkglen);
++		return;
++	}
++
++	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
++		goto err;
+ 
+-	return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf,
+-		datalen);
++	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
++				   pkglen);
++	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
++		len = strlen(bp->fw_ver_str);
++		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
++			 "/pkg %s", pkgver);
++	}
++err:
++	kfree(pkgbuf);
+ }
+ 
+ static int bnxt_get_eeprom(struct net_device *dev,
+@@ -2558,22 +2575,10 @@ void bnxt_ethtool_init(struct bnxt *bp)
+ 	struct hwrm_selftest_qlist_input req = {0};
+ 	struct bnxt_test_info *test_info;
+ 	struct net_device *dev = bp->dev;
+-	char *pkglog;
+ 	int i, rc;
+ 
+-	pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL);
+-	if (pkglog) {
+-		char *pkgver;
+-		int len;
++	bnxt_get_pkgver(dev);
+ 
+-		pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH);
+-		if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+-			len = strlen(bp->fw_ver_str);
+-			snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+-				 "/pkg %s", pkgver);
+-		}
+-		kfree(pkglog);
+-	}
+ 	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+index 73f2249555b5..83444811d3c6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+@@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type {
+ #define BNX_DIR_ATTR_NO_CHKSUM			(1 << 0)
+ #define BNX_DIR_ATTR_PROP_STREAM		(1 << 1)
+ 
+-#define BNX_PKG_LOG_MAX_LENGTH			4096
+-
+ enum bnxnvm_pkglog_field_index {
+ 	BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP	= 0,
+ 	BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION		= 1,
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+index e9309fb9084b..21a21934e5bf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+@@ -2889,6 +2889,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+ 	int ret = 0;
+ 	struct hlist_node *h;
+ 	int bkt;
++	u8 i;
+ 
+ 	/* validate the request */
+ 	if (vf_id >= pf->num_alloc_vfs) {
+@@ -2900,6 +2901,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+ 
+ 	vf = &(pf->vf[vf_id]);
+ 	vsi = pf->vsi[vf->lan_vsi_idx];
++
++	/* When the VF is resetting wait until it is done.
++	 * It can take up to 200 milliseconds,
++	 * but wait for up to 300 milliseconds to be safe.
++	 */
++	for (i = 0; i < 15; i++) {
++		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
++			break;
++		msleep(20);
++	}
+ 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ 		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
+ 			vf_id);
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 5a1668cdb461..7f1083ce23da 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -838,6 +838,8 @@ enum mvpp2_bm_type {
+ 
+ #define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)
+ 
++#define MVPP2_DESC_DMA_MASK	DMA_BIT_MASK(40)
++
+ /* Definitions */
+ 
+ /* Shared Packet Processor resources */
+@@ -1336,7 +1338,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
+ 	if (port->priv->hw_version == MVPP21)
+ 		return tx_desc->pp21.buf_dma_addr;
+ 	else
+-		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
++		return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
+ }
+ 
+ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+@@ -1354,7 +1356,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
+ 	} else {
+ 		u64 val = (u64)addr;
+ 
+-		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
++		tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
+ 		tx_desc->pp22.buf_dma_addr_ptp |= val;
+ 		tx_desc->pp22.packet_offset = offset;
+ 	}
+@@ -1414,7 +1416,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
+ 	if (port->priv->hw_version == MVPP21)
+ 		return rx_desc->pp21.buf_dma_addr;
+ 	else
+-		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
++		return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
+ }
+ 
+ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+@@ -1423,7 +1425,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
+ 	if (port->priv->hw_version == MVPP21)
+ 		return rx_desc->pp21.buf_cookie;
+ 	else
+-		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
++		return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
+ }
+ 
+ static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
+@@ -8347,7 +8349,7 @@ static int mvpp2_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (priv->hw_version == MVPP22) {
+-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
++		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
+ 		if (err)
+ 			goto err_mg_clk;
+ 		/* Sadly, the BM pools all share the same register to
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+index 7761a26ec9c5..e7565416639b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+@@ -343,7 +343,7 @@ enum power_event {
+ #define MTL_RX_OVERFLOW_INT		BIT(16)
+ 
+ /* Default operating mode of the MAC */
+-#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
++#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
+ 			GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+ 
+ /* To dump the core regs excluding  the Address Registers */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 63795ecafc8d..26dfb75e927a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -30,13 +30,6 @@ static void dwmac4_core_init(struct mac_device_info *hw,
+ 
+ 	value |= GMAC_CORE_INIT;
+ 
+-	/* Clear ACS bit because Ethernet switch tagging formats such as
+-	 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
+-	 * hardware to truncate packets on reception.
+-	 */
+-	if (netdev_uses_dsa(dev))
+-		value &= ~GMAC_CONFIG_ACS;
+-
+ 	if (mtu > 1500)
+ 		value |= GMAC_CONFIG_2K;
+ 	if (mtu > 2000)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7ad841434ec8..3ea343b45d93 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3435,8 +3435,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+ 
+ 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ 			 * Type frames (LLC/LLC-SNAP)
++			 *
++			 * llc_snap is never checked in GMAC >= 4, so this ACS
++			 * feature is always disabled and packets need to be
++			 * stripped manually.
+ 			 */
+-			if (unlikely(status != llc_snap))
++			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
++			    unlikely(status != llc_snap))
+ 				frame_len -= ETH_FCS_LEN;
+ 
+ 			if (netif_msg_rx_status(priv)) {
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index b2b30c9df037..33c35b2df7d5 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -125,7 +125,7 @@ do {								\
+ 
+ #define RX_PRIORITY_MAPPING	0x76543210
+ #define TX_PRIORITY_MAPPING	0x33221100
+-#define CPDMA_TX_PRIORITY_MAP	0x01234567
++#define CPDMA_TX_PRIORITY_MAP	0x76543210
+ 
+ #define CPSW_VLAN_AWARE		BIT(1)
+ #define CPSW_ALE_VLAN_AWARE	1
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 9cbb0c8a896a..7de88b33d5b9 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 
+ 	err = netdev_upper_dev_link(real_dev, dev, extack);
+ 	if (err < 0)
+-		goto put_dev;
++		goto unregister;
+ 
+ 	/* need to be already registered so that ->init has run and
+ 	 * the MAC addr is set
+@@ -3316,8 +3316,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 	macsec_del_dev(macsec);
+ unlink:
+ 	netdev_upper_dev_unlink(real_dev, dev);
+-put_dev:
+-	dev_put(real_dev);
++unregister:
+ 	unregister_netdevice(dev);
+ 	return err;
+ }
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 5aa59f41bf8c..71e2aef6b7a1 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppox))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OE)
+ 		goto end;
+ 
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index befed2d22bf4..3175f7410baf 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
+ 	}
+ }
+ 
++static bool __team_option_inst_tmp_find(const struct list_head *opts,
++					const struct team_option_inst *needle)
++{
++	struct team_option_inst *opt_inst;
++
++	list_for_each_entry(opt_inst, opts, tmp_list)
++		if (opt_inst == needle)
++			return true;
++	return false;
++}
++
+ static int __team_options_register(struct team *team,
+ 				   const struct team_option *option,
+ 				   size_t option_count)
+@@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
+ }
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int __team_port_enable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np;
+ 	int err;
+ 
+-	if (!team->dev->npinfo)
+-		return 0;
+-
+ 	np = kzalloc(sizeof(*np), GFP_KERNEL);
+ 	if (!np)
+ 		return -ENOMEM;
+@@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+ 	return err;
+ }
+ 
++static int team_port_enable_netpoll(struct team_port *port)
++{
++	if (!port->team->dev->npinfo)
++		return 0;
++
++	return __team_port_enable_netpoll(port);
++}
++
+ static void team_port_disable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np = port->np;
+@@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
+ 	kfree(np);
+ }
+ #else
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int team_port_enable_netpoll(struct team_port *port)
+ {
+ 	return 0;
+ }
+@@ -1204,7 +1220,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 		goto err_vids_add;
+ 	}
+ 
+-	err = team_port_enable_netpoll(team, port);
++	err = team_port_enable_netpoll(port);
+ 	if (err) {
+ 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ 			   portname);
+@@ -1901,7 +1917,7 @@ static int team_netpoll_setup(struct net_device *dev,
+ 
+ 	mutex_lock(&team->lock);
+ 	list_for_each_entry(port, &team->port_list, list) {
+-		err = team_port_enable_netpoll(team, port);
++		err = __team_port_enable_netpoll(port);
+ 		if (err) {
+ 			__team_netpoll_cleanup(team);
+ 			break;
+@@ -2562,6 +2578,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+ 			if (err)
+ 				goto team_put;
+ 			opt_inst->changed = true;
++
++			/* dumb/evil user-space can send us duplicate opt,
++			 * keep only the last one
++			 */
++			if (__team_option_inst_tmp_find(&opt_inst_list,
++							opt_inst))
++				continue;
++
+ 			list_add(&opt_inst->tmp_list, &opt_inst_list);
+ 		}
+ 		if (!opt_found) {
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 28cfa642e39a..6c7bdd0c361a 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1094,12 +1094,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		goto drop;
+ 
+ 	len = run_ebpf_filter(tun, skb, len);
+-
+-	/* Trim extra bytes since we may insert vlan proto & TCI
+-	 * in tun_put_user().
+-	 */
+-	len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0;
+-	if (len <= 0 || pskb_trim(skb, len))
++	if (len == 0 || pskb_trim(skb, len))
+ 		goto drop;
+ 
+ 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index ca066b785e9f..c853e7410f5a 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
+ 	{QMI_FIXED_INTF(0x1435, 0xd181, 4)},	/* Wistron NeWeb D18Q1 */
+ 	{QMI_FIXED_INTF(0x1435, 0xd181, 5)},	/* Wistron NeWeb D18Q1 */
++	{QMI_FIXED_INTF(0x1435, 0xd191, 4)},	/* Wistron NeWeb D19Q1 */
+ 	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
+ 	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
+ 	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 23374603e4d9..aa21b2225679 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -147,6 +147,17 @@ struct receive_queue {
+ 	struct xdp_rxq_info xdp_rxq;
+ };
+ 
++/* Control VQ buffers: protected by the rtnl lock */
++struct control_buf {
++	struct virtio_net_ctrl_hdr hdr;
++	virtio_net_ctrl_ack status;
++	struct virtio_net_ctrl_mq mq;
++	u8 promisc;
++	u8 allmulti;
++	__virtio16 vid;
++	u64 offloads;
++};
++
+ struct virtnet_info {
+ 	struct virtio_device *vdev;
+ 	struct virtqueue *cvq;
+@@ -192,14 +203,7 @@ struct virtnet_info {
+ 	struct hlist_node node;
+ 	struct hlist_node node_dead;
+ 
+-	/* Control VQ buffers: protected by the rtnl lock */
+-	struct virtio_net_ctrl_hdr ctrl_hdr;
+-	virtio_net_ctrl_ack ctrl_status;
+-	struct virtio_net_ctrl_mq ctrl_mq;
+-	u8 ctrl_promisc;
+-	u8 ctrl_allmulti;
+-	u16 ctrl_vid;
+-	u64 ctrl_offloads;
++	struct control_buf *ctrl;
+ 
+ 	/* Ethtool settings */
+ 	u8 duplex;
+@@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ {
+ 	struct receive_queue *rq =
+ 		container_of(napi, struct receive_queue, napi);
+-	unsigned int received;
++	struct virtnet_info *vi = rq->vq->vdev->priv;
++	struct send_queue *sq;
++	unsigned int received, qp;
+ 	bool xdp_xmit = false;
+ 
+ 	virtnet_poll_cleantx(rq);
+@@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
+ 	if (received < budget)
+ 		virtqueue_napi_complete(napi, rq->vq, received);
+ 
+-	if (xdp_xmit)
++	if (xdp_xmit) {
++		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
++		     smp_processor_id();
++		sq = &vi->sq[qp];
++		virtqueue_kick(sq->vq);
+ 		xdp_do_flush_map();
++	}
+ 
+ 	return received;
+ }
+@@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ 	/* Caller should know better */
+ 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+ 
+-	vi->ctrl_status = ~0;
+-	vi->ctrl_hdr.class = class;
+-	vi->ctrl_hdr.cmd = cmd;
++	vi->ctrl->status = ~0;
++	vi->ctrl->hdr.class = class;
++	vi->ctrl->hdr.cmd = cmd;
+ 	/* Add header */
+-	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
++	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
+ 	sgs[out_num++] = &hdr;
+ 
+ 	if (out)
+ 		sgs[out_num++] = out;
+ 
+ 	/* Add return status. */
+-	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
++	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
+ 	sgs[out_num] = &stat;
+ 
+ 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
+ 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ 
+ 	if (unlikely(!virtqueue_kick(vi->cvq)))
+-		return vi->ctrl_status == VIRTIO_NET_OK;
++		return vi->ctrl->status == VIRTIO_NET_OK;
+ 
+ 	/* Spin for a response, the kick causes an ioport write, trapping
+ 	 * into the hypervisor, so the request should be handled immediately.
+@@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ 	       !virtqueue_is_broken(vi->cvq))
+ 		cpu_relax();
+ 
+-	return vi->ctrl_status == VIRTIO_NET_OK;
++	return vi->ctrl->status == VIRTIO_NET_OK;
+ }
+ 
+ static int virtnet_set_mac_address(struct net_device *dev, void *p)
+@@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+ 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
+ 		return 0;
+ 
+-	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+-	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
++	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
++	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+ 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
+@@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev)
+ 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
+ 		return;
+ 
+-	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
+-	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
++	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
++	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ 
+-	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
++	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
+ 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
+ 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
+-			 vi->ctrl_promisc ? "en" : "dis");
++			 vi->ctrl->promisc ? "en" : "dis");
+ 
+-	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
++	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
+ 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
+ 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
+-			 vi->ctrl_allmulti ? "en" : "dis");
++			 vi->ctrl->allmulti ? "en" : "dis");
+ 
+ 	uc_count = netdev_uc_count(dev);
+ 	mc_count = netdev_mc_count(dev);
+@@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
+ 	struct virtnet_info *vi = netdev_priv(dev);
+ 	struct scatterlist sg;
+ 
+-	vi->ctrl_vid = vid;
+-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
++	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
++	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
+ 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
+@@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
+ 	struct virtnet_info *vi = netdev_priv(dev);
+ 	struct scatterlist sg;
+ 
+-	vi->ctrl_vid = vid;
+-	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
++	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
++	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
+ 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
+@@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
+ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
+ {
+ 	struct scatterlist sg;
+-	vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads);
++	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
+ 
+-	sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads));
++	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ 
+ 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
+ 				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
+@@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
+ 
+ 	kfree(vi->rq);
+ 	kfree(vi->sq);
++	kfree(vi->ctrl);
+ }
+ 
+ static void _free_receive_bufs(struct virtnet_info *vi)
+@@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
+ {
+ 	int i;
+ 
++	vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
++	if (!vi->ctrl)
++		goto err_ctrl;
+ 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
+ 	if (!vi->sq)
+ 		goto err_sq;
+@@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
+ err_rq:
+ 	kfree(vi->sq);
+ err_sq:
++	kfree(vi->ctrl);
++err_ctrl:
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index e04937f44f33..9ebe2a689966 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+ 	union {
+ 		void *ptr;
+ 		struct ethhdr *eth;
++		struct vlan_ethhdr *veth;
+ 		struct iphdr *ipv4;
+ 		struct ipv6hdr *ipv6;
+ 		struct tcphdr *tcp;
+@@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
+ 	if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
+ 		return 0;
+ 
++	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
++	    skb->protocol == cpu_to_be16(ETH_P_8021AD))
++		hlen = sizeof(struct vlan_ethhdr);
++	else
++		hlen = sizeof(struct ethhdr);
++
+ 	hdr.eth = eth_hdr(skb);
+ 	if (gdesc->rcd.v4) {
+-		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP));
+-		hdr.ptr += sizeof(struct ethhdr);
++		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
++		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
++		hdr.ptr += hlen;
+ 		BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
+ 		hlen = hdr.ipv4->ihl << 2;
+ 		hdr.ptr += hdr.ipv4->ihl << 2;
+ 	} else if (gdesc->rcd.v6) {
+-		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6));
+-		hdr.ptr += sizeof(struct ethhdr);
++		BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
++		       hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
++		hdr.ptr += hlen;
+ 		/* Use an estimated value, since we also need to handle
+ 		 * TSO case.
+ 		 */
+diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
+index 59ec34052a65..a3326463b71f 100644
+--- a/drivers/net/vmxnet3/vmxnet3_int.h
++++ b/drivers/net/vmxnet3/vmxnet3_int.h
+@@ -69,10 +69,10 @@
+ /*
+  * Version numbers
+  */
+-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.13.0-k"
++#define VMXNET3_DRIVER_VERSION_STRING   "1.4.14.0-k"
+ 
+ /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
+-#define VMXNET3_DRIVER_VERSION_NUM      0x01040d00
++#define VMXNET3_DRIVER_VERSION_NUM      0x01040e00
+ 
+ #if defined(CONFIG_PCI_MSI)
+ 	/* RSS only makes sense if MSI-X is supported. */
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index ebb3f1b046f3..800a86e2d671 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -6028,9 +6028,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+ 				    sta->addr, smps, err);
+ 	}
+ 
+-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+-	    changed & IEEE80211_RC_NSS_CHANGED) {
+-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
++	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
++		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ 			   sta->addr);
+ 
+ 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
+index 96e73e30204e..5f111f0ee7ca 100644
+--- a/drivers/pinctrl/intel/pinctrl-intel.c
++++ b/drivers/pinctrl/intel/pinctrl-intel.c
+@@ -425,18 +425,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+ 	writel(value, padcfg0);
+ }
+ 
+-static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
+-{
+-	u32 value;
+-
+-	/* Put the pad into GPIO mode */
+-	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
+-	/* Disable SCI/SMI/NMI generation */
+-	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
+-	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
+-	writel(value, padcfg0);
+-}
+-
+ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 				     struct pinctrl_gpio_range *range,
+ 				     unsigned pin)
+@@ -444,6 +432,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ 	void __iomem *padcfg0;
+ 	unsigned long flags;
++	u32 value;
+ 
+ 	raw_spin_lock_irqsave(&pctrl->lock, flags);
+ 
+@@ -453,7 +442,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	}
+ 
+ 	padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
+-	intel_gpio_set_gpio_mode(padcfg0);
++	/* Put the pad into GPIO mode */
++	value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
++	/* Disable SCI/SMI/NMI generation */
++	value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
++	value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
++	writel(value, padcfg0);
++
+ 	/* Disable TX buffer and enable RX (this will be input) */
+ 	__intel_gpio_set_direction(padcfg0, true);
+ 
+@@ -973,8 +968,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
+ 
+ 	raw_spin_lock_irqsave(&pctrl->lock, flags);
+ 
+-	intel_gpio_set_gpio_mode(reg);
+-
+ 	value = readl(reg);
+ 
+ 	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index 62f5f04d8f61..5e963fe0e38d 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ int dasd_alias_add_device(struct dasd_device *device)
+ {
+ 	struct dasd_eckd_private *private = device->private;
+-	struct alias_lcu *lcu;
++	__u8 uaddr = private->uid.real_unit_addr;
++	struct alias_lcu *lcu = private->lcu;
+ 	unsigned long flags;
+ 	int rc;
+ 
+-	lcu = private->lcu;
+ 	rc = 0;
+ 	spin_lock_irqsave(&lcu->lock, flags);
++	/*
++	 * Check if device and lcu type differ. If so, the uac data may be
++	 * outdated and needs to be updated.
++	 */
++	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
++		lcu->flags |= UPDATE_PENDING;
++		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
++			      "uid type mismatch - trigger rescan");
++	}
+ 	if (!(lcu->flags & UPDATE_PENDING)) {
+ 		rc = _add_device_to_lcu(lcu, device, device);
+ 		if (rc)
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index c08fc5a8df0c..aea0b25eff29 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+ 
+ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ {
++	struct channel_path *chp;
+ 	struct chp_link link;
+ 	struct chp_id chpid;
+ 	int status;
+@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ 	chpid.id = sei_area->rsid;
+ 	/* allocate a new channel path structure, if needed */
+ 	status = chp_get_status(chpid);
+-	if (status < 0)
+-		chp_new(chpid);
+-	else if (!status)
++	if (!status)
+ 		return;
++
++	if (status < 0) {
++		chp_new(chpid);
++	} else {
++		chp = chpid_to_chp(chpid);
++		mutex_lock(&chp->lock);
++		chp_update_desc(chp);
++		mutex_unlock(&chp->lock);
++	}
+ 	memset(&link, 0, sizeof(struct chp_link));
+ 	link.chpid = chpid;
+ 	if ((sei_area->vf & 0xc0) != 0) {
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 959c65cf75d9..e338ce823c44 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -565,7 +565,6 @@ enum qeth_ip_types {
+ enum qeth_cmd_buffer_state {
+ 	BUF_STATE_FREE,
+ 	BUF_STATE_LOCKED,
+-	BUF_STATE_PROCESSED,
+ };
+ 
+ enum qeth_cq {
+@@ -609,7 +608,6 @@ struct qeth_channel {
+ 	struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
+ 	atomic_t irq_pending;
+ 	int io_buf_no;
+-	int buf_no;
+ };
+ 
+ /**
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 3653bea38470..c11a083cd956 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -821,7 +821,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
+ 
+ 	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+ 		qeth_release_buffer(channel, &channel->iob[cnt]);
+-	channel->buf_no = 0;
+ 	channel->io_buf_no = 0;
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+@@ -927,7 +926,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
+ 			kfree(channel->iob[cnt].data);
+ 		return -ENOMEM;
+ 	}
+-	channel->buf_no = 0;
+ 	channel->io_buf_no = 0;
+ 	atomic_set(&channel->irq_pending, 0);
+ 	spin_lock_init(&channel->iob_lock);
+@@ -1103,11 +1101,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ {
+ 	int rc;
+ 	int cstat, dstat;
+-	struct qeth_cmd_buffer *buffer;
+ 	struct qeth_channel *channel;
+ 	struct qeth_card *card;
+ 	struct qeth_cmd_buffer *iob;
+-	__u8 index;
+ 
+ 	if (__qeth_check_irb_error(cdev, intparm, irb))
+ 		return;
+@@ -1185,25 +1181,18 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		channel->state = CH_STATE_RCD_DONE;
+ 		goto out;
+ 	}
+-	if (intparm) {
+-		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+-		buffer->state = BUF_STATE_PROCESSED;
+-	}
+ 	if (channel == &card->data)
+ 		return;
+ 	if (channel == &card->read &&
+ 	    channel->state == CH_STATE_UP)
+ 		__qeth_issue_next_read(card);
+ 
+-	iob = channel->iob;
+-	index = channel->buf_no;
+-	while (iob[index].state == BUF_STATE_PROCESSED) {
+-		if (iob[index].callback != NULL)
+-			iob[index].callback(channel, iob + index);
+-
+-		index = (index + 1) % QETH_CMD_BUFFER_NO;
++	if (intparm) {
++		iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
++		if (iob->callback)
++			iob->callback(iob->channel, iob);
+ 	}
+-	channel->buf_no = index;
++
+ out:
+ 	wake_up(&card->wait_q);
+ 	return;
+@@ -2217,7 +2206,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ error:
+ 	atomic_set(&card->write.irq_pending, 0);
+ 	qeth_release_buffer(iob->channel, iob);
+-	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
+ 	rc = reply->rc;
+ 	qeth_put_reply(reply);
+ 	return rc;
+@@ -3037,28 +3025,23 @@ static int qeth_send_startlan(struct qeth_card *card)
+ 	return rc;
+ }
+ 
+-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
+-		struct qeth_reply *reply, unsigned long data)
++static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+ {
+-	struct qeth_ipa_cmd *cmd;
+-
+-	QETH_CARD_TEXT(card, 4, "defadpcb");
+-
+-	cmd = (struct qeth_ipa_cmd *) data;
+-	if (cmd->hdr.return_code == 0)
++	if (!cmd->hdr.return_code)
+ 		cmd->hdr.return_code =
+ 			cmd->data.setadapterparms.hdr.return_code;
+-	return 0;
++	return cmd->hdr.return_code;
+ }
+ 
+ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 
+ 	QETH_CARD_TEXT(card, 3, "quyadpcb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
+ 		card->info.link_type =
+ 		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+@@ -3066,7 +3049,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
+ 	}
+ 	card->options.adp.supported_funcs =
+ 		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+-	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
++	return 0;
+ }
+ 
+ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
+@@ -3158,22 +3141,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
+ static int qeth_query_switch_attributes_cb(struct qeth_card *card,
+ 				struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
+-	struct qeth_switch_info *sw_info;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_query_switch_attributes *attrs;
++	struct qeth_switch_info *sw_info;
+ 
+ 	QETH_CARD_TEXT(card, 2, "qswiatcb");
+-	cmd = (struct qeth_ipa_cmd *) data;
+-	sw_info = (struct qeth_switch_info *)reply->param;
+-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
+-		attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+-		sw_info->capabilities = attrs->capabilities;
+-		sw_info->settings = attrs->settings;
+-		QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+-							sw_info->settings);
+-	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
++	sw_info = (struct qeth_switch_info *)reply->param;
++	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
++	sw_info->capabilities = attrs->capabilities;
++	sw_info->settings = attrs->settings;
++	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
++			sw_info->settings);
+ 	return 0;
+ }
+ 
+@@ -4211,16 +4192,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
+ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_ipacmd_setadpparms *setparms;
+ 
+ 	QETH_CARD_TEXT(card, 4, "prmadpcb");
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	setparms = &(cmd->data.setadapterparms);
+-
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+-	if (cmd->hdr.return_code) {
++	if (qeth_setadpparms_inspect_rc(cmd)) {
+ 		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
+ 		setparms->data.mode = SET_PROMISC_MODE_OFF;
+ 	}
+@@ -4290,18 +4268,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
+ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 
+ 	QETH_CARD_TEXT(card, 4, "chgmaccb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	if (!card->options.layer2 ||
+ 	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
+ 		ether_addr_copy(card->dev->dev_addr,
+ 				cmd->data.setadapterparms.data.change_addr.addr);
+ 		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
+ 	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ 	return 0;
+ }
+ 
+@@ -4332,13 +4310,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
+ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ 	struct qeth_set_access_ctrl *access_ctrl_req;
+ 	int fallback = *(int *)reply->param;
+ 
+ 	QETH_CARD_TEXT(card, 4, "setaccb");
++	if (cmd->hdr.return_code)
++		return 0;
++	qeth_setadpparms_inspect_rc(cmd);
+ 
+-	cmd = (struct qeth_ipa_cmd *) data;
+ 	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
+ 	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
+ 	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
+@@ -4411,7 +4391,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
+ 			card->options.isolation = card->options.prev_isolation;
+ 		break;
+ 	}
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+ 	return 0;
+ }
+ 
+@@ -4699,14 +4678,15 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
+ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ 	struct qeth_qoat_priv *priv;
+ 	char *resdata;
+ 	int resdatalen;
+ 
+ 	QETH_CARD_TEXT(card, 3, "qoatcb");
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	cmd = (struct qeth_ipa_cmd *)data;
+ 	priv = (struct qeth_qoat_priv *)reply->param;
+ 	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
+ 	resdata = (char *)data + 28;
+@@ -4800,21 +4780,18 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
+ static int qeth_query_card_info_cb(struct qeth_card *card,
+ 				   struct qeth_reply *reply, unsigned long data)
+ {
+-	struct qeth_ipa_cmd *cmd;
++	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
++	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ 	struct qeth_query_card_info *card_info;
+-	struct carrier_info *carrier_info;
+ 
+ 	QETH_CARD_TEXT(card, 2, "qcrdincb");
+-	carrier_info = (struct carrier_info *)reply->param;
+-	cmd = (struct qeth_ipa_cmd *)data;
+-	card_info = &cmd->data.setadapterparms.data.card_info;
+-	if (cmd->data.setadapterparms.hdr.return_code == 0) {
+-		carrier_info->card_type = card_info->card_type;
+-		carrier_info->port_mode = card_info->port_mode;
+-		carrier_info->port_speed = card_info->port_speed;
+-	}
++	if (qeth_setadpparms_inspect_rc(cmd))
++		return 0;
+ 
+-	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
++	card_info = &cmd->data.setadapterparms.data.card_info;
++	carrier_info->card_type = card_info->card_type;
++	carrier_info->port_mode = card_info->port_mode;
++	carrier_info->port_speed = card_info->port_speed;
+ 	return 0;
+ }
+ 
+@@ -6567,10 +6544,14 @@ static int __init qeth_core_init(void)
+ 	mutex_init(&qeth_mod_mutex);
+ 
+ 	qeth_wq = create_singlethread_workqueue("qeth_wq");
++	if (!qeth_wq) {
++		rc = -ENOMEM;
++		goto out_err;
++	}
+ 
+ 	rc = qeth_register_dbf_views();
+ 	if (rc)
+-		goto out_err;
++		goto dbf_err;
+ 	qeth_core_root_dev = root_device_register("qeth");
+ 	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
+ 	if (rc)
+@@ -6607,6 +6588,8 @@ static int __init qeth_core_init(void)
+ 	root_device_unregister(qeth_core_root_dev);
+ register_err:
+ 	qeth_unregister_dbf_views();
++dbf_err:
++	destroy_workqueue(qeth_wq);
+ out_err:
+ 	pr_err("Initializing the qeth device driver failed\n");
+ 	return rc;
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 067d52e95f02..d7191943ecb8 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
+ 	union {	/* Object pointer [lock] */
+ 		struct inode *inode;
+ 		struct vfsmount *mnt;
+-	};
+-	union {
+-		struct hlist_head list;
+ 		/* Used listing heads to free after srcu period expires */
+ 		struct fsnotify_mark_connector *destroy_next;
+ 	};
++	struct hlist_head list;
+ };
+ 
+ /*
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 36dd21fe5caf..325017ad9311 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -498,16 +498,23 @@ struct hmm_device {
+ struct hmm_device *hmm_device_new(void *drvdata);
+ void hmm_device_put(struct hmm_device *hmm_device);
+ #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
++#endif /* IS_ENABLED(CONFIG_HMM) */
+ 
+ /* Below are for HMM internal use only! Not to be used by device driver! */
++#if IS_ENABLED(CONFIG_HMM_MIRROR)
+ void hmm_mm_destroy(struct mm_struct *mm);
+ 
+ static inline void hmm_mm_init(struct mm_struct *mm)
+ {
+ 	mm->hmm = NULL;
+ }
++#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
++static inline void hmm_mm_destroy(struct mm_struct *mm) {}
++static inline void hmm_mm_init(struct mm_struct *mm) {}
++#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
++
++
+ #else /* IS_ENABLED(CONFIG_HMM) */
+ static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+ static inline void hmm_mm_init(struct mm_struct *mm) {}
+-#endif /* IS_ENABLED(CONFIG_HMM) */
+ #endif /* LINUX_HMM_H */
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 7d30892da064..87b8c20d5b27 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -639,7 +639,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+  * Returns true if the skb is tagged with multiple vlan headers, regardless
+  * of whether it is hardware accelerated or not.
+  */
+-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
+ {
+ 	__be16 protocol = skb->protocol;
+ 
+@@ -649,6 +649,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+ 		if (likely(!eth_type_vlan(protocol)))
+ 			return false;
+ 
++		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
++			return false;
++
+ 		veh = (struct vlan_ethhdr *)skb->data;
+ 		protocol = veh->h_vlan_encapsulated_proto;
+ 	}
+@@ -666,7 +669,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+  *
+  * Returns features without unsafe ones if the skb has multiple tags.
+  */
+-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
+ 						    netdev_features_t features)
+ {
+ 	if (skb_vlan_tagged_multi(skb)) {
+diff --git a/include/linux/tpm.h b/include/linux/tpm.h
+index bcdd3790e94d..06639fb6ab85 100644
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -44,7 +44,7 @@ struct tpm_class_ops {
+ 	bool (*update_timeouts)(struct tpm_chip *chip,
+ 				unsigned long *timeout_cap);
+ 	int (*request_locality)(struct tpm_chip *chip, int loc);
+-	void (*relinquish_locality)(struct tpm_chip *chip, int loc);
++	int (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ 	void (*clk_enable)(struct tpm_chip *chip, bool value);
+ };
+ 
+diff --git a/include/net/ife.h b/include/net/ife.h
+index 44b9c00f7223..e117617e3c34 100644
+--- a/include/net/ife.h
++++ b/include/net/ife.h
+@@ -12,7 +12,8 @@
+ void *ife_encode(struct sk_buff *skb, u16 metalen);
+ void *ife_decode(struct sk_buff *skb, u16 *metalen);
+ 
+-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
++void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
++			  u16 *dlen, u16 *totlen);
+ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+ 			const void *dval);
+ 
+diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
+index 5c40f118c0fa..df528a623548 100644
+--- a/include/net/llc_conn.h
++++ b/include/net/llc_conn.h
+@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
+ 
+ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+ 			  struct proto *prot, int kern);
++void llc_sk_stop_all_timers(struct sock *sk, bool sync);
+ void llc_sk_free(struct sock *sk);
+ 
+ void llc_sk_reset(struct sock *sk);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 102160ff5c66..ea619021d901 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
+ 	struct kprobe_blacklist_entry *ent =
+ 		list_entry(v, struct kprobe_blacklist_entry, list);
+ 
+-	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
++	seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
+ 		   (void *)ent->end_addr, (void *)ent->start_addr);
+ 	return 0;
+ }
+diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
+index e954ae3d82c0..e3a658bac10f 100644
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
+ 		__field(	unsigned int,		seqnum		)
+ 	),
+ 
+-	F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
++	F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
+ 		 __entry->seqnum,
+ 		 __entry->tv_sec,
+ 		 __entry->tv_nsec,
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c4aa2941dbfd..3e550507e9f0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2942,7 +2942,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
+ }
+ EXPORT_SYMBOL(passthru_features_check);
+ 
+-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
++static netdev_features_t dflt_features_check(struct sk_buff *skb,
+ 					     struct net_device *dev,
+ 					     netdev_features_t features)
+ {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 7b7a14abba28..ce519861be59 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t);
+ static void __neigh_notify(struct neighbour *n, int type, int flags,
+ 			   u32 pid);
+ static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev);
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct file_operations neigh_stat_seq_fops;
+@@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ {
+ 	write_lock_bh(&tbl->lock);
+ 	neigh_flush_dev(tbl, dev);
+-	pneigh_ifdown(tbl, dev);
+-	write_unlock_bh(&tbl->lock);
++	pneigh_ifdown_and_unlock(tbl, dev);
+ 
+ 	del_timer_sync(&tbl->proxy_timer);
+ 	pneigh_queue_purge(&tbl->proxy_queue);
+@@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ 	return -ENOENT;
+ }
+ 
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev)
+ {
+-	struct pneigh_entry *n, **np;
++	struct pneigh_entry *n, **np, *freelist = NULL;
+ 	u32 h;
+ 
+ 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+@@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ 		while ((n = *np) != NULL) {
+ 			if (!dev || n->dev == dev) {
+ 				*np = n->next;
+-				if (tbl->pdestructor)
+-					tbl->pdestructor(n);
+-				if (n->dev)
+-					dev_put(n->dev);
+-				kfree(n);
++				n->next = freelist;
++				freelist = n;
+ 				continue;
+ 			}
+ 			np = &n->next;
+ 		}
+ 	}
++	write_unlock_bh(&tbl->lock);
++	while ((n = freelist)) {
++		freelist = n->next;
++		n->next = NULL;
++		if (tbl->pdestructor)
++			tbl->pdestructor(n);
++		if (n->dev)
++			dev_put(n->dev);
++		kfree(n);
++	}
+ 	return -ENOENT;
+ }
+ 
+@@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 
+ 	err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
+ 	if (!err) {
+-		if (tb[NDA_IFINDEX])
++		if (tb[NDA_IFINDEX]) {
++			if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
++				return -EINVAL;
+ 			filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
+-
+-		if (tb[NDA_MASTER])
++		}
++		if (tb[NDA_MASTER]) {
++			if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
++				return -EINVAL;
+ 			filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
+-
++		}
+ 		if (filter_idx || filter_master_idx)
+ 			flags |= NLM_F_DUMP_FILTERED;
+ 	}
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index e1d4d898a007..ed372d550137 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 
+ 			next_opt = memchr(opt, '#', end - opt) ?: end;
+ 			opt_len = next_opt - opt;
+-			if (!opt_len) {
+-				printk(KERN_WARNING
+-				       "Empty option to dns_resolver key\n");
++			if (opt_len <= 0 || opt_len > 128) {
++				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
++						    opt_len);
+ 				return -EINVAL;
+ 			}
+ 
+@@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 			}
+ 
+ 		bad_option_value:
+-			printk(KERN_WARNING
+-			       "Option '%*.*s' to dns_resolver key:"
+-			       " bad/missing value\n",
+-			       opt_nlen, opt_nlen, opt);
++			pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
++					    opt_nlen, opt_nlen, opt);
+ 			return -EINVAL;
+ 		} while (opt = next_opt + 1, opt < end);
+ 	}
+diff --git a/net/ife/ife.c b/net/ife/ife.c
+index 7d1ec76e7f43..13bbf8cb6a39 100644
+--- a/net/ife/ife.c
++++ b/net/ife/ife.c
+@@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
+ 	int total_pull;
+ 	u16 ifehdrln;
+ 
++	if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
++		return NULL;
++
+ 	ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
+ 	ifehdrln = ntohs(ifehdr->metalen);
+ 	total_pull = skb->dev->hard_header_len + ifehdrln;
+@@ -92,12 +95,43 @@ struct meta_tlvhdr {
+ 	__be16 len;
+ };
+ 
++static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
++				 const unsigned char *ifehdr_end)
++{
++	const struct meta_tlvhdr *tlv;
++	u16 tlvlen;
++
++	if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
++		return false;
++
++	tlv = (const struct meta_tlvhdr *)skbdata;
++	tlvlen = ntohs(tlv->len);
++
++	/* tlv length field is inc header, check on minimum */
++	if (tlvlen < NLA_HDRLEN)
++		return false;
++
++	/* overflow by NLA_ALIGN check */
++	if (NLA_ALIGN(tlvlen) < tlvlen)
++		return false;
++
++	if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
++		return false;
++
++	return true;
++}
++
+ /* Caller takes care of presenting data in network order
+  */
+-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen)
++void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
++			  u16 *dlen, u16 *totlen)
+ {
+-	struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
++	struct meta_tlvhdr *tlv;
++
++	if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
++		return NULL;
+ 
++	tlv = (struct meta_tlvhdr *)skbdata;
+ 	*dlen = ntohs(tlv->len) - NLA_HDRLEN;
+ 	*attrtype = ntohs(tlv->type);
+ 
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 8b8059b7af4d..1ab8733dac5f 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2385,6 +2385,7 @@ void tcp_write_queue_purge(struct sock *sk)
+ 	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
+ 	sk_mem_reclaim(sk);
+ 	tcp_clear_all_retrans_hints(tcp_sk(sk));
++	tcp_sk(sk)->packets_out = 0;
+ }
+ 
+ int tcp_disconnect(struct sock *sk, int flags)
+@@ -2434,7 +2435,6 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	icsk->icsk_backoff = 0;
+ 	tp->snd_cwnd = 2;
+ 	icsk->icsk_probes_out = 0;
+-	tp->packets_out = 0;
+ 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ 	tp->snd_cwnd_cnt = 0;
+ 	tp->window_clamp = 0;
+@@ -2830,8 +2830,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ #ifdef CONFIG_TCP_MD5SIG
+ 	case TCP_MD5SIG:
+ 	case TCP_MD5SIG_EXT:
+-		/* Read the IP->Key mappings from userspace */
+-		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
++		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
++			err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
++		else
++			err = -EINVAL;
+ 		break;
+ #endif
+ 	case TCP_USER_TIMEOUT:
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index ff6cd98ce8d5..31ca27fdde66 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3871,11 +3871,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ 	int length = (th->doff << 2) - sizeof(*th);
+ 	const u8 *ptr = (const u8 *)(th + 1);
+ 
+-	/* If the TCP option is too short, we can short cut */
+-	if (length < TCPOLEN_MD5SIG)
+-		return NULL;
+-
+-	while (length > 0) {
++	/* If not enough data remaining, we can short cut */
++	while (length >= TCPOLEN_MD5SIG) {
+ 		int opcode = *ptr++;
+ 		int opsize;
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index fc74352fac12..74a2e37412b2 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3862,6 +3862,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
+ 
+ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+ 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
++	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
+ 	[RTA_OIF]               = { .type = NLA_U32 },
+ 	[RTA_IIF]		= { .type = NLA_U32 },
+ 	[RTA_PRIORITY]          = { .type = NLA_U32 },
+@@ -3873,6 +3874,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
+ 	[RTA_EXPIRES]		= { .type = NLA_U32 },
+ 	[RTA_UID]		= { .type = NLA_U32 },
+ 	[RTA_MARK]		= { .type = NLA_U32 },
++	[RTA_TABLE]		= { .type = NLA_U32 },
+ };
+ 
+ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
+index f343e6f0fc95..5fe139484919 100644
+--- a/net/ipv6/seg6_iptunnel.c
++++ b/net/ipv6/seg6_iptunnel.c
+@@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
+ 	isrh->nexthdr = proto;
+ 
+ 	hdr->daddr = isrh->segments[isrh->first_segment];
+-	set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
++	set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
+ 
+ #ifdef CONFIG_IPV6_SEG6_HMAC
+ 	if (sr_has_hmac(isrh)) {
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 0fbd3ee26165..40261cb68e83 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+ 
++struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
++{
++	const struct l2tp_net *pn = l2tp_pernet(net);
++	struct l2tp_tunnel *tunnel;
++	int count = 0;
++
++	rcu_read_lock_bh();
++	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
++		if (++count > nth) {
++			l2tp_tunnel_inc_refcount(tunnel);
++			rcu_read_unlock_bh();
++			return tunnel;
++		}
++	}
++	rcu_read_unlock_bh();
++
++	return NULL;
++}
++EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
++
+ /* Lookup a session. A new reference is held on the returned session. */
+ struct l2tp_session *l2tp_session_get(const struct net *net,
+ 				      struct l2tp_tunnel *tunnel,
+@@ -335,26 +355,6 @@ int l2tp_session_register(struct l2tp_session *session,
+ }
+ EXPORT_SYMBOL_GPL(l2tp_session_register);
+ 
+-struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
+-{
+-	struct l2tp_net *pn = l2tp_pernet(net);
+-	struct l2tp_tunnel *tunnel;
+-	int count = 0;
+-
+-	rcu_read_lock_bh();
+-	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		if (++count > nth) {
+-			rcu_read_unlock_bh();
+-			return tunnel;
+-		}
+-	}
+-
+-	rcu_read_unlock_bh();
+-
+-	return NULL;
+-}
+-EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
+-
+ /*****************************************************************************
+  * Receive data handling
+  *****************************************************************************/
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index ba33cbec71eb..c199020f8a8a 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session)
+ }
+ 
+ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
++struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth);
++
+ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+ 
+ struct l2tp_session *l2tp_session_get(const struct net *net,
+@@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
+ struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
+ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
+ 						const char *ifname);
+-struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
+ 
+ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ 		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
+index 72e713da4733..7f1e842ef05a 100644
+--- a/net/l2tp/l2tp_debugfs.c
++++ b/net/l2tp/l2tp_debugfs.c
+@@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data {
+ 
+ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
+ {
+-	pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
++	/* Drop reference taken during previous invocation */
++	if (pd->tunnel)
++		l2tp_tunnel_dec_refcount(pd->tunnel);
++
++	pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx);
+ 	pd->tunnel_idx++;
+ }
+ 
+@@ -96,7 +100,17 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 
+ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
+ {
+-	/* nothing to do */
++	struct l2tp_dfs_seq_data *pd = v;
++
++	if (!pd || pd == SEQ_START_TOKEN)
++		return;
++
++	/* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
++	if (pd->tunnel) {
++		l2tp_tunnel_dec_refcount(pd->tunnel);
++		pd->tunnel = NULL;
++		pd->session = NULL;
++	}
+ }
+ 
+ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index b05dbd9ffcb2..6616c9fd292f 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
+ 	struct net *net = sock_net(skb->sk);
+ 
+ 	for (;;) {
+-		tunnel = l2tp_tunnel_find_nth(net, ti);
++		tunnel = l2tp_tunnel_get_nth(net, ti);
+ 		if (tunnel == NULL)
+ 			goto out;
+ 
+ 		if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
+ 					cb->nlh->nlmsg_seq, NLM_F_MULTI,
+-					tunnel, L2TP_CMD_TUNNEL_GET) < 0)
++					tunnel, L2TP_CMD_TUNNEL_GET) < 0) {
++			l2tp_tunnel_dec_refcount(tunnel);
+ 			goto out;
++		}
++		l2tp_tunnel_dec_refcount(tunnel);
+ 
+ 		ti++;
+ 	}
+@@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 
+ 	for (;;) {
+ 		if (tunnel == NULL) {
+-			tunnel = l2tp_tunnel_find_nth(net, ti);
++			tunnel = l2tp_tunnel_get_nth(net, ti);
+ 			if (tunnel == NULL)
+ 				goto out;
+ 		}
+@@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 		session = l2tp_session_get_nth(tunnel, si);
+ 		if (session == NULL) {
+ 			ti++;
++			l2tp_tunnel_dec_refcount(tunnel);
+ 			tunnel = NULL;
+ 			si = 0;
+ 			continue;
+@@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
+ 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ 					 session, L2TP_CMD_SESSION_GET) < 0) {
+ 			l2tp_session_dec_refcount(session);
++			l2tp_tunnel_dec_refcount(tunnel);
+ 			break;
+ 		}
+ 		l2tp_session_dec_refcount(session);
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 3d7887cc599b..0c4530ad74be 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OL2TP)
+ 		goto end;
+ 
+@@ -1552,16 +1559,19 @@ struct pppol2tp_seq_data {
+ 
+ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
+ {
++	/* Drop reference taken during previous invocation */
++	if (pd->tunnel)
++		l2tp_tunnel_dec_refcount(pd->tunnel);
++
+ 	for (;;) {
+-		pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
++		pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx);
+ 		pd->tunnel_idx++;
+ 
+-		if (pd->tunnel == NULL)
+-			break;
++		/* Only accept L2TPv2 tunnels */
++		if (!pd->tunnel || pd->tunnel->version == 2)
++			return;
+ 
+-		/* Ignore L2TPv3 tunnels */
+-		if (pd->tunnel->version < 3)
+-			break;
++		l2tp_tunnel_dec_refcount(pd->tunnel);
+ 	}
+ }
+ 
+@@ -1610,7 +1620,17 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ 
+ static void pppol2tp_seq_stop(struct seq_file *p, void *v)
+ {
+-	/* nothing to do */
++	struct pppol2tp_seq_data *pd = v;
++
++	if (!pd || pd == SEQ_START_TOKEN)
++		return;
++
++	/* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
++	if (pd->tunnel) {
++		l2tp_tunnel_dec_refcount(pd->tunnel);
++		pd->tunnel = NULL;
++		pd->session = NULL;
++	}
+ }
+ 
+ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index c38d16f22d2a..cf41d9b4a0b8 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock)
+ 		llc->laddr.lsap, llc->daddr.lsap);
+ 	if (!llc_send_disc(sk))
+ 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+-	if (!sock_flag(sk, SOCK_ZAPPED))
++	if (!sock_flag(sk, SOCK_ZAPPED)) {
++		struct llc_sap *sap = llc->sap;
++
++		/* Hold this for release_sock(), so that llc_backlog_rcv()
++		 * could still use it.
++		 */
++		llc_sap_hold(sap);
+ 		llc_sap_remove_socket(llc->sap, sk);
+-	release_sock(sk);
++		release_sock(sk);
++		llc_sap_put(sap);
++	} else {
++		release_sock(sk);
++	}
+ 	if (llc->dev)
+ 		dev_put(llc->dev);
+ 	sock_put(sk);
+diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
+index 163121192aca..4d78375f9872 100644
+--- a/net/llc/llc_c_ac.c
++++ b/net/llc/llc_c_ac.c
+@@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
+ 
+ int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
+ {
+-	struct llc_sock *llc = llc_sk(sk);
+-
+-	del_timer(&llc->pf_cycle_timer.timer);
+-	del_timer(&llc->ack_timer.timer);
+-	del_timer(&llc->rej_sent_timer.timer);
+-	del_timer(&llc->busy_state_timer.timer);
+-	llc->ack_must_be_send = 0;
+-	llc->ack_pf = 0;
++	llc_sk_stop_all_timers(sk, false);
+ 	return 0;
+ }
+ 
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index 110e32bcb399..c0ac522b48a1 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -961,6 +961,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr
+ 	return sk;
+ }
+ 
++void llc_sk_stop_all_timers(struct sock *sk, bool sync)
++{
++	struct llc_sock *llc = llc_sk(sk);
++
++	if (sync) {
++		del_timer_sync(&llc->pf_cycle_timer.timer);
++		del_timer_sync(&llc->ack_timer.timer);
++		del_timer_sync(&llc->rej_sent_timer.timer);
++		del_timer_sync(&llc->busy_state_timer.timer);
++	} else {
++		del_timer(&llc->pf_cycle_timer.timer);
++		del_timer(&llc->ack_timer.timer);
++		del_timer(&llc->rej_sent_timer.timer);
++		del_timer(&llc->busy_state_timer.timer);
++	}
++
++	llc->ack_must_be_send = 0;
++	llc->ack_pf = 0;
++}
++
+ /**
+  *	llc_sk_free - Frees a LLC socket
+  *	@sk - socket to free
+@@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk)
+ 
+ 	llc->state = LLC_CONN_OUT_OF_SVC;
+ 	/* Stop all (possibly) running timers */
+-	llc_conn_ac_stop_all_timers(sk, NULL);
++	llc_sk_stop_all_timers(sk, true);
+ #ifdef DEBUG_LLC_CONN_ALLOC
+ 	printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
+ 		skb_queue_len(&llc->pdu_unack_q),
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e0f3f4aeeb4f..3b43b1fcd618 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+ 	skb_set_queue_mapping(skb, queue_index);
+ }
+ 
+-/* register_prot_hook must be invoked with the po->bind_lock held,
++/* __register_prot_hook must be invoked through register_prot_hook
+  * or from a context in which asynchronous accesses to the packet
+  * socket is not possible (packet_create()).
+  */
+-static void register_prot_hook(struct sock *sk)
++static void __register_prot_hook(struct sock *sk)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 
+@@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk)
+ 	}
+ }
+ 
+-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
+- * held.   If the sync parameter is true, we will temporarily drop
++static void register_prot_hook(struct sock *sk)
++{
++	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
++	__register_prot_hook(sk);
++}
++
++/* If the sync parameter is true, we will temporarily drop
+  * the po->bind_lock and do a synchronize_net to make sure no
+  * asynchronous packet processing paths still refer to the elements
+  * of po->prot_hook.  If the sync parameter is false, it is the
+@@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 
++	lockdep_assert_held_once(&po->bind_lock);
++
+ 	po->running = 0;
+ 
+ 	if (po->fanout)
+@@ -3008,6 +3015,7 @@ static int packet_release(struct socket *sock)
+ 
+ 	packet_flush_mclist(sk);
+ 
++	lock_sock(sk);
+ 	if (po->rx_ring.pg_vec) {
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 0);
+@@ -3017,6 +3025,7 @@ static int packet_release(struct socket *sock)
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 1);
+ 	}
++	release_sock(sk);
+ 
+ 	f = fanout_release(sk);
+ 
+@@ -3250,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
+ 
+ 	if (proto) {
+ 		po->prot_hook.type = proto;
+-		register_prot_hook(sk);
++		__register_prot_hook(sk);
+ 	}
+ 
+ 	mutex_lock(&net->packet.sklist_lock);
+@@ -3645,6 +3654,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		union tpacket_req_u req_u;
+ 		int len;
+ 
++		lock_sock(sk);
+ 		switch (po->tp_version) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+@@ -3655,12 +3665,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 			len = sizeof(req_u.req3);
+ 			break;
+ 		}
+-		if (optlen < len)
+-			return -EINVAL;
+-		if (copy_from_user(&req_u.req, optval, len))
+-			return -EFAULT;
+-		return packet_set_ring(sk, &req_u, 0,
+-			optname == PACKET_TX_RING);
++		if (optlen < len) {
++			ret = -EINVAL;
++		} else {
++			if (copy_from_user(&req_u.req, optval, len))
++				ret = -EFAULT;
++			else
++				ret = packet_set_ring(sk, &req_u, 0,
++						    optname == PACKET_TX_RING);
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_COPY_THRESH:
+ 	{
+@@ -3726,12 +3741,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+-		po->tp_loss = !!val;
+-		return 0;
++
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_loss = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_AUXDATA:
+ 	{
+@@ -3742,7 +3763,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
++		lock_sock(sk);
+ 		po->auxdata = !!val;
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_ORIGDEV:
+@@ -3754,7 +3777,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
++		lock_sock(sk);
+ 		po->origdev = !!val;
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_VNET_HDR:
+@@ -3763,15 +3788,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (sock->type != SOCK_RAW)
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (optlen < sizeof(val))
+ 			return -EINVAL;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 
+-		po->has_vnet_hdr = !!val;
+-		return 0;
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->has_vnet_hdr = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_TIMESTAMP:
+ 	{
+@@ -3809,11 +3839,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+-		po->tp_tx_has_off = !!val;
++
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_tx_has_off = !!val;
++			ret = 0;
++		}
++		release_sock(sk);
+ 		return 0;
+ 	}
+ 	case PACKET_QDISC_BYPASS:
+@@ -4210,8 +4246,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
+-	lock_sock(sk);
+-
+ 	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
+ 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ 
+@@ -4349,7 +4383,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
+ 
+diff --git a/net/packet/internal.h b/net/packet/internal.h
+index a1d2b2319ae9..3bb7c5fb3bff 100644
+--- a/net/packet/internal.h
++++ b/net/packet/internal.h
+@@ -112,10 +112,12 @@ struct packet_sock {
+ 	int			copy_thresh;
+ 	spinlock_t		bind_lock;
+ 	struct mutex		pg_vec_lock;
+-	unsigned int		running:1,	/* prot_hook is attached*/
+-				auxdata:1,
++	unsigned int		running;	/* bind_lock must be held */
++	unsigned int		auxdata:1,	/* writer must hold sock lock */
+ 				origdev:1,
+-				has_vnet_hdr:1;
++				has_vnet_hdr:1,
++				tp_loss:1,
++				tp_tx_has_off:1;
+ 	int			pressure;
+ 	int			ifindex;	/* bound device		*/
+ 	__be16			num;
+@@ -125,8 +127,6 @@ struct packet_sock {
+ 	enum tpacket_versions	tp_version;
+ 	unsigned int		tp_hdrlen;
+ 	unsigned int		tp_reserve;
+-	unsigned int		tp_loss:1;
+-	unsigned int		tp_tx_has_off:1;
+ 	unsigned int		tp_tstamp;
+ 	struct net_device __rcu	*cached_dev;
+ 	int			(*xmit)(struct sk_buff *skb);
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 5954e992685a..1d477b054f2e 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
+ 		}
+ 	}
+ 
+-	return 0;
++	return -ENOENT;
+ }
+ 
+ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+@@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ 		u16 mtype;
+ 		u16 dlen;
+ 
+-		curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
++		curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
++						&dlen, NULL);
++		if (!curr_data) {
++			qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
++			return TC_ACT_SHOT;
++		}
+ 
+ 		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
+ 			/* abuse overlimits to count when we receive metadata
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index f6d3d0c1e133..07b64719d1bc 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ 	addr->v6.sin6_scope_id = 0;
+ }
+ 
+-/* Compare addresses exactly.
+- * v4-mapped-v6 is also in consideration.
+- */
+-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+-			    const union sctp_addr *addr2)
++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			      const union sctp_addr *addr2)
+ {
+ 	if (addr1->sa.sa_family != addr2->sa.sa_family) {
+ 		if (addr1->sa.sa_family == AF_INET &&
+ 		    addr2->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
+-			if (addr2->v6.sin6_port == addr1->v4.sin_port &&
+-			    addr2->v6.sin6_addr.s6_addr32[3] ==
+-			    addr1->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
++		    addr2->v6.sin6_addr.s6_addr32[3] ==
++		    addr1->v4.sin_addr.s_addr)
++			return 1;
++
+ 		if (addr2->sa.sa_family == AF_INET &&
+ 		    addr1->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
+-			if (addr1->v6.sin6_port == addr2->v4.sin_port &&
+-			    addr1->v6.sin6_addr.s6_addr32[3] ==
+-			    addr2->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
++		    addr1->v6.sin6_addr.s6_addr32[3] ==
++		    addr2->v4.sin_addr.s_addr)
++			return 1;
++
+ 		return 0;
+ 	}
+-	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+-		return 0;
++
+ 	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ 		return 0;
++
+ 	/* If this is a linklocal address, compare the scope_id. */
+-	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+-		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+-		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
+-			return 0;
+-		}
+-	}
++	if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
++	    addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
++	    addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
++		return 0;
+ 
+ 	return 1;
+ }
+ 
++/* Compare addresses exactly.
++ * v4-mapped-v6 is also in consideration.
++ */
++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			    const union sctp_addr *addr2)
++{
++	return __sctp_v6_cmp_addr(addr1, addr2) &&
++	       addr1->v6.sin6_port == addr2->v6.sin6_port;
++}
++
+ /* Initialize addr struct to INADDR_ANY. */
+ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
+ {
+@@ -846,8 +849,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 			       const union sctp_addr *addr2,
+ 			       struct sctp_sock *opt)
+ {
+-	struct sctp_af *af1, *af2;
+ 	struct sock *sk = sctp_opt2sk(opt);
++	struct sctp_af *af1, *af2;
+ 
+ 	af1 = sctp_get_af_specific(addr1->sa.sa_family);
+ 	af2 = sctp_get_af_specific(addr2->sa.sa_family);
+@@ -863,10 +866,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ 		return 1;
+ 
+-	if (addr1->sa.sa_family != addr2->sa.sa_family)
+-		return 0;
+-
+-	return af1->cmp_addr(addr1, addr2);
++	return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+ /* Verify that the provided sockaddr looks bindable.   Common verification,
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 1e0d780855c3..afd5a935bbcb 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1254,14 +1254,12 @@ static int smc_shutdown(struct socket *sock, int how)
+ 		rc = smc_close_shutdown_write(smc);
+ 		break;
+ 	case SHUT_RD:
+-		if (sk->sk_state == SMC_LISTEN)
+-			rc = smc_close_active(smc);
+-		else
+-			rc = 0;
+-			/* nothing more to do because peer is not involved */
++		rc = 0;
++		/* nothing more to do because peer is not involved */
+ 		break;
+ 	}
+-	rc1 = kernel_sock_shutdown(smc->clcsock, how);
++	if (smc->clcsock)
++		rc1 = kernel_sock_shutdown(smc->clcsock, how);
+ 	/* map sock_shutdown_cmd constants to sk_shutdown value range */
+ 	sk->sk_shutdown |= how + 1;
+ 
+diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
+index b9283ce5cd85..092bebc70048 100644
+--- a/net/strparser/strparser.c
++++ b/net/strparser/strparser.c
+@@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
+ 
+ static void strp_start_timer(struct strparser *strp, long timeo)
+ {
+-	if (timeo)
++	if (timeo && timeo != LONG_MAX)
+ 		mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
+ }
+ 
+@@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+ 					strp_start_timer(strp, timeo);
+ 				}
+ 
++				stm->accum_len += cand_len;
+ 				strp->need_bytes = stm->strp.full_len -
+ 						       stm->accum_len;
+-				stm->accum_len += cand_len;
+ 				stm->early_eaten = cand_len;
+ 				STRP_STATS_ADD(strp->stats.bytes, cand_len);
+ 				desc->count = 0; /* Stop reading socket */
+@@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
+ 		/* Hurray, we have a new message! */
+ 		cancel_delayed_work(&strp->msg_timer_work);
+ 		strp->skb_head = NULL;
++		strp->need_bytes = 0;
+ 		STRP_STATS_INCR(strp->stats.msgs);
+ 
+ 		/* Give skb to upper layer */
+@@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp)
+ 		return;
+ 
+ 	if (strp->need_bytes) {
+-		if (strp_peek_len(strp) >= strp->need_bytes)
+-			strp->need_bytes = 0;
+-		else
++		if (strp_peek_len(strp) < strp->need_bytes)
+ 			return;
+ 	}
+ 
+diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
+index b76f13f6fea1..d4e0bbeee727 100644
+--- a/net/tipc/netlink.c
++++ b/net/tipc/netlink.c
+@@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
+ 
+ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ 	[TIPC_NLA_NET_UNSPEC]		= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 }
++	[TIPC_NLA_NET_ID]		= { .type = NLA_U32 },
++	[TIPC_NLA_NET_ADDR]		= { .type = NLA_U32 },
+ };
+ 
+ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index e0fc84daed94..ad17a985f74e 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void)
+ }
+ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
+ 
++static void __exit vsock_exit(void)
++{
++	/* Do nothing.  This function makes this module removable. */
++}
++
+ module_init(vsock_init_tables);
++module_exit(vsock_exit);
+ 
+ MODULE_AUTHOR("VMware, Inc.");
+ MODULE_DESCRIPTION("VMware Virtual Socket Family");
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 48620c93d697..1ce701fcb3f3 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+ 				magic |= VFS_CAP_FLAGS_EFFECTIVE;
+ 			memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ 			cap->magic_etc = cpu_to_le32(magic);
++		} else {
++			size = -ENOMEM;
+ 		}
+ 	}
+ 	kfree(tmpbuf);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-26 10:22 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-26 10:22 UTC (permalink / raw
  To: gentoo-commits

commit:     3da94fc1c80fd1720445ecdcc890ccd938c7dd75
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 26 10:22:14 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 26 10:22:14 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3da94fc1

Linux patch 4.16.5

 0000_README             |   4 +
 1004_linux-4.16.5.patch | 995 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 999 insertions(+)

diff --git a/0000_README b/0000_README
index c127441..344c387 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-4.16.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.4
 
+Patch:  1004_linux-4.16.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.16.5.patch b/1004_linux-4.16.5.patch
new file mode 100644
index 0000000..4a84bff
--- /dev/null
+++ b/1004_linux-4.16.5.patch
@@ -0,0 +1,995 @@
+diff --git a/Makefile b/Makefile
+index d51175192ac1..6678a90f355b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 5ee33a6e33bb..9bf2a1a4bd22 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+ 	apic_id = processor->local_apic_id;
+ 	enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
+ 
++	/* Ignore invalid ID */
++	if (apic_id == 0xffffffff)
++		return 0;
++
+ 	/*
+ 	 * We need to register disabled CPU as well to permit
+ 	 * counting disabled CPUs. This allows us to size
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index fb4302738410..3615c0f255e9 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+ 	hpet2 -= hpet1;
+ 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+ 	do_div(tmp, 1000000);
+-	do_div(deltatsc, tmp);
++	deltatsc = div64_u64(deltatsc, tmp);
+ 
+ 	return (unsigned long) deltatsc;
+ }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 763bb3bade63..8494dbae41b9 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3031,7 +3031,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+ 		return RET_PF_RETRY;
+ 	}
+ 
+-	return RET_PF_EMULATE;
++	return -EFAULT;
+ }
+ 
+ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
+diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
+index 21bffdcb2f20..557ed25b42e3 100644
+--- a/drivers/clocksource/timer-imx-tpm.c
++++ b/drivers/clocksource/timer-imx-tpm.c
+@@ -105,7 +105,7 @@ static int tpm_set_next_event(unsigned long delta,
+ 	 * of writing CNT registers which may cause the min_delta event got
+ 	 * missed, so we need add a ETIME check here in case it happened.
+ 	 */
+-	return (int)((next - now) <= 0) ? -ETIME : 0;
++	return (int)(next - now) <= 0 ? -ETIME : 0;
+ }
+ 
+ static int tpm_set_state_oneshot(struct clock_event_device *evt)
+diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+index 02a50929af67..e7f4fe2848a5 100644
+--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
++++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+@@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ {
+ 	uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+ 	ssize_t ret;
++	int retry;
+ 
+ 	if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ 		return 0;
+ 
+-	ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+-				     &tmds_oen, sizeof(tmds_oen));
+-	if (ret) {
+-		DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+-			      enable ? "enable" : "disable");
+-		return ret;
++	/*
++	 * LSPCON adapters in low-power state may ignore the first write, so
++	 * read back and verify the written value a few times.
++	 */
++	for (retry = 0; retry < 3; retry++) {
++		uint8_t tmp;
++
++		ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
++					     &tmds_oen, sizeof(tmds_oen));
++		if (ret) {
++			DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
++				      enable ? "enable" : "disable",
++				      retry + 1);
++			return ret;
++		}
++
++		ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
++					    &tmp, sizeof(tmp));
++		if (ret) {
++			DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
++				      enable ? "enabling" : "disabling",
++				      retry + 1);
++			return ret;
++		}
++
++		if (tmp == tmds_oen)
++			return 0;
+ 	}
+ 
+-	return 0;
++	DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
++		      enable ? "enabling" : "disabling");
++
++	return -EIO;
+ }
+ EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+ 
+diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
+index 2fb7b34ef561..82cd2fbe2cb3 100644
+--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
++++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
+@@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+ 		      struct intel_vgpu_fb_info *fb_info)
+ {
+ 	gvt_dmabuf->drm_format = fb_info->drm_format;
++	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
+ 	gvt_dmabuf->width = fb_info->width;
+ 	gvt_dmabuf->height = fb_info->height;
+ 	gvt_dmabuf->stride = fb_info->stride;
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
+index 021f722e2481..f34d7f1e6c4e 100644
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -1284,7 +1284,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ 
+ 	}
+ 
+-	return 0;
++	return -ENOTTY;
+ }
+ 
+ static ssize_t
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 3ab1ace2a6bd..df505868d65a 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
+ 
+ 		err = radix_tree_insert(handles_vma, handle, vma);
+ 		if (unlikely(err)) {
+-			kfree(lut);
++			kmem_cache_free(eb->i915->luts, lut);
+ 			goto err_obj;
+ 		}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
+index 4a01f62a392d..0ef7856d8155 100644
+--- a/drivers/gpu/drm/i915/intel_audio.c
++++ b/drivers/gpu/drm/i915/intel_audio.c
+@@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
+ 	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ 	u32 tmp;
+ 
+-	if (!IS_GEN9_BC(dev_priv))
++	if (!IS_GEN9(dev_priv))
+ 		return;
+ 
+ 	i915_audio_component_get_power(kdev);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index b49a2df44430..9b992e1b5996 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -1255,7 +1255,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ 		return;
+ 
+ 	aux_channel = child->aux_channel;
+-	ddc_pin = child->ddc_pin;
+ 
+ 	is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ 	is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+@@ -1302,9 +1301,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ 		DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+ 
+ 	if (is_dvi) {
+-		info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
+-
+-		sanitize_ddc_pin(dev_priv, port);
++		ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
++		if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
++			info->alternate_ddc_pin = ddc_pin;
++			sanitize_ddc_pin(dev_priv, port);
++		} else {
++			DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
++				      "sticking to defaults\n",
++				      port_name(port), ddc_pin);
++		}
+ 	}
+ 
+ 	if (is_dp) {
+diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
+index 2decc8e2c79f..add9cc97a3b6 100644
+--- a/drivers/gpu/drm/vc4/vc4_bo.c
++++ b/drivers/gpu/drm/vc4/vc4_bo.c
+@@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
+ 	vc4_bo_set_label(obj, -1);
+ 
+ 	if (bo->validated_shader) {
++		kfree(bo->validated_shader->uniform_addr_offsets);
+ 		kfree(bo->validated_shader->texture_samples);
+ 		kfree(bo->validated_shader);
+ 		bo->validated_shader = NULL;
+@@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
+ 	}
+ 
+ 	if (bo->validated_shader) {
++		kfree(bo->validated_shader->uniform_addr_offsets);
+ 		kfree(bo->validated_shader->texture_samples);
+ 		kfree(bo->validated_shader);
+ 		bo->validated_shader = NULL;
+diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+index d3f15bf60900..7cf82b071de2 100644
+--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
++++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+@@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
+ fail:
+ 	kfree(validation_state.branch_targets);
+ 	if (validated_shader) {
++		kfree(validated_shader->uniform_addr_offsets);
+ 		kfree(validated_shader->texture_samples);
+ 		kfree(validated_shader);
+ 	}
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index a2e1aa86e133..6c424afea25f 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -3157,7 +3157,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+ 	 * If we moved a kernel QP to RESET, clean up all old CQ
+ 	 * entries and reinitialize the QP.
+ 	 */
+-	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
++	if (new_state == IB_QPS_RESET &&
++	    !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
+ 		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
+ 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ 		if (send_cq != recv_cq)
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 35b21f8152bb..20af54378cc0 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3484,8 +3484,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
+ 		list_del(&data->list);
+ 		rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
+ 				       hwsim_rht_params);
+-		INIT_WORK(&data->destroy_work, destroy_radio);
+-		queue_work(hwsim_wq, &data->destroy_work);
++		spin_unlock_bh(&hwsim_radio_lock);
++		mac80211_hwsim_del_radio(data,
++					 wiphy_name(data->hw->wiphy),
++					 NULL);
++		spin_lock_bh(&hwsim_radio_lock);
+ 	}
+ 	spin_unlock_bh(&hwsim_radio_lock);
+ }
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 7ab5e0128f0c..1e9a20a4c06c 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -553,8 +553,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ 		     struct btrfs_delayed_ref_head *head_ref,
+ 		     struct btrfs_qgroup_extent_record *qrecord,
+ 		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+-		     int action, int is_data, int *qrecord_inserted_ret,
++		     int action, int is_data, int is_system,
++		     int *qrecord_inserted_ret,
+ 		     int *old_ref_mod, int *new_ref_mod)
++
+ {
+ 	struct btrfs_delayed_ref_head *existing;
+ 	struct btrfs_delayed_ref_root *delayed_refs;
+@@ -598,6 +600,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ 	head_ref->ref_mod = count_mod;
+ 	head_ref->must_insert_reserved = must_insert_reserved;
+ 	head_ref->is_data = is_data;
++	head_ref->is_system = is_system;
+ 	head_ref->ref_tree = RB_ROOT;
+ 	INIT_LIST_HEAD(&head_ref->ref_add_list);
+ 	RB_CLEAR_NODE(&head_ref->href_node);
+@@ -785,6 +788,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ 	struct btrfs_delayed_ref_root *delayed_refs;
+ 	struct btrfs_qgroup_extent_record *record = NULL;
+ 	int qrecord_inserted;
++	int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ 
+ 	BUG_ON(extent_op && extent_op->is_data);
+ 	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+@@ -813,8 +817,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ 	 */
+ 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ 					bytenr, num_bytes, 0, 0, action, 0,
+-					&qrecord_inserted, old_ref_mod,
+-					new_ref_mod);
++					is_system, &qrecord_inserted,
++					old_ref_mod, new_ref_mod);
+ 
+ 	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+ 			     num_bytes, parent, ref_root, level, action);
+@@ -881,7 +885,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ 	 */
+ 	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ 					bytenr, num_bytes, ref_root, reserved,
+-					action, 1, &qrecord_inserted,
++					action, 1, 0, &qrecord_inserted,
+ 					old_ref_mod, new_ref_mod);
+ 
+ 	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+@@ -911,9 +915,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ 	delayed_refs = &trans->transaction->delayed_refs;
+ 	spin_lock(&delayed_refs->lock);
+ 
++	/*
++	 * extent_ops just modify the flags of an extent and they don't result
++	 * in ref count changes, hence it's safe to pass false/0 for is_system
++	 * argument
++	 */
+ 	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+ 			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+-			     extent_op->is_data, NULL, NULL, NULL);
++			     extent_op->is_data, 0, NULL, NULL, NULL);
+ 
+ 	spin_unlock(&delayed_refs->lock);
+ 	return 0;
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index c4f625e5a691..ba97d18cc168 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -139,6 +139,7 @@ struct btrfs_delayed_ref_head {
+ 	 */
+ 	unsigned int must_insert_reserved:1;
+ 	unsigned int is_data:1;
++	unsigned int is_system:1;
+ 	unsigned int processing:1;
+ };
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index c1618ab9fecf..16b54b1ff20e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2615,13 +2615,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ 	trace_run_delayed_ref_head(fs_info, head, 0);
+ 
+ 	if (head->total_ref_mod < 0) {
+-		struct btrfs_block_group_cache *cache;
++		struct btrfs_space_info *space_info;
++		u64 flags;
+ 
+-		cache = btrfs_lookup_block_group(fs_info, head->bytenr);
+-		ASSERT(cache);
+-		percpu_counter_add(&cache->space_info->total_bytes_pinned,
++		if (head->is_data)
++			flags = BTRFS_BLOCK_GROUP_DATA;
++		else if (head->is_system)
++			flags = BTRFS_BLOCK_GROUP_SYSTEM;
++		else
++			flags = BTRFS_BLOCK_GROUP_METADATA;
++		space_info = __find_space_info(fs_info, flags);
++		ASSERT(space_info);
++		percpu_counter_add(&space_info->total_bytes_pinned,
+ 				   -head->num_bytes);
+-		btrfs_put_block_group(cache);
+ 
+ 		if (head->is_data) {
+ 			spin_lock(&delayed_refs->lock);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c7b75dd58fad..ef1cf323832a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -44,6 +44,7 @@
+ #include <linux/uio.h>
+ #include <linux/magic.h>
+ #include <linux/iversion.h>
++#include <asm/unaligned.h>
+ #include "ctree.h"
+ #include "disk-io.h"
+ #include "transaction.h"
+@@ -5951,11 +5952,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
+ 		struct dir_entry *entry = addr;
+ 		char *name = (char *)(entry + 1);
+ 
+-		ctx->pos = entry->offset;
+-		if (!dir_emit(ctx, name, entry->name_len, entry->ino,
+-			      entry->type))
++		ctx->pos = get_unaligned(&entry->offset);
++		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
++					 get_unaligned(&entry->ino),
++					 get_unaligned(&entry->type)))
+ 			return 1;
+-		addr += sizeof(struct dir_entry) + entry->name_len;
++		addr += sizeof(struct dir_entry) +
++			get_unaligned(&entry->name_len);
+ 		ctx->pos++;
+ 	}
+ 	return 0;
+@@ -6045,14 +6048,15 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 		}
+ 
+ 		entry = addr;
+-		entry->name_len = name_len;
++		put_unaligned(name_len, &entry->name_len);
+ 		name_ptr = (char *)(entry + 1);
+ 		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
+ 				   name_len);
+-		entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
++		put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
++				&entry->type);
+ 		btrfs_dir_item_key_to_cpu(leaf, di, &location);
+-		entry->ino = location.objectid;
+-		entry->offset = found_key.offset;
++		put_unaligned(location.objectid, &entry->ino);
++		put_unaligned(found_key.offset, &entry->offset);
+ 		entries++;
+ 		addr += sizeof(struct dir_entry) + name_len;
+ 		total_len += sizeof(struct dir_entry) + name_len;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 81ba6e0d88d8..925844343038 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		goto mknod_out;
+ 	}
+ 
++	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++		goto mknod_out;
++
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ 		goto mknod_out;
+ 
+@@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 
+ 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ 	if (buf == NULL) {
+-		kfree(full_path);
+ 		rc = -ENOMEM;
+-		free_xid(xid);
+-		return rc;
++		goto mknod_out;
+ 	}
+ 
+ 	if (backup_cred(cifs_sb))
+@@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		pdev->minor = cpu_to_le64(MINOR(device_number));
+ 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ 							&bytes_written, iov, 1);
+-	} /* else if (S_ISFIFO) */
++	}
+ 	tcon->ses->server->ops->close(xid, tcon, &fid);
+ 	d_drop(direntry);
+ 
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 52cccdbb7e14..34be5c5d027f 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -2194,6 +2194,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+ 						goto done;
+ 				}
+ 				i++;
++				if (i == rqst->rq_nvec)
++					break;
+ 			}
+ 			start = i;
+ 			buflen = 0;
+diff --git a/fs/super.c b/fs/super.c
+index 672538ca9831..afbf4d220c27 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -166,6 +166,7 @@ static void destroy_unused_super(struct super_block *s)
+ 	security_sb_free(s);
+ 	put_user_ns(s->s_user_ns);
+ 	kfree(s->s_subtype);
++	free_prealloced_shrinker(&s->s_shrink);
+ 	/* no delays needed */
+ 	destroy_super_work(&s->destroy_work);
+ }
+@@ -251,6 +252,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
+ 	s->s_shrink.count_objects = super_cache_count;
+ 	s->s_shrink.batch = 1024;
+ 	s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
++	if (prealloc_shrinker(&s->s_shrink))
++		goto fail;
+ 	return s;
+ 
+ fail:
+@@ -517,11 +520,7 @@ struct super_block *sget_userns(struct file_system_type *type,
+ 	hlist_add_head(&s->s_instances, &type->fs_supers);
+ 	spin_unlock(&sb_lock);
+ 	get_filesystem(type);
+-	err = register_shrinker(&s->s_shrink);
+-	if (err) {
+-		deactivate_locked_super(s);
+-		s = ERR_PTR(err);
+-	}
++	register_shrinker_prepared(&s->s_shrink);
+ 	return s;
+ }
+ 
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 14529511c4b8..065d605adea0 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -301,6 +301,7 @@ int xt_data_to_user(void __user *dst, const void *src,
+ 
+ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
+ 				 struct xt_counters_info *info, bool compat);
++struct xt_counters *xt_counters_alloc(unsigned int counters);
+ 
+ struct xt_table *xt_register_table(struct net *net,
+ 				   const struct xt_table *table,
+@@ -509,7 +510,7 @@ void xt_compat_unlock(u_int8_t af);
+ 
+ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+ void xt_compat_flush_offsets(u_int8_t af);
+-void xt_compat_init_offsets(u_int8_t af, unsigned int number);
++int xt_compat_init_offsets(u8 af, unsigned int number);
+ int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+ 
+ int xt_compat_match_offset(const struct xt_match *match);
+diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
+index 388ff2936a87..6794490f25b2 100644
+--- a/include/linux/shrinker.h
++++ b/include/linux/shrinker.h
+@@ -75,6 +75,9 @@ struct shrinker {
+ #define SHRINKER_NUMA_AWARE	(1 << 0)
+ #define SHRINKER_MEMCG_AWARE	(1 << 1)
+ 
+-extern int register_shrinker(struct shrinker *);
+-extern void unregister_shrinker(struct shrinker *);
++extern int prealloc_shrinker(struct shrinker *shrinker);
++extern void register_shrinker_prepared(struct shrinker *shrinker);
++extern int register_shrinker(struct shrinker *shrinker);
++extern void unregister_shrinker(struct shrinker *shrinker);
++extern void free_prealloced_shrinker(struct shrinker *shrinker);
+ #endif
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index 772a43fea825..73cc26e321de 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -119,19 +119,22 @@ int get_callchain_buffers(int event_max_stack)
+ 		goto exit;
+ 	}
+ 
++	/*
++	 * If requesting per event more than the global cap,
++	 * return a different error to help userspace figure
++	 * this out.
++	 *
++	 * And also do it here so that we have &callchain_mutex held.
++	 */
++	if (event_max_stack > sysctl_perf_event_max_stack) {
++		err = -EOVERFLOW;
++		goto exit;
++	}
++
+ 	if (count > 1) {
+ 		/* If the allocation failed, give up */
+ 		if (!callchain_cpus_entries)
+ 			err = -ENOMEM;
+-		/*
+-		 * If requesting per event more than the global cap,
+-		 * return a different error to help userspace figure
+-		 * this out.
+-		 *
+-		 * And also do it here so that we have &callchain_mutex held.
+-		 */
+-		if (event_max_stack > sysctl_perf_event_max_stack)
+-			err = -EOVERFLOW;
+ 		goto exit;
+ 	}
+ 
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index b32bc0698a2a..ca7298760c83 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -9730,9 +9730,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ 		 * __u16 sample size limit.
+ 		 */
+ 		if (attr->sample_stack_user >= USHRT_MAX)
+-			ret = -EINVAL;
++			return -EINVAL;
+ 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+-			ret = -EINVAL;
++			return -EINVAL;
+ 	}
+ 
+ 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index ec09ce9a6012..639321bf2e39 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -326,6 +326,17 @@ static int alarmtimer_resume(struct device *dev)
+ }
+ #endif
+ 
++static void
++__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
++	     enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
++{
++	timerqueue_init(&alarm->node);
++	alarm->timer.function = alarmtimer_fired;
++	alarm->function = function;
++	alarm->type = type;
++	alarm->state = ALARMTIMER_STATE_INACTIVE;
++}
++
+ /**
+  * alarm_init - Initialize an alarm structure
+  * @alarm: ptr to alarm to be initialized
+@@ -335,13 +346,9 @@ static int alarmtimer_resume(struct device *dev)
+ void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
+ 		enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
+ {
+-	timerqueue_init(&alarm->node);
+ 	hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
+-			HRTIMER_MODE_ABS);
+-	alarm->timer.function = alarmtimer_fired;
+-	alarm->function = function;
+-	alarm->type = type;
+-	alarm->state = ALARMTIMER_STATE_INACTIVE;
++		     HRTIMER_MODE_ABS);
++	__alarm_init(alarm, type, function);
+ }
+ EXPORT_SYMBOL_GPL(alarm_init);
+ 
+@@ -719,6 +726,8 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
+ 
+ 	__set_current_state(TASK_RUNNING);
+ 
++	destroy_hrtimer_on_stack(&alarm->timer);
++
+ 	if (!alarm->data)
+ 		return 0;
+ 
+@@ -740,6 +749,15 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
+ 	return -ERESTART_RESTARTBLOCK;
+ }
+ 
++static void
++alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
++		    enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
++{
++	hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
++			      HRTIMER_MODE_ABS);
++	__alarm_init(alarm, type, function);
++}
++
+ /**
+  * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
+  * @restart: ptr to restart block
+@@ -752,7 +770,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
+ 	ktime_t exp = restart->nanosleep.expires;
+ 	struct alarm alarm;
+ 
+-	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
++	alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
+ 
+ 	return alarmtimer_do_nsleep(&alarm, exp, type);
+ }
+@@ -784,7 +802,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ 	if (!capable(CAP_WAKE_ALARM))
+ 		return -EPERM;
+ 
+-	alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
++	alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
+ 
+ 	exp = timespec64_to_ktime(*tsreq);
+ 	/* Convert (if necessary) to absolute time */
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 2541bd89f20e..5a6251ac6f7a 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+ 			   u64 *newval, u64 *oldval)
+ {
+ 	u64 now;
++	int ret;
+ 
+ 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
++	ret = cpu_timer_sample_group(clock_idx, tsk, &now);
+ 
+-	if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
++	if (oldval && ret != -EINVAL) {
+ 		/*
+ 		 * We are setting itimer. The *oldval is absolute and we update
+ 		 * it to be relative, *newval argument is relative and we update
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index cd5dc3faaa57..f6a1587f9f31 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -258,7 +258,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone
+ /*
+  * Add a shrinker callback to be called from the vm.
+  */
+-int register_shrinker(struct shrinker *shrinker)
++int prealloc_shrinker(struct shrinker *shrinker)
+ {
+ 	size_t size = sizeof(*shrinker->nr_deferred);
+ 
+@@ -268,10 +268,29 @@ int register_shrinker(struct shrinker *shrinker)
+ 	shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
+ 	if (!shrinker->nr_deferred)
+ 		return -ENOMEM;
++	return 0;
++}
++
++void free_prealloced_shrinker(struct shrinker *shrinker)
++{
++	kfree(shrinker->nr_deferred);
++	shrinker->nr_deferred = NULL;
++}
+ 
++void register_shrinker_prepared(struct shrinker *shrinker)
++{
+ 	down_write(&shrinker_rwsem);
+ 	list_add_tail(&shrinker->list, &shrinker_list);
+ 	up_write(&shrinker_rwsem);
++}
++
++int register_shrinker(struct shrinker *shrinker)
++{
++	int err = prealloc_shrinker(shrinker);
++
++	if (err)
++		return err;
++	register_shrinker_prepared(shrinker);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(register_shrinker);
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index a94d23b0a9af..752112539753 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1821,10 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
+ {
+ 	unsigned int size = info->entries_size;
+ 	const void *entries = info->entries;
++	int ret;
+ 
+ 	newinfo->entries_size = size;
+ 
+-	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
++	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
++	if (ret)
++		return ret;
++
+ 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ 							entries, newinfo);
+ }
+@@ -2268,7 +2272,9 @@ static int compat_do_replace(struct net *net, void __user *user,
+ 
+ 	xt_compat_lock(NFPROTO_BRIDGE);
+ 
+-	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
++	ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
++	if (ret < 0)
++		goto out_unlock;
+ 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ 	if (ret < 0)
+ 		goto out_unlock;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index e3e420f3ba7b..b940d6aaa94f 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -781,7 +781,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ 	newinfo->initial_entries = 0;
+ 	loc_cpu_entry = info->entries;
+-	xt_compat_init_offsets(NFPROTO_ARP, info->number);
++	ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
++	if (ret)
++		return ret;
+ 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ 		if (ret != 0)
+@@ -895,7 +897,7 @@ static int __do_replace(struct net *net, const char *name,
+ 	struct arpt_entry *iter;
+ 
+ 	ret = 0;
+-	counters = vzalloc(num_counters * sizeof(struct xt_counters));
++	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -1167,7 +1169,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ 	struct compat_arpt_entry *iter0;
+ 	struct arpt_replace repl;
+ 	unsigned int size;
+-	int ret = 0;
++	int ret;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+@@ -1176,7 +1178,9 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ 
+ 	j = 0;
+ 	xt_compat_lock(NFPROTO_ARP);
+-	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
++	ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
++	if (ret)
++		goto out_unlock;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index e38395a8dcf2..34f22450da5b 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -945,7 +945,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ 	newinfo->initial_entries = 0;
+ 	loc_cpu_entry = info->entries;
+-	xt_compat_init_offsets(AF_INET, info->number);
++	ret = xt_compat_init_offsets(AF_INET, info->number);
++	if (ret)
++		return ret;
+ 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ 		if (ret != 0)
+@@ -1057,7 +1059,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct ipt_entry *iter;
+ 
+ 	ret = 0;
+-	counters = vzalloc(num_counters * sizeof(struct xt_counters));
++	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -1418,7 +1420,9 @@ translate_compat_table(struct net *net,
+ 
+ 	j = 0;
+ 	xt_compat_lock(AF_INET);
+-	xt_compat_init_offsets(AF_INET, compatr->num_entries);
++	ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
++	if (ret)
++		goto out_unlock;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 62358b93bbac..41db3c8f469f 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -962,7 +962,9 @@ static int compat_table_info(const struct xt_table_info *info,
+ 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
+ 	newinfo->initial_entries = 0;
+ 	loc_cpu_entry = info->entries;
+-	xt_compat_init_offsets(AF_INET6, info->number);
++	ret = xt_compat_init_offsets(AF_INET6, info->number);
++	if (ret)
++		return ret;
+ 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ 		if (ret != 0)
+@@ -1075,7 +1077,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
+ 	struct ip6t_entry *iter;
+ 
+ 	ret = 0;
+-	counters = vzalloc(num_counters * sizeof(struct xt_counters));
++	counters = xt_counters_alloc(num_counters);
+ 	if (!counters) {
+ 		ret = -ENOMEM;
+ 		goto out;
+@@ -1425,7 +1427,7 @@ translate_compat_table(struct net *net,
+ 	struct compat_ip6t_entry *iter0;
+ 	struct ip6t_replace repl;
+ 	unsigned int size;
+-	int ret = 0;
++	int ret;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+@@ -1434,7 +1436,9 @@ translate_compat_table(struct net *net,
+ 
+ 	j = 0;
+ 	xt_compat_lock(AF_INET6);
+-	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
++	ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries);
++	if (ret)
++		goto out_unlock;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 4aa01c90e9d1..a94c0e3cdcf0 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
+ 
+ #define XT_PCPU_BLOCK_SIZE 4096
++#define XT_MAX_TABLE_SIZE	(512 * 1024 * 1024)
+ 
+ struct compat_delta {
+ 	unsigned int offset; /* offset in kernel */
+@@ -553,14 +554,8 @@ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
+ {
+ 	struct xt_af *xp = &xt[af];
+ 
+-	if (!xp->compat_tab) {
+-		if (!xp->number)
+-			return -EINVAL;
+-		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+-		if (!xp->compat_tab)
+-			return -ENOMEM;
+-		xp->cur = 0;
+-	}
++	if (WARN_ON(!xp->compat_tab))
++		return -ENOMEM;
+ 
+ 	if (xp->cur >= xp->number)
+ 		return -EINVAL;
+@@ -603,10 +598,28 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
+ 
+-void xt_compat_init_offsets(u_int8_t af, unsigned int number)
++int xt_compat_init_offsets(u8 af, unsigned int number)
+ {
++	size_t mem;
++
++	if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
++		return -EINVAL;
++
++	if (WARN_ON(xt[af].compat_tab))
++		return -EINVAL;
++
++	mem = sizeof(struct compat_delta) * number;
++	if (mem > XT_MAX_TABLE_SIZE)
++		return -ENOMEM;
++
++	xt[af].compat_tab = vmalloc(mem);
++	if (!xt[af].compat_tab)
++		return -ENOMEM;
++
+ 	xt[af].number = number;
+ 	xt[af].cur = 0;
++
++	return 0;
+ }
+ EXPORT_SYMBOL(xt_compat_init_offsets);
+ 
+@@ -805,6 +818,9 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
+  */
+ unsigned int *xt_alloc_entry_offsets(unsigned int size)
+ {
++	if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
++		return NULL;
++
+ 	return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
+ 
+ }
+@@ -1029,7 +1045,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
+ 	struct xt_table_info *info = NULL;
+ 	size_t sz = sizeof(*info) + size;
+ 
+-	if (sz < sizeof(*info))
++	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
+ 		return NULL;
+ 
+ 	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
+@@ -1198,6 +1214,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
+ 	return 0;
+ }
+ 
++struct xt_counters *xt_counters_alloc(unsigned int counters)
++{
++	struct xt_counters *mem;
++
++	if (counters == 0 || counters > INT_MAX / sizeof(*mem))
++		return NULL;
++
++	counters *= sizeof(*mem);
++	if (counters > XT_MAX_TABLE_SIZE)
++		return NULL;
++
++	return vzalloc(counters);
++}
++EXPORT_SYMBOL(xt_counters_alloc);
++
+ struct xt_table_info *
+ xt_replace_table(struct xt_table *table,
+ 	      unsigned int num_counters,


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-24 11:31 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-24 11:31 UTC (permalink / raw
  To: gentoo-commits

commit:     a3bf978c7d70cc9633c062dcfbaa009437563b42
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 24 11:31:20 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 24 11:31:20 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a3bf978c

Linux patch 4.16.4

 0000_README             |    4 +
 1003_linux-4.16.4.patch | 7880 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7884 insertions(+)

diff --git a/0000_README b/0000_README
index 65c079f..c127441 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-4.16.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.3
 
+Patch:  1003_linux-4.16.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.16.4.patch b/1003_linux-4.16.4.patch
new file mode 100644
index 0000000..e8ddb45
--- /dev/null
+++ b/1003_linux-4.16.4.patch
@@ -0,0 +1,7880 @@
+diff --git a/Makefile b/Makefile
+index 38df392e45e4..d51175192ac1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
+index a7da0dd0c98f..0898213f3bb2 100644
+--- a/arch/arm/boot/dts/at91sam9g25.dtsi
++++ b/arch/arm/boot/dts/at91sam9g25.dtsi
+@@ -21,7 +21,7 @@
+ 				atmel,mux-mask = <
+ 				      /*    A         B          C     */
+ 				       0xffffffff 0xffe0399f 0xc000001c  /* pioA */
+-				       0x0007ffff 0x8000fe3f 0x00000000  /* pioB */
++				       0x0007ffff 0x00047e3f 0x00000000  /* pioB */
+ 				       0x80000000 0x07c0ffff 0xb83fffff  /* pioC */
+ 				       0x003fffff 0x003f8000 0x00000000  /* pioD */
+ 				      >;
+diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
+index 56626d1a4235..cf89385e7888 100644
+--- a/arch/arm/boot/dts/exynos5250.dtsi
++++ b/arch/arm/boot/dts/exynos5250.dtsi
+@@ -655,7 +655,7 @@
+ 			power-domains = <&pd_gsc>;
+ 			clocks = <&clock CLK_GSCL0>;
+ 			clock-names = "gscl";
+-			iommu = <&sysmmu_gsc0>;
++			iommus = <&sysmmu_gsc0>;
+ 		};
+ 
+ 		gsc_1:  gsc@13e10000 {
+@@ -665,7 +665,7 @@
+ 			power-domains = <&pd_gsc>;
+ 			clocks = <&clock CLK_GSCL1>;
+ 			clock-names = "gscl";
+-			iommu = <&sysmmu_gsc1>;
++			iommus = <&sysmmu_gsc1>;
+ 		};
+ 
+ 		gsc_2:  gsc@13e20000 {
+@@ -675,7 +675,7 @@
+ 			power-domains = <&pd_gsc>;
+ 			clocks = <&clock CLK_GSCL2>;
+ 			clock-names = "gscl";
+-			iommu = <&sysmmu_gsc2>;
++			iommus = <&sysmmu_gsc2>;
+ 		};
+ 
+ 		gsc_3:  gsc@13e30000 {
+@@ -685,7 +685,7 @@
+ 			power-domains = <&pd_gsc>;
+ 			clocks = <&clock CLK_GSCL3>;
+ 			clock-names = "gscl";
+-			iommu = <&sysmmu_gsc3>;
++			iommus = <&sysmmu_gsc3>;
+ 		};
+ 
+ 		hdmi: hdmi@14530000 {
+diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+index 7bf5aa2237c9..7de704575aee 100644
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -39,6 +39,24 @@
+ 		};
+ 	};
+ 
++	reg_3p3v: regulator-3p3v {
++		compatible = "regulator-fixed";
++		regulator-name = "fixed-3.3V";
++		regulator-min-microvolt = <3300000>;
++		regulator-max-microvolt = <3300000>;
++		regulator-boot-on;
++		regulator-always-on;
++	};
++
++	reg_5v: regulator-5v {
++		compatible = "regulator-fixed";
++		regulator-name = "fixed-5V";
++		regulator-min-microvolt = <5000000>;
++		regulator-max-microvolt = <5000000>;
++		regulator-boot-on;
++		regulator-always-on;
++	};
++
+ 	gpio_keys {
+ 		compatible = "gpio-keys";
+ 		pinctrl-names = "default";
+@@ -468,12 +486,14 @@
+ };
+ 
+ &usb1 {
+-	vusb33-supply = <&mt6323_vusb_reg>;
++	vusb33-supply = <&reg_3p3v>;
++	vbus-supply = <&reg_5v>;
+ 	status = "okay";
+ };
+ 
+ &usb2 {
+-	vusb33-supply = <&mt6323_vusb_reg>;
++	vusb33-supply = <&reg_3p3v>;
++	vbus-supply = <&reg_5v>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 373b3621b536..c7105096c623 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1379,7 +1379,7 @@
+ 			pinctrl@fc06a000 {
+ 				#address-cells = <1>;
+ 				#size-cells = <1>;
+-				compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
++				compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
+ 				ranges = <0xfc068000 0xfc068000 0x100
+ 					  0xfc06a000 0xfc06a000 0x4000>;
+ 				/* WARNING: revisit as pin spec has changed */
+diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
+index dc4346ecf16d..a1055a2b8d54 100644
+--- a/arch/arm/mach-exynos/pm.c
++++ b/arch/arm/mach-exynos/pm.c
+@@ -271,11 +271,7 @@ static int exynos_cpu0_enter_aftr(void)
+ 				goto fail;
+ 
+ 			call_firmware_op(cpu_boot, 1);
+-
+-			if (soc_is_exynos3250())
+-				dsb_sev();
+-			else
+-				arch_send_wakeup_ipi_mask(cpumask_of(1));
++			dsb_sev();
+ 		}
+ 	}
+ fail:
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+index ee4ada61c59c..93a7830706f5 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+@@ -310,7 +310,7 @@
+ 	pinctrl-names = "default", "clk-gate";
+ 
+ 	bus-width = <8>;
+-	max-frequency = <200000000>;
++	max-frequency = <100000000>;
+ 	non-removable;
+ 	disable-wp;
+ 	cap-mmc-highspeed;
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index b71306947290..06629011a434 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ {
+ 	__kernel_size_t res;
+ 
++#ifdef CONFIG_CPU_MICROMIPS
++/* micromips memset / bzero also clobbers t7 & t8 */
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
++#else
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
++#endif /* CONFIG_CPU_MICROMIPS */
++
+ 	if (eva_kernel_access()) {
+ 		__asm__ __volatile__(
+ 			"move\t$4, %1\n\t"
+@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ 			"move\t%0, $6"
+ 			: "=r" (res)
+ 			: "r" (addr), "r" (size)
+-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++			: bzero_clobbers);
+ 	} else {
+ 		might_fault();
+ 		__asm__ __volatile__(
+@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ 			"move\t%0, $6"
+ 			: "=r" (res)
+ 			: "r" (addr), "r" (size)
+-			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++			: bzero_clobbers);
+ 	}
+ 
+ 	return res;
+diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
+index a1456664d6c2..f7327979a8f8 100644
+--- a/arch/mips/lib/memset.S
++++ b/arch/mips/lib/memset.S
+@@ -219,7 +219,7 @@
+ 1:	PTR_ADDIU	a0, 1			/* fill bytewise */
+ 	R10KCBARRIER(0(ra))
+ 	bne		t1, a0, 1b
+-	sb		a1, -1(a0)
++	 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+ 
+ 2:	jr		ra			/* done */
+ 	move		a2, zero
+@@ -252,13 +252,18 @@
+ 	PTR_L		t0, TI_TASK($28)
+ 	andi		a2, STORMASK
+ 	LONG_L		t0, THREAD_BUADDR(t0)
+-	LONG_ADDU	a2, t1
++	LONG_ADDU	a2, a0
+ 	jr		ra
+ 	LONG_SUBU	a2, t0
+ 
+ .Llast_fixup\@:
+ 	jr		ra
+-	andi		v1, a2, STORMASK
++	 nop
++
++.Lsmall_fixup\@:
++	PTR_SUBU	a2, t1, a0
++	jr		ra
++	 PTR_ADDIU	a2, 1
+ 
+ 	.endm
+ 
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 10daa1d56e0a..c7c63959ba91 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -35,7 +35,8 @@
+ #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+ #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+ 
+-#ifdef __SUBARCH_HAS_LWSYNC
++/* The sub-arch has lwsync */
++#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+ #    define SMPWMB      LWSYNC
+ #else
+ #    define SMPWMB      eieio
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 12e70fb58700..fcf3ed5b8b18 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -21,6 +21,9 @@
+ /* We calculate number of sg entries based on PAGE_SIZE */
+ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+ 
++/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
++#define OPAL_BUSY_DELAY_MS	10
++
+ /* /sys/firmware/opal */
+ extern struct kobject *opal_kobj;
+ 
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index 63e7f5a1f105..6ec546090ba1 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -6,10 +6,6 @@
+ #include <linux/stringify.h>
+ #include <asm/feature-fixups.h>
+ 
+-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+-#define __SUBARCH_HAS_LWSYNC
+-#endif
+-
+ #ifndef __ASSEMBLY__
+ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 8ca5d5b74618..078553a177de 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -84,6 +84,7 @@ static int hv_mode;
+ 
+ static struct {
+ 	u64	lpcr;
++	u64	lpcr_clear;
+ 	u64	hfscr;
+ 	u64	fscr;
+ } system_registers;
+@@ -92,6 +93,8 @@ static void (*init_pmu_registers)(void);
+ 
+ static void __restore_cpu_cpufeatures(void)
+ {
++	u64 lpcr;
++
+ 	/*
+ 	 * LPCR is restored by the power on engine already. It can be changed
+ 	 * after early init e.g., by radix enable, and we have no unified API
+@@ -104,8 +107,10 @@ static void __restore_cpu_cpufeatures(void)
+ 	 * The best we can do to accommodate secondary boot and idle restore
+ 	 * for now is "or" LPCR with existing.
+ 	 */
+-
+-	mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
++	lpcr = mfspr(SPRN_LPCR);
++	lpcr |= system_registers.lpcr;
++	lpcr &= ~system_registers.lpcr_clear;
++	mtspr(SPRN_LPCR, lpcr);
+ 	if (hv_mode) {
+ 		mtspr(SPRN_LPID, 0);
+ 		mtspr(SPRN_HFSCR, system_registers.hfscr);
+@@ -325,8 +330,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
+ {
+ 	u64 lpcr;
+ 
++	system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
+ 	lpcr = mfspr(SPRN_LPCR);
+-	lpcr &= ~LPCR_ISL;
++	lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
+ 	mtspr(SPRN_LPCR, lpcr);
+ 
+ 	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+@@ -658,6 +664,13 @@ static void __init cpufeatures_setup_start(u32 isa)
+ 		cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
+ 		cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
+ 	}
++
++	/*
++	 * PKEY was not in the initial base or feature node
++	 * specification, but it should become optional in the next
++	 * cpu feature version sequence.
++	 */
++	cur_cpu_spec->cpu_features |= CPU_FTR_PKEY;
+ }
+ 
+ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 0c0b66fc5bfb..295ba833846e 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -207,18 +207,18 @@ static void *eeh_report_error(void *data, void *userdata)
+ 
+ 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ 		return NULL;
++
++	device_lock(&dev->dev);
+ 	dev->error_state = pci_channel_io_frozen;
+ 
+ 	driver = eeh_pcid_get(dev);
+-	if (!driver) return NULL;
++	if (!driver) goto out_no_dev;
+ 
+ 	eeh_disable_irq(dev);
+ 
+ 	if (!driver->err_handler ||
+-	    !driver->err_handler->error_detected) {
+-		eeh_pcid_put(dev);
+-		return NULL;
+-	}
++	    !driver->err_handler->error_detected)
++		goto out;
+ 
+ 	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
+ 
+@@ -227,8 +227,12 @@ static void *eeh_report_error(void *data, void *userdata)
+ 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+ 
+ 	edev->in_error = true;
+-	eeh_pcid_put(dev);
+ 	pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
++
++out:
++	eeh_pcid_put(dev);
++out_no_dev:
++	device_unlock(&dev->dev);
+ 	return NULL;
+ }
+ 
+@@ -251,15 +255,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
+ 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ 		return NULL;
+ 
++	device_lock(&dev->dev);
+ 	driver = eeh_pcid_get(dev);
+-	if (!driver) return NULL;
++	if (!driver) goto out_no_dev;
+ 
+ 	if (!driver->err_handler ||
+ 	    !driver->err_handler->mmio_enabled ||
+-	    (edev->mode & EEH_DEV_NO_HANDLER)) {
+-		eeh_pcid_put(dev);
+-		return NULL;
+-	}
++	    (edev->mode & EEH_DEV_NO_HANDLER))
++		goto out;
+ 
+ 	rc = driver->err_handler->mmio_enabled(dev);
+ 
+@@ -267,7 +270,10 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
+ 	if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ 	if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+ 
++out:
+ 	eeh_pcid_put(dev);
++out_no_dev:
++	device_unlock(&dev->dev);
+ 	return NULL;
+ }
+ 
+@@ -290,20 +296,20 @@ static void *eeh_report_reset(void *data, void *userdata)
+ 
+ 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ 		return NULL;
++
++	device_lock(&dev->dev);
+ 	dev->error_state = pci_channel_io_normal;
+ 
+ 	driver = eeh_pcid_get(dev);
+-	if (!driver) return NULL;
++	if (!driver) goto out_no_dev;
+ 
+ 	eeh_enable_irq(dev);
+ 
+ 	if (!driver->err_handler ||
+ 	    !driver->err_handler->slot_reset ||
+ 	    (edev->mode & EEH_DEV_NO_HANDLER) ||
+-	    (!edev->in_error)) {
+-		eeh_pcid_put(dev);
+-		return NULL;
+-	}
++	    (!edev->in_error))
++		goto out;
+ 
+ 	rc = driver->err_handler->slot_reset(dev);
+ 	if ((*res == PCI_ERS_RESULT_NONE) ||
+@@ -311,7 +317,10 @@ static void *eeh_report_reset(void *data, void *userdata)
+ 	if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ 	     rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ 
++out:
+ 	eeh_pcid_put(dev);
++out_no_dev:
++	device_unlock(&dev->dev);
+ 	return NULL;
+ }
+ 
+@@ -362,10 +371,12 @@ static void *eeh_report_resume(void *data, void *userdata)
+ 
+ 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ 		return NULL;
++
++	device_lock(&dev->dev);
+ 	dev->error_state = pci_channel_io_normal;
+ 
+ 	driver = eeh_pcid_get(dev);
+-	if (!driver) return NULL;
++	if (!driver) goto out_no_dev;
+ 
+ 	was_in_error = edev->in_error;
+ 	edev->in_error = false;
+@@ -375,18 +386,20 @@ static void *eeh_report_resume(void *data, void *userdata)
+ 	    !driver->err_handler->resume ||
+ 	    (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
+ 		edev->mode &= ~EEH_DEV_NO_HANDLER;
+-		eeh_pcid_put(dev);
+-		return NULL;
++		goto out;
+ 	}
+ 
+ 	driver->err_handler->resume(dev);
+ 
+-	eeh_pcid_put(dev);
+ 	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
++out:
++	eeh_pcid_put(dev);
+ #ifdef CONFIG_PCI_IOV
+ 	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+ 		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+ #endif
++out_no_dev:
++	device_unlock(&dev->dev);
+ 	return NULL;
+ }
+ 
+@@ -406,23 +419,26 @@ static void *eeh_report_failure(void *data, void *userdata)
+ 
+ 	if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ 		return NULL;
++
++	device_lock(&dev->dev);
+ 	dev->error_state = pci_channel_io_perm_failure;
+ 
+ 	driver = eeh_pcid_get(dev);
+-	if (!driver) return NULL;
++	if (!driver) goto out_no_dev;
+ 
+ 	eeh_disable_irq(dev);
+ 
+ 	if (!driver->err_handler ||
+-	    !driver->err_handler->error_detected) {
+-		eeh_pcid_put(dev);
+-		return NULL;
+-	}
++	    !driver->err_handler->error_detected)
++		goto out;
+ 
+ 	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+ 
+-	eeh_pcid_put(dev);
+ 	pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
++out:
++	eeh_pcid_put(dev);
++out_no_dev:
++	device_unlock(&dev->dev);
+ 	return NULL;
+ }
+ 
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 2d4956e97aa9..ee5a67d57aab 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
+ 	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+ 
+ 	/* PCI Command: 0x4 */
+-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
++	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
++			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ 
+ 	/* Check the PCIe link is ready */
+ 	eeh_bridge_check_link(edev);
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index ca5d5a081e75..e4c5bf33970b 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -455,29 +455,33 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ 	}
+ 
+ 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+-	regs->nip = orig_ret_address;
++
+ 	/*
+-	 * Make LR point to the orig_ret_address.
+-	 * When the 'nop' inside the kretprobe_trampoline
+-	 * is optimized, we can do a 'blr' after executing the
+-	 * detour buffer code.
++	 * We get here through one of two paths:
++	 * 1. by taking a trap -> kprobe_handler() -> here
++	 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
++	 *
++	 * When going back through (1), we need regs->nip to be setup properly
++	 * as it is used to determine the return address from the trap.
++	 * For (2), since nip is not honoured with optprobes, we instead setup
++	 * the link register properly so that the subsequent 'blr' in
++	 * kretprobe_trampoline jumps back to the right instruction.
++	 *
++	 * For nip, we should set the address to the previous instruction since
++	 * we end up emulating it in kprobe_handler(), which increments the nip
++	 * again.
+ 	 */
++	regs->nip = orig_ret_address - 4;
+ 	regs->link = orig_ret_address;
+ 
+-	reset_current_kprobe();
+ 	kretprobe_hash_unlock(current, &flags);
+-	preempt_enable_no_resched();
+ 
+ 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ 		hlist_del(&ri->hlist);
+ 		kfree(ri);
+ 	}
+-	/*
+-	 * By returning a non-zero value, we are telling
+-	 * kprobe_handler() that we don't want the post_handler
+-	 * to run (and have re-enabled preemption)
+-	 */
+-	return 1;
++
++	return 0;
+ }
+ NOKPROBE_SYMBOL(trampoline_probe_handler);
+ 
+diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
+index e4395f937d63..45e0b7d5f200 100644
+--- a/arch/powerpc/kernel/machine_kexec_file_64.c
++++ b/arch/powerpc/kernel/machine_kexec_file_64.c
+@@ -43,7 +43,7 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ 
+ 	/* We don't support crash kernels yet. */
+ 	if (image->type == KEXEC_TYPE_CRASH)
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
+ 		fops = kexec_file_loaders[i];
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 73697c4e3468..f61ff5a6bddb 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
+ 		unsigned int *target = (unsigned int *)branch_target(src);
+ 
+ 		/* Branch within the section doesn't need translating */
+-		if (target < alt_start || target >= alt_end) {
++		if (target < alt_start || target > alt_end) {
+ 			instr = translate_branch(dest, src);
+ 			if (!instr)
+ 				return 1;
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index cf290d415dcd..1d388a0e1746 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -875,6 +875,12 @@ static void __init htab_initialize(void)
+ 		/* Using a hypervisor which owns the htab */
+ 		htab_address = NULL;
+ 		_SDR1 = 0; 
++		/*
++		 * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
++		 * to inform the hypervisor that we wish to use the HPT.
++		 */
++		if (cpu_has_feature(CPU_FTR_ARCH_300))
++			register_process_table(0, 0, 0);
+ #ifdef CONFIG_FA_DUMP
+ 		/*
+ 		 * If firmware assisted dump is active firmware preserves
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index a07f5372a4bf..9ab051155af3 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -33,13 +33,12 @@ static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+ {
+ 	unsigned long rb;
+ 	unsigned long rs;
+-	unsigned int r = 1; /* radix format */
+ 
+ 	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+ 	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+ 
+-	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+-		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
++	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
++		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
+ 		     : "memory");
+ }
+ 
+diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
+index 9db4398ded5d..1bceb95f422d 100644
+--- a/arch/powerpc/platforms/powernv/opal-nvram.c
++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
+@@ -11,6 +11,7 @@
+ 
+ #define DEBUG
+ 
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+ 
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_write_nvram(__pa(buf), count, off);
+-		if (rc == OPAL_BUSY_EVENT)
++		if (rc == OPAL_BUSY_EVENT) {
++			msleep(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
++		} else if (rc == OPAL_BUSY) {
++			msleep(OPAL_BUSY_DELAY_MS);
++		}
+ 	}
++
++	if (rc)
++		return -EIO;
++
+ 	*index += count;
+ 	return count;
+ }
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 0ee4a469a4ae..d11f3c14c21e 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -726,15 +726,18 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
+ 	return 0;
+ }
+ 
+-/* Actually only used for radix, so far */
+ static int pseries_lpar_register_process_table(unsigned long base,
+ 			unsigned long page_size, unsigned long table_size)
+ {
+ 	long rc;
+-	unsigned long flags = PROC_TABLE_NEW;
++	unsigned long flags = 0;
+ 
++	if (table_size)
++		flags |= PROC_TABLE_NEW;
+ 	if (radix_enabled())
+ 		flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
++	else
++		flags |= PROC_TABLE_HPT_SLB;
+ 	for (;;) {
+ 		rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
+ 					page_size, table_size);
+@@ -760,6 +763,7 @@ void __init hpte_init_pseries(void)
+ 	mmu_hash_ops.flush_hash_range	 = pSeries_lpar_flush_hash_range;
+ 	mmu_hash_ops.hpte_clear_all      = pseries_hpte_clear_all;
+ 	mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
++	register_process_table		 = pseries_lpar_register_process_table;
+ 
+ 	if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ 		mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index d22aeb0b69e1..b48454be5b98 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+ 	if (xive_pool_vps == XIVE_INVALID_VP)
+ 		return;
+ 
++	/* Check if pool VP already active, if it is, pull it */
++	if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
++		in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
++
+ 	/* Enable the pool VP */
+ 	vp = xive_pool_vps + cpu;
+ 	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index eaee7087886f..6e91e0d422ea 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -289,12 +289,12 @@ config MARCH_Z13
+ 	  older machines.
+ 
+ config MARCH_Z14
+-	bool "IBM z14"
++	bool "IBM z14 ZR1 and z14"
+ 	select HAVE_MARCH_Z14_FEATURES
+ 	help
+-	  Select this to enable optimizations for IBM z14 (3906 series).
+-	  The kernel will be slightly faster but will not work on older
+-	  machines.
++	  Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
++	  and 3906 series). The kernel will be slightly faster but will not
++	  work on older machines.
+ 
+ endchoice
+ 
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 43bbe63e2992..06b513d192b9 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb)
+ 
+ 	if (sb->s_root)
+ 		hypfs_delete_tree(sb->s_root);
+-	if (sb_info->update_file)
++	if (sb_info && sb_info->update_file)
+ 		hypfs_remove(sb_info->update_file);
+ 	kfree(sb->s_fs_info);
+ 	sb->s_fs_info = NULL;
+diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
+index c5bc3f209652..5ee27dc9a10c 100644
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
+ 		model = cpumcf_z13_pmu_event_attr;
+ 		break;
+ 	case 0x3906:
++	case 0x3907:
+ 		model = cpumcf_z14_pmu_event_attr;
+ 		break;
+ 	default:
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index a6a91f01a17a..ce5ff4c4d435 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -819,6 +819,7 @@ static int __init setup_hwcaps(void)
+ 		strcpy(elf_platform, "z13");
+ 		break;
+ 	case 0x3906:
++	case 0x3907:
+ 		strcpy(elf_platform, "z14");
+ 		break;
+ 	}
+diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
+index 2db18cbbb0ea..c0197097c86e 100644
+--- a/arch/um/os-Linux/file.c
++++ b/arch/um/os-Linux/file.c
+@@ -12,6 +12,7 @@
+ #include <sys/mount.h>
+ #include <sys/socket.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <sys/un.h>
+ #include <sys/types.h>
+ #include <os.h>
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index a86d7cc2c2d8..bf0acb8aad8b 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -16,6 +16,7 @@
+ #include <os.h>
+ #include <sysdep/mcontext.h>
+ #include <um_malloc.h>
++#include <sys/ucontext.h>
+ 
+ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+ 	[SIGTRAP]	= relay_signal,
+@@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
+ 
+ static void hard_handler(int sig, siginfo_t *si, void *p)
+ {
+-	struct ucontext *uc = p;
++	ucontext_t *uc = p;
+ 	mcontext_t *mc = &uc->uc_mcontext;
+ 	unsigned long pending = 1UL << sig;
+ 
+diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
+index 1518d2805ae8..27361cbb7ca9 100644
+--- a/arch/x86/um/stub_segv.c
++++ b/arch/x86/um/stub_segv.c
+@@ -6,11 +6,12 @@
+ #include <sysdep/stub.h>
+ #include <sysdep/faultinfo.h>
+ #include <sysdep/mcontext.h>
++#include <sys/ucontext.h>
+ 
+ void __attribute__ ((__section__ (".__syscall_stub")))
+ stub_segv_handler(int sig, siginfo_t *info, void *p)
+ {
+-	struct ucontext *uc = p;
++	ucontext_t *uc = p;
+ 
+ 	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
+ 			      &uc->uc_mcontext);
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 3c2c2530737e..c36d23aa6c35 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1259,10 +1259,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ 	 */
+ 	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+ 
+-	/* Work out if we support NX */
+-	get_cpu_cap(&boot_cpu_data);
+-	x86_configure_nx();
+-
+ 	/* Get mfn list */
+ 	xen_build_dynamic_phys_to_machine();
+ 
+@@ -1272,6 +1268,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ 	 */
+ 	xen_setup_gdt(0);
+ 
++	/* Work out if we support NX */
++	get_cpu_cap(&boot_cpu_data);
++	x86_configure_nx();
++
+ 	xen_init_irq_ops();
+ 
+ 	/* Let's presume PV guests always boot on vCPU with id 0. */
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 9f8f39d49396..984ec6b288df 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -196,7 +196,7 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
+ 		 * In the _LSI, _LSR, _LSW case the locked status is
+ 		 * communicated via the read/write commands
+ 		 */
+-		if (nfit_mem->has_lsi)
++		if (nfit_mem->has_lsr)
+ 			break;
+ 
+ 		if (status >> 16 & ND_CONFIG_LOCKED)
+@@ -483,7 +483,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ 			min_t(u32, 256, in_buf.buffer.length), true);
+ 
+ 	/* call the BIOS, prefer the named methods over _DSM if available */
+-	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
++	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+ 		out_obj = acpi_label_info(handle);
+ 	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ 		struct nd_cmd_get_config_data_hdr *p = buf;
+@@ -1250,8 +1250,11 @@ static ssize_t scrub_show(struct device *dev,
+ 	if (nd_desc) {
+ 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+ 
++		mutex_lock(&acpi_desc->init_mutex);
+ 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
+-				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
++				work_busy(&acpi_desc->work)
++				&& !acpi_desc->cancel ? "+\n" : "\n");
++		mutex_unlock(&acpi_desc->init_mutex);
+ 	}
+ 	device_unlock(dev);
+ 	return rc;
+@@ -1654,12 +1657,23 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
+ 	device_unlock(dev->parent);
+ }
+ 
++static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
++{
++	acpi_handle handle;
++	acpi_status status;
++
++	status = acpi_get_handle(adev->handle, method, &handle);
++
++	if (ACPI_SUCCESS(status))
++		return true;
++	return false;
++}
++
+ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ 		struct nfit_mem *nfit_mem, u32 device_handle)
+ {
+ 	struct acpi_device *adev, *adev_dimm;
+ 	struct device *dev = acpi_desc->dev;
+-	union acpi_object *obj;
+ 	unsigned long dsm_mask;
+ 	const guid_t *guid;
+ 	int i;
+@@ -1732,25 +1746,15 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ 					1ULL << i))
+ 			set_bit(i, &nfit_mem->dsm_mask);
+ 
+-	obj = acpi_label_info(adev_dimm->handle);
+-	if (obj) {
+-		ACPI_FREE(obj);
+-		nfit_mem->has_lsi = 1;
+-		dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
+-	}
+-
+-	obj = acpi_label_read(adev_dimm->handle, 0, 0);
+-	if (obj) {
+-		ACPI_FREE(obj);
+-		nfit_mem->has_lsr = 1;
++	if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
++			&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+ 		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
++		nfit_mem->has_lsr = true;
+ 	}
+ 
+-	obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
+-	if (obj) {
+-		ACPI_FREE(obj);
+-		nfit_mem->has_lsw = 1;
++	if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ 		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
++		nfit_mem->has_lsw = true;
+ 	}
+ 
+ 	return 0;
+@@ -1839,10 +1843,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
+ 			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
+ 		}
+ 
+-		if (nfit_mem->has_lsi)
++		if (nfit_mem->has_lsr) {
+ 			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+-		if (nfit_mem->has_lsr)
+ 			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
++		}
+ 		if (nfit_mem->has_lsw)
+ 			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+ 
+@@ -2579,7 +2583,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
+ 	struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ 	struct nd_blk_region_desc *ndbr_desc;
+ 	struct nfit_mem *nfit_mem;
+-	int blk_valid = 0, rc;
++	int rc;
+ 
+ 	if (!nvdimm) {
+ 		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
+@@ -2599,15 +2603,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
+ 		if (!nfit_mem || !nfit_mem->bdw) {
+ 			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
+ 					spa->range_index, nvdimm_name(nvdimm));
+-		} else {
+-			mapping->size = nfit_mem->bdw->capacity;
+-			mapping->start = nfit_mem->bdw->start_address;
+-			ndr_desc->num_lanes = nfit_mem->bdw->windows;
+-			blk_valid = 1;
++			break;
+ 		}
+ 
++		mapping->size = nfit_mem->bdw->capacity;
++		mapping->start = nfit_mem->bdw->start_address;
++		ndr_desc->num_lanes = nfit_mem->bdw->windows;
+ 		ndr_desc->mapping = mapping;
+-		ndr_desc->num_mappings = blk_valid;
++		ndr_desc->num_mappings = 1;
+ 		ndbr_desc = to_blk_region_desc(ndr_desc);
+ 		ndbr_desc->enable = acpi_nfit_blk_region_enable;
+ 		ndbr_desc->do_io = acpi_desc->blk_do_io;
+diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
+index 50d36e166d70..ac9c49463731 100644
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -171,9 +171,8 @@ struct nfit_mem {
+ 	struct resource *flush_wpq;
+ 	unsigned long dsm_mask;
+ 	int family;
+-	u32 has_lsi:1;
+-	u32 has_lsr:1;
+-	u32 has_lsw:1;
++	bool has_lsr;
++	bool has_lsw;
+ };
+ 
+ struct acpi_nfit_desc {
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 601e5d372887..43587ac680e4 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -219,6 +219,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ 			  "3570R/370R/470R/450R/510R/4450RV"),
+ 		},
+ 	},
++	{
++	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
++	 .callback = video_detect_force_video,
++	 .ident = "SAMSUNG 670Z5E",
++	 .matches = {
++		DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++		DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
++		},
++	},
+ 	{
+ 	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
+ 	 .callback = video_detect_force_video,
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index ee302ccdfbc8..453116fd4362 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1831,7 +1831,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
+ 		return -EINVAL;
+ 	if (val_len % map->format.val_bytes)
+ 		return -EINVAL;
+-	if (map->max_raw_write && map->max_raw_write > val_len)
++	if (map->max_raw_write && map->max_raw_write < val_len)
+ 		return -E2BIG;
+ 
+ 	map->lock(map->lock_arg);
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 1a81f6b8c2ce..942ac63f0b12 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -126,6 +126,10 @@ struct bcm_data {
+ static DEFINE_MUTEX(bcm_device_lock);
+ static LIST_HEAD(bcm_device_list);
+ 
++static int irq_polarity = -1;
++module_param(irq_polarity, int, 0444);
++MODULE_PARM_DESC(irq_polarity, "IRQ polarity 0: active-high 1: active-low");
++
+ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ {
+ 	if (hu->serdev)
+@@ -975,11 +979,17 @@ static int bcm_acpi_probe(struct bcm_device *dev)
+ 	}
+ 	acpi_dev_free_resource_list(&resources);
+ 
+-	dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
+-	if (dmi_id) {
+-		dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
+-			    dmi_id->ident);
+-		dev->irq_active_low = true;
++	if (irq_polarity != -1) {
++		dev->irq_active_low = irq_polarity;
++		dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n",
++			 dev->irq_active_low ? "low" : "high");
++	} else {
++		dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
++		if (dmi_id) {
++			dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
++				 dmi_id->ident);
++			dev->irq_active_low = true;
++		}
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index e5b3d3ba4660..38729baed6ee 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -427,8 +427,9 @@ struct crng_state primary_crng = {
+  * its value (from 0->1->2).
+  */
+ static int crng_init = 0;
+-#define crng_ready() (likely(crng_init > 0))
++#define crng_ready() (likely(crng_init > 1))
+ static int crng_init_cnt = 0;
++static unsigned long crng_global_init_time = 0;
+ #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
+ static void _extract_crng(struct crng_state *crng,
+ 			  __u32 out[CHACHA20_BLOCK_WORDS]);
+@@ -732,7 +733,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+ 
+ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+-	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
++	const int nbits_max = r->poolinfo->poolwords * 32;
+ 
+ 	if (nbits < 0)
+ 		return -EINVAL;
+@@ -786,6 +787,10 @@ static void crng_initialize(struct crng_state *crng)
+ 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ }
+ 
++/*
++ * crng_fast_load() can be called by code in the interrupt service
++ * path.  So we can't afford to dilly-dally.
++ */
+ static int crng_fast_load(const char *cp, size_t len)
+ {
+ 	unsigned long flags;
+@@ -793,7 +798,7 @@ static int crng_fast_load(const char *cp, size_t len)
+ 
+ 	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+ 		return 0;
+-	if (crng_ready()) {
++	if (crng_init != 0) {
+ 		spin_unlock_irqrestore(&primary_crng.lock, flags);
+ 		return 0;
+ 	}
+@@ -812,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len)
+ 	return 1;
+ }
+ 
++/*
++ * crng_slow_load() is called by add_device_randomness, which has two
++ * attributes.  (1) We can't trust the buffer passed to it is
++ * guaranteed to be unpredictable (so it might not have any entropy at
++ * all), and (2) it doesn't have the performance constraints of
++ * crng_fast_load().
++ *
++ * So we do something more comprehensive which is guaranteed to touch
++ * all of the primary_crng's state, and which uses a LFSR with a
++ * period of 255 as part of the mixing algorithm.  Finally, we do
++ * *not* advance crng_init_cnt since buffer we may get may be something
++ * like a fixed DMI table (for example), which might very well be
++ * unique to the machine, but is otherwise unvarying.
++ */
++static int crng_slow_load(const char *cp, size_t len)
++{
++	unsigned long		flags;
++	static unsigned char	lfsr = 1;
++	unsigned char		tmp;
++	unsigned		i, max = CHACHA20_KEY_SIZE;
++	const char *		src_buf = cp;
++	char *			dest_buf = (char *) &primary_crng.state[4];
++
++	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
++		return 0;
++	if (crng_init != 0) {
++		spin_unlock_irqrestore(&primary_crng.lock, flags);
++		return 0;
++	}
++	if (len > max)
++		max = len;
++
++	for (i = 0; i < max ; i++) {
++		tmp = lfsr;
++		lfsr >>= 1;
++		if (tmp & 1)
++			lfsr ^= 0xE1;
++		tmp = dest_buf[i % CHACHA20_KEY_SIZE];
++		dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
++		lfsr += (tmp << 3) | (tmp >> 5);
++	}
++	spin_unlock_irqrestore(&primary_crng.lock, flags);
++	return 1;
++}
++
+ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ {
+ 	unsigned long	flags;
+@@ -830,7 +880,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ 		_crng_backtrack_protect(&primary_crng, buf.block,
+ 					CHACHA20_KEY_SIZE);
+ 	}
+-	spin_lock_irqsave(&primary_crng.lock, flags);
++	spin_lock_irqsave(&crng->lock, flags);
+ 	for (i = 0; i < 8; i++) {
+ 		unsigned long	rv;
+ 		if (!arch_get_random_seed_long(&rv) &&
+@@ -840,7 +890,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ 	}
+ 	memzero_explicit(&buf, sizeof(buf));
+ 	crng->init_time = jiffies;
+-	spin_unlock_irqrestore(&primary_crng.lock, flags);
++	spin_unlock_irqrestore(&crng->lock, flags);
+ 	if (crng == &primary_crng && crng_init < 2) {
+ 		invalidate_batched_entropy();
+ 		crng_init = 2;
+@@ -855,8 +905,9 @@ static void _extract_crng(struct crng_state *crng,
+ {
+ 	unsigned long v, flags;
+ 
+-	if (crng_init > 1 &&
+-	    time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
++	if (crng_ready() &&
++	    (time_after(crng_global_init_time, crng->init_time) ||
++	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
+ 		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ 	spin_lock_irqsave(&crng->lock, flags);
+ 	if (arch_get_random_long(&v))
+@@ -981,10 +1032,8 @@ void add_device_randomness(const void *buf, unsigned int size)
+ 	unsigned long time = random_get_entropy() ^ jiffies;
+ 	unsigned long flags;
+ 
+-	if (!crng_ready()) {
+-		crng_fast_load(buf, size);
+-		return;
+-	}
++	if (!crng_ready() && size)
++		crng_slow_load(buf, size);
+ 
+ 	trace_add_device_randomness(size, _RET_IP_);
+ 	spin_lock_irqsave(&input_pool.lock, flags);
+@@ -1141,7 +1190,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ 	fast_mix(fast_pool);
+ 	add_interrupt_bench(cycles);
+ 
+-	if (!crng_ready()) {
++	if (unlikely(crng_init == 0)) {
+ 		if ((fast_pool->count >= 64) &&
+ 		    crng_fast_load((char *) fast_pool->pool,
+ 				   sizeof(fast_pool->pool))) {
+@@ -1691,6 +1740,7 @@ static int rand_initialize(void)
+ 	init_std_data(&input_pool);
+ 	init_std_data(&blocking_pool);
+ 	crng_initialize(&primary_crng);
++	crng_global_init_time = jiffies;
+ 
+ #ifdef CONFIG_NUMA
+ 	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+@@ -1877,6 +1927,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ 		input_pool.entropy_count = 0;
+ 		blocking_pool.entropy_count = 0;
+ 		return 0;
++	case RNDRESEEDCRNG:
++		if (!capable(CAP_SYS_ADMIN))
++			return -EPERM;
++		if (crng_init < 2)
++			return -ENODATA;
++		crng_reseed(&primary_crng, NULL);
++		crng_global_init_time = jiffies - 1;
++		return 0;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -2214,7 +2272,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
+ {
+ 	struct entropy_store *poolp = &input_pool;
+ 
+-	if (!crng_ready()) {
++	if (unlikely(crng_init == 0)) {
+ 		crng_fast_load(buffer, count);
+ 		return;
+ 	}
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 9e80a953d693..248c04090dea 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -969,6 +969,10 @@ int tpm_do_selftest(struct tpm_chip *chip)
+ 	loops = jiffies_to_msecs(duration) / delay_msec;
+ 
+ 	rc = tpm_continue_selftest(chip);
++	if (rc == TPM_ERR_INVALID_POSTINIT) {
++		chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
++		dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
++	}
+ 	/* This may fail if there was no TPM driver during a suspend/resume
+ 	 * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ 	 */
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index a07f6451694a..fa0d5c8611a0 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -602,9 +602,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
+ 	const struct bcm2835_pll_data *data = pll->data;
+ 
+ 	spin_lock(&cprman->regs_lock);
+-	cprman_write(cprman, data->cm_ctrl_reg,
+-		     cprman_read(cprman, data->cm_ctrl_reg) |
+-		     CM_PLL_ANARST);
++	cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+ 	cprman_write(cprman, data->a2w_ctrl_reg,
+ 		     cprman_read(cprman, data->a2w_ctrl_reg) |
+ 		     A2W_PLL_CTRL_PWRDN);
+@@ -640,6 +638,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
+ 		cpu_relax();
+ 	}
+ 
++	cprman_write(cprman, data->a2w_ctrl_reg,
++		     cprman_read(cprman, data->a2w_ctrl_reg) |
++		     A2W_PLL_CTRL_PRST_DISABLE);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 8e7f16fd87c9..deca7527f92f 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = {
+ 	FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
+ 	FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
+ 	FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
++	FACTOR(CLK_TOP_AXISEL_D4, "axisel_d4", "axi_sel", 1, 4),
+ };
+ 
+ static const char * const axi_parents[] = {
+@@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = {
+ 	GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
+ 	GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
+ 	GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
+-	GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
+-	GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
+-	GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
+-	GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
+-	GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
+-	GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
+-	GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
++	GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8),
++	GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7),
++	GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6),
++	GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5),
++	GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4),
++	GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3),
++	GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2),
+ 	GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
+ 	GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
+ 
+diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
+index 394aa6f03f01..9ff4ea63932d 100644
+--- a/drivers/clk/mvebu/armada-38x.c
++++ b/drivers/clk/mvebu/armada-38x.c
+@@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
+ }
+ 
+ static const u32 armada_38x_cpu_frequencies[] __initconst = {
+-	0, 0, 0, 0,
+-	1066 * 1000 * 1000, 0, 0, 0,
++	666 * 1000 * 1000,  0, 800 * 1000 * 1000, 0,
++	1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
+ 	1332 * 1000 * 1000, 0, 0, 0,
+ 	1600 * 1000 * 1000, 0, 0, 0,
+-	1866 * 1000 * 1000,
++	1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
+ };
+ 
+ static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
+@@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
+ };
+ 
+ static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
+-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+-	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+-	{1, 2}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {1, 2}, {0, 1},
++	{1, 2}, {0, 1}, {1, 2}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {0, 1}, {1, 2},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+@@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+-	{1, 2}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {0, 1}, {7, 15},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
+index eea38f6ea77e..3892346c4fcc 100644
+--- a/drivers/clk/renesas/clk-sh73a0.c
++++ b/drivers/clk/renesas/clk-sh73a0.c
+@@ -46,7 +46,7 @@ struct div4_clk {
+ 	unsigned int shift;
+ };
+ 
+-static struct div4_clk div4_clks[] = {
++static const struct div4_clk div4_clks[] = {
+ 	{ "zg", "pll0", CPG_FRQCRA, 16 },
+ 	{ "m3", "pll1", CPG_FRQCRA, 12 },
+ 	{ "b",  "pll1", CPG_FRQCRA,  8 },
+@@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
+ {
+ 	const struct clk_div_table *table = NULL;
+ 	unsigned int shift, reg, width;
+-	const char *parent_name;
++	const char *parent_name = NULL;
+ 	unsigned int mult = 1;
+ 	unsigned int div = 1;
+ 
+@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
+ 		shift = 24;
+ 		width = 5;
+ 	} else {
+-		struct div4_clk *c;
++		const struct div4_clk *c;
+ 
+ 		for (c = div4_clks; c->name; c++) {
+ 			if (!strcmp(name, c->name)) {
+diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
+index 11a5066e5c27..5234acd30e89 100644
+--- a/drivers/clk/tegra/clk-emc.c
++++ b/drivers/clk/tegra/clk-emc.c
+@@ -515,7 +515,7 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
+ 
+ 	init.name = "emc";
+ 	init.ops = &tegra_clk_emc_ops;
+-	init.flags = 0;
++	init.flags = CLK_IS_CRITICAL;
+ 	init.parent_names = emc_parent_clk_names;
+ 	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
+ 
+diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
+index c02711927d79..2acba2986bc6 100644
+--- a/drivers/clk/tegra/clk-tegra-periph.c
++++ b/drivers/clk/tegra/clk-tegra-periph.c
+@@ -830,7 +830,7 @@ static struct tegra_periph_init_data gate_clks[] = {
+ 	GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
+ 	GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
+ 	GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
+-	GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
++	GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IS_CRITICAL),
+ 	GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
+ 	GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0),
+ 	GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0),
+diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
+index 10047107c1dc..89d6b47a27a8 100644
+--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
++++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
+@@ -125,7 +125,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ 		/* SCLK */
+ 		dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
+ 		if (dt_clk) {
+-			clk = clk_register_divider(NULL, "sclk", "sclk_mux", 0,
++			clk = clk_register_divider(NULL, "sclk", "sclk_mux",
++						CLK_IS_CRITICAL,
+ 						clk_base + SCLK_DIVIDER, 0, 8,
+ 						0, &sysrate_lock);
+ 			*dt_clk = clk;
+@@ -137,7 +138,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ 			clk = tegra_clk_register_super_mux("sclk",
+ 						gen_info->sclk_parents,
+ 						gen_info->num_sclk_parents,
+-						CLK_SET_RATE_PARENT,
++						CLK_SET_RATE_PARENT |
++						CLK_IS_CRITICAL,
+ 						clk_base + SCLK_BURST_POLICY,
+ 						0, 4, 0, 0, NULL);
+ 			*dt_clk = clk;
+@@ -151,7 +153,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ 				   clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ 				   &sysrate_lock);
+ 		clk = clk_register_gate(NULL, "hclk", "hclk_div",
+-				CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++				CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 				clk_base + SYSTEM_CLK_RATE,
+ 				7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ 		*dt_clk = clk;
+diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
+index 63087d17c3e2..c3945c683f60 100644
+--- a/drivers/clk/tegra/clk-tegra114.c
++++ b/drivers/clk/tegra/clk-tegra114.c
+@@ -955,8 +955,7 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
+ 
+ 	/* PLLM */
+ 	clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+-			     CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+-			     &pll_m_params, NULL);
++			     CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ 	clks[TEGRA114_CLK_PLL_M] = clk;
+ 
+ 	/* PLLM_OUT1 */
+diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
+index e81ea5b11577..230f9a2c1abf 100644
+--- a/drivers/clk/tegra/clk-tegra124.c
++++ b/drivers/clk/tegra/clk-tegra124.c
+@@ -1089,8 +1089,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
+ 
+ 	/* PLLM */
+ 	clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+-			     CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+-			     &pll_m_params, NULL);
++			     CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ 	clk_register_clkdev(clk, "pll_m", NULL);
+ 	clks[TEGRA124_CLK_PLL_M] = clk;
+ 
+@@ -1099,7 +1098,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
+ 				clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 				8, 8, 1, NULL);
+ 	clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+-				clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
++				clk_base + PLLM_OUT, 1, 0,
+ 				CLK_SET_RATE_PARENT, 0, NULL);
+ 	clk_register_clkdev(clk, "pll_m_out1", NULL);
+ 	clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
+@@ -1272,7 +1271,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
+ 	{ TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
+ 	{ TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
+ 	{ TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
+-	{ TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1 },
++	{ TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 0 },
+ 	{ TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1 },
+ 	{ TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1 },
+ 	{ TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0 },
+diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
+index cbd5a2e5c569..e3392ca2c2fc 100644
+--- a/drivers/clk/tegra/clk-tegra20.c
++++ b/drivers/clk/tegra/clk-tegra20.c
+@@ -576,6 +576,7 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ 	[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ 	[tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ 	[tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
++	[tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
+ };
+ 
+ static unsigned long tegra20_clk_measure_input_freq(void)
+@@ -651,8 +652,7 @@ static void tegra20_pll_init(void)
+ 
+ 	/* PLLM */
+ 	clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
+-			    CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+-			    &pll_m_params, NULL);
++			    CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ 	clks[TEGRA20_CLK_PLL_M] = clk;
+ 
+ 	/* PLLM_OUT1 */
+@@ -660,7 +660,7 @@ static void tegra20_pll_init(void)
+ 				clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 				8, 8, 1, NULL);
+ 	clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+-				clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
++				clk_base + PLLM_OUT, 1, 0,
+ 				CLK_SET_RATE_PARENT, 0, NULL);
+ 	clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
+ 
+@@ -723,7 +723,8 @@ static void tegra20_super_clk_init(void)
+ 
+ 	/* SCLK */
+ 	clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+-			      ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
++			      ARRAY_SIZE(sclk_parents),
++			      CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 			      clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ 	clks[TEGRA20_CLK_SCLK] = clk;
+ 
+@@ -814,9 +815,6 @@ static void __init tegra20_periph_clk_init(void)
+ 			       CLK_SET_RATE_NO_REPARENT,
+ 			       clk_base + CLK_SOURCE_EMC,
+ 			       30, 2, 0, &emc_lock);
+-	clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+-				    57, periph_clk_enb_refcnt);
+-	clks[TEGRA20_CLK_EMC] = clk;
+ 
+ 	clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
+ 				    &emc_lock);
+@@ -1019,13 +1017,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ 	{ TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
+ 	{ TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
+ 	{ TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
+-	{ TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
+-	{ TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
+-	{ TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
+-	{ TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
+-	{ TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
++	{ TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 0 },
++	{ TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 0 },
++	{ TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 0 },
++	{ TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 0 },
++	{ TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 0 },
+ 	{ TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1 },
+-	{ TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0 },
+ 	{ TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0 },
+diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
+index 9e6260869eb9..25cc6e0905be 100644
+--- a/drivers/clk/tegra/clk-tegra210.c
++++ b/drivers/clk/tegra/clk-tegra210.c
+@@ -3025,7 +3025,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ 	{ TEGRA210_CLK_I2S4, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
+ 	{ TEGRA210_CLK_HOST1X, TEGRA210_CLK_PLL_P, 136000000, 1 },
+ 	{ TEGRA210_CLK_SCLK_MUX, TEGRA210_CLK_PLL_P, 0, 1 },
+-	{ TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 1 },
++	{ TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 0 },
+ 	{ TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 },
+ 	{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
+ 	{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
+@@ -3040,7 +3040,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ 	{ TEGRA210_CLK_XUSB_DEV_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 },
+ 	{ TEGRA210_CLK_SATA, TEGRA210_CLK_PLL_P, 104000000, 0 },
+ 	{ TEGRA210_CLK_SATA_OOB, TEGRA210_CLK_PLL_P, 204000000, 0 },
+-	{ TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ 	/* TODO find a way to enable this on-demand */
+diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
+index bee84c554932..8428895ad475 100644
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -819,6 +819,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
+ 	[tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
+ 	[tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
+ 	[tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
++	[tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
+ };
+ 
+ static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
+@@ -843,8 +844,7 @@ static void __init tegra30_pll_init(void)
+ 
+ 	/* PLLM */
+ 	clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+-			    CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+-			    &pll_m_params, NULL);
++			    CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ 	clks[TEGRA30_CLK_PLL_M] = clk;
+ 
+ 	/* PLLM_OUT1 */
+@@ -852,7 +852,7 @@ static void __init tegra30_pll_init(void)
+ 				clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 				8, 8, 1, NULL);
+ 	clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+-				clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
++				clk_base + PLLM_OUT, 1, 0,
+ 				CLK_SET_RATE_PARENT, 0, NULL);
+ 	clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
+ 
+@@ -990,7 +990,7 @@ static void __init tegra30_super_clk_init(void)
+ 	/* SCLK */
+ 	clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ 				  ARRAY_SIZE(sclk_parents),
+-				  CLK_SET_RATE_PARENT,
++				  CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ 				  clk_base + SCLK_BURST_POLICY,
+ 				  0, 4, 0, 0, NULL);
+ 	clks[TEGRA30_CLK_SCLK] = clk;
+@@ -1060,9 +1060,6 @@ static void __init tegra30_periph_clk_init(void)
+ 			       CLK_SET_RATE_NO_REPARENT,
+ 			       clk_base + CLK_SOURCE_EMC,
+ 			       30, 2, 0, &emc_lock);
+-	clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+-				    57, periph_clk_enb_refcnt);
+-	clks[TEGRA30_CLK_EMC] = clk;
+ 
+ 	clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
+ 				    &emc_lock);
+@@ -1252,10 +1249,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ 	{ TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0 },
+ 	{ TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0 },
+ 	{ TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0 },
+-	{ TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1 },
+-	{ TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1 },
+-	{ TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1 },
+ 	{ TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0 },
+ 	{ TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0 },
+diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
+index c6ebc88a7d8d..72a2975499db 100644
+--- a/drivers/cpufreq/armada-37xx-cpufreq.c
++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
+@@ -202,6 +202,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 	cur_frequency = clk_get_rate(clk);
+ 	if (!cur_frequency) {
+ 		dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
++		clk_put(clk);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -210,6 +211,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
+ 		return -EINVAL;
+ 
+ 	armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
++	clk_put(clk);
+ 
+ 	for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ 	     load_lvl++) {
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index a1c3025f9df7..dcb1cb9a4572 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -20,6 +20,7 @@
+ #include <linux/cpu.h>
+ #include <linux/cpufreq.h>
+ #include <linux/dmi.h>
++#include <linux/time.h>
+ #include <linux/vmalloc.h>
+ 
+ #include <asm/unaligned.h>
+@@ -162,6 +163,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ 	policy->cpuinfo.max_freq = cppc_dmi_max_khz;
+ 
+ 	policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
++	policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
++		NSEC_PER_USEC;
+ 	policy->shared_type = cpu->shared_type;
+ 
+ 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+diff --git a/drivers/dax/device.c b/drivers/dax/device.c
+index 2137dbc29877..383779707404 100644
+--- a/drivers/dax/device.c
++++ b/drivers/dax/device.c
+@@ -19,6 +19,7 @@
+ #include <linux/dax.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
++#include <linux/mman.h>
+ #include "dax-private.h"
+ #include "dax.h"
+ 
+@@ -534,6 +535,7 @@ static const struct file_operations dax_fops = {
+ 	.release = dax_release,
+ 	.get_unmapped_area = dax_get_unmapped_area,
+ 	.mmap = dax_mmap,
++	.mmap_supported_flags = MAP_SYNC,
+ };
+ 
+ static void dev_dax_release(struct device *dev)
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index c00e3923d7d8..94236ec9d410 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ 		rmb();
+-		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+-		rmb();
+ 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ 		rmb();
++		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
++		rmb();
+ 		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ 		rmb();
+ 
+diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
+index 7c4bc8c44c3f..b7e9ea377d70 100644
+--- a/drivers/extcon/extcon-intel-cht-wc.c
++++ b/drivers/extcon/extcon-intel-cht-wc.c
+@@ -66,6 +66,8 @@
+ 
+ #define CHT_WC_VBUS_GPIO_CTLO		0x6e2d
+ #define CHT_WC_VBUS_GPIO_CTLO_OUTPUT	BIT(0)
++#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD	BIT(4)
++#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT	BIT(5)
+ 
+ enum cht_wc_usb_id {
+ 	USB_ID_OTG,
+@@ -183,14 +185,15 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
+ {
+ 	int ret, val;
+ 
+-	val = enable ? CHT_WC_VBUS_GPIO_CTLO_OUTPUT : 0;
+-
+ 	/*
+ 	 * The 5V boost converter is enabled through a gpio on the PMIC, since
+ 	 * there currently is no gpio driver we access the gpio reg directly.
+ 	 */
+-	ret = regmap_update_bits(ext->regmap, CHT_WC_VBUS_GPIO_CTLO,
+-				 CHT_WC_VBUS_GPIO_CTLO_OUTPUT, val);
++	val = CHT_WC_VBUS_GPIO_CTLO_DRV_OD | CHT_WC_VBUS_GPIO_CTLO_DIR_OUT;
++	if (enable)
++		val |= CHT_WC_VBUS_GPIO_CTLO_OUTPUT;
++
++	ret = regmap_write(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, val);
+ 	if (ret)
+ 		dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index c53095b3b0fb..1ae5ae8c45a4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
++	{ 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ 	{ 0, 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 59089e027f4d..92be7f6de197 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ 	for (i = 0; i < list->num_entries; i++) {
+ 		unsigned priority = list->array[i].priority;
+ 
+-		list_add_tail(&list->array[i].tv.head,
+-			      &bucket[priority]);
++		if (!list->array[i].robj->parent)
++			list_add_tail(&list->array[i].tv.head,
++				      &bucket[priority]);
++
+ 		list->array[i].user_pages = NULL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index e80fc38141b5..b03b2983de1e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -542,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
+ 	INIT_LIST_HEAD(&duplicates);
+ 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
+ 
+-	if (p->uf_entry.robj)
++	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
+ 		list_add(&p->uf_entry.tv.head, &p->validated);
+ 
+ 	while (1) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+index 6e8278e689b1..0066da3e79bb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+@@ -866,7 +866,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, addr & 0xfffffffc);
+ 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ 	amdgpu_ring_write(ring, seq); /* reference */
+-	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ 	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+index d4787ad4d346..bd844edad6b7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+@@ -844,7 +844,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, addr & 0xfffffffc);
+ 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ 	amdgpu_ring_write(ring, seq); /* reference */
+-	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+index 521978c40537..fa63c564cf91 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+@@ -1110,7 +1110,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, addr & 0xfffffffc);
+ 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ 	amdgpu_ring_write(ring, seq); /* reference */
+-	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+index 91cf95a8c39c..036798b52f67 100644
+--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+@@ -1113,7 +1113,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+ 	amdgpu_ring_write(ring, addr & 0xfffffffc);
+ 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ 	amdgpu_ring_write(ring, seq); /* reference */
+-	amdgpu_ring_write(ring, 0xfffffff); /* mask */
++	amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 2095173aaabf..3598151652d7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1231,6 +1231,71 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+ 		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+ 
++static int si_get_pcie_lanes(struct amdgpu_device *adev)
++{
++	u32 link_width_cntl;
++
++	if (adev->flags & AMD_IS_APU)
++		return 0;
++
++	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
++
++	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
++	case LC_LINK_WIDTH_X1:
++		return 1;
++	case LC_LINK_WIDTH_X2:
++		return 2;
++	case LC_LINK_WIDTH_X4:
++		return 4;
++	case LC_LINK_WIDTH_X8:
++		return 8;
++	case LC_LINK_WIDTH_X0:
++	case LC_LINK_WIDTH_X16:
++	default:
++		return 16;
++	}
++}
++
++static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
++{
++	u32 link_width_cntl, mask;
++
++	if (adev->flags & AMD_IS_APU)
++		return;
++
++	switch (lanes) {
++	case 0:
++		mask = LC_LINK_WIDTH_X0;
++		break;
++	case 1:
++		mask = LC_LINK_WIDTH_X1;
++		break;
++	case 2:
++		mask = LC_LINK_WIDTH_X2;
++		break;
++	case 4:
++		mask = LC_LINK_WIDTH_X4;
++		break;
++	case 8:
++		mask = LC_LINK_WIDTH_X8;
++		break;
++	case 16:
++		mask = LC_LINK_WIDTH_X16;
++		break;
++	default:
++		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
++		return;
++	}
++
++	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
++	link_width_cntl &= ~LC_LINK_WIDTH_MASK;
++	link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
++	link_width_cntl |= (LC_RECONFIG_NOW |
++			    LC_RECONFIG_ARC_MISSING_ESCAPE);
++
++	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
++}
++
+ static const struct amdgpu_asic_funcs si_asic_funcs =
+ {
+ 	.read_disabled_bios = &si_read_disabled_bios,
+@@ -1241,6 +1306,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
+ 	.get_xclk = &si_get_xclk,
+ 	.set_uvd_clocks = &si_set_uvd_clocks,
+ 	.set_vce_clocks = NULL,
++	.get_pcie_lanes = &si_get_pcie_lanes,
++	.set_pcie_lanes = &si_set_pcie_lanes,
+ 	.get_config_memsize = &si_get_config_memsize,
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+index 22f0b7ff3ac9..b1a3ca585ed1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+@@ -6370,9 +6370,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
+ {
+ 	u32 lane_width;
+ 	u32 new_lane_width =
+-		(amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 	u32 current_lane_width =
+-		(amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 
+ 	if (new_lane_width != current_lane_width) {
+ 		amdgpu_set_pcie_lanes(adev, new_lane_width);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 63c67346d316..8a6e6fbc78cd 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4776,33 +4776,6 @@ static int dm_update_planes_state(struct dc *dc,
+ 	return ret;
+ }
+ 
+-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
+-					  struct drm_crtc *crtc)
+-{
+-	struct drm_plane *plane;
+-	struct drm_crtc_state *crtc_state;
+-
+-	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
+-
+-	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+-		struct drm_plane_state *plane_state =
+-			drm_atomic_get_plane_state(state, plane);
+-
+-		if (IS_ERR(plane_state))
+-			return -EDEADLK;
+-
+-		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
+-		if (IS_ERR(crtc_state))
+-			return PTR_ERR(crtc_state);
+-
+-		if (crtc->primary == plane && crtc_state->active) {
+-			if (!plane_state->fb)
+-				return -EINVAL;
+-		}
+-	}
+-	return 0;
+-}
+-
+ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 				  struct drm_atomic_state *state)
+ {
+@@ -4826,10 +4799,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ 		goto fail;
+ 
+ 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+-		ret = dm_atomic_check_plane_state_fb(state, crtc);
+-		if (ret)
+-			goto fail;
+-
+ 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ 		    !new_crtc_state->color_mgmt_changed)
+ 			continue;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 83bae207371d..b3c30abcb8f1 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -736,6 +736,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
+ 		if (info_frame->avi.valid) {
+ 			const uint32_t *content =
+ 				(const uint32_t *) &info_frame->avi.sb[0];
++			/*we need turn on clock before programming AFMT block*/
++			REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+ 
+ 			REG_WRITE(AFMT_AVI_INFO0, content[0]);
+ 
+diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
+index c6197d990818..d35b93f5ecfc 100644
+--- a/drivers/gpu/drm/i915/gvt/gvt.h
++++ b/drivers/gpu/drm/i915/gvt/gvt.h
+@@ -308,7 +308,10 @@ struct intel_gvt {
+ 	wait_queue_head_t service_thread_wq;
+ 	unsigned long service_request;
+ 
+-	struct engine_mmio *engine_mmio_list;
++	struct {
++		struct engine_mmio *mmio;
++		int ctx_mmio_count[I915_NUM_ENGINES];
++	} engine_mmio_list;
+ 
+ 	struct dentry *debugfs_root;
+ };
+diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
+index 152df3d0291e..c44dba338c57 100644
+--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
++++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
+@@ -50,6 +50,8 @@
+ #define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
+ #define VF_GUARDBAND		_MMIO(0x83a4)
+ 
++#define GEN9_MOCS_SIZE		64
++
+ /* Raw offset is appened to each line for convenience. */
+ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
+ 	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+@@ -152,8 +154,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
+ 
+ static struct {
+ 	bool initialized;
+-	u32 control_table[I915_NUM_ENGINES][64];
+-	u32 l3cc_table[32];
++	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
++	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
+ } gen9_render_mocs;
+ 
+ static void load_render_mocs(struct drm_i915_private *dev_priv)
+@@ -170,7 +172,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
+ 
+ 	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ 		offset.reg = regs[ring_id];
+-		for (i = 0; i < 64; i++) {
++		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+ 			gen9_render_mocs.control_table[ring_id][i] =
+ 				I915_READ_FW(offset);
+ 			offset.reg += 4;
+@@ -178,7 +180,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
+ 	}
+ 
+ 	offset.reg = 0xb020;
+-	for (i = 0; i < 32; i++) {
++	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+ 		gen9_render_mocs.l3cc_table[i] =
+ 			I915_READ_FW(offset);
+ 		offset.reg += 4;
+@@ -186,6 +188,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
+ 	gen9_render_mocs.initialized = true;
+ }
+ 
++static int
++restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
++				 struct drm_i915_gem_request *req)
++{
++	u32 *cs;
++	int ret;
++	struct engine_mmio *mmio;
++	struct intel_gvt *gvt = vgpu->gvt;
++	int ring_id = req->engine->id;
++	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
++
++	if (count == 0)
++		return 0;
++
++	ret = req->engine->emit_flush(req, EMIT_BARRIER);
++	if (ret)
++		return ret;
++
++	cs = intel_ring_begin(req, count * 2 + 2);
++	if (IS_ERR(cs))
++		return PTR_ERR(cs);
++
++	*cs++ = MI_LOAD_REGISTER_IMM(count);
++	for (mmio = gvt->engine_mmio_list.mmio;
++	     i915_mmio_reg_valid(mmio->reg); mmio++) {
++		if (mmio->ring_id != ring_id ||
++		    !mmio->in_context)
++			continue;
++
++		*cs++ = i915_mmio_reg_offset(mmio->reg);
++		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
++				(mmio->mask << 16);
++		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++			      *(cs-2), *(cs-1), vgpu->id, ring_id);
++	}
++
++	*cs++ = MI_NOOP;
++	intel_ring_advance(req, cs);
++
++	ret = req->engine->emit_flush(req, EMIT_BARRIER);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int
++restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
++					struct drm_i915_gem_request *req)
++{
++	unsigned int index;
++	u32 *cs;
++
++	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
++	if (IS_ERR(cs))
++		return PTR_ERR(cs);
++
++	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
++
++	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
++		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
++		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
++		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
++
++	}
++
++	*cs++ = MI_NOOP;
++	intel_ring_advance(req, cs);
++
++	return 0;
++}
++
++static int
++restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
++				     struct drm_i915_gem_request *req)
++{
++	unsigned int index;
++	u32 *cs;
++
++	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
++	if (IS_ERR(cs))
++		return PTR_ERR(cs);
++
++	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
++
++	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
++		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
++		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
++		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
++			      *(cs-2), *(cs-1), vgpu->id, req->engine->id);
++
++	}
++
++	*cs++ = MI_NOOP;
++	intel_ring_advance(req, cs);
++
++	return 0;
++}
++
++/*
++ * Use lri command to initialize the mmio which is in context state image for
++ * inhibit context, it contains tracked engine mmio, render_mocs and
++ * render_mocs_l3cc.
++ */
++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
++				       struct drm_i915_gem_request *req)
++{
++	int ret;
++	u32 *cs;
++
++	cs = intel_ring_begin(req, 2);
++	if (IS_ERR(cs))
++		return PTR_ERR(cs);
++
++	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++	*cs++ = MI_NOOP;
++	intel_ring_advance(req, cs);
++
++	ret = restore_context_mmio_for_inhibit(vgpu, req);
++	if (ret)
++		goto out;
++
++	/* no MOCS register in context except render engine */
++	if (req->engine->id != RCS)
++		goto out;
++
++	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
++	if (ret)
++		goto out;
++
++	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
++	if (ret)
++		goto out;
++
++out:
++	cs = intel_ring_begin(req, 2);
++	if (IS_ERR(cs))
++		return PTR_ERR(cs);
++
++	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
++	*cs++ = MI_NOOP;
++	intel_ring_advance(req, cs);
++
++	return ret;
++}
++
+ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+ {
+ 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+@@ -252,11 +401,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ 		return;
+ 
++	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
++		return;
++
+ 	if (!pre && !gen9_render_mocs.initialized)
+ 		load_render_mocs(dev_priv);
+ 
+ 	offset.reg = regs[ring_id];
+-	for (i = 0; i < 64; i++) {
++	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
+ 		if (pre)
+ 			old_v = vgpu_vreg_t(pre, offset);
+ 		else
+@@ -274,7 +426,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ 
+ 	if (ring_id == RCS) {
+ 		l3_offset.reg = 0xb020;
+-		for (i = 0; i < 32; i++) {
++		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
+ 			if (pre)
+ 				old_v = vgpu_vreg_t(pre, l3_offset);
+ 			else
+@@ -294,6 +446,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ 
+ #define CTX_CONTEXT_CONTROL_VAL	0x03
+ 
++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
++{
++	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
++	u32 inhibit_mask =
++		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
++
++	return inhibit_mask ==
++		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
++}
++
+ /* Switch ring mmio values (context). */
+ static void switch_mmio(struct intel_vgpu *pre,
+ 			struct intel_vgpu *next,
+@@ -301,9 +463,6 @@ static void switch_mmio(struct intel_vgpu *pre,
+ {
+ 	struct drm_i915_private *dev_priv;
+ 	struct intel_vgpu_submission *s;
+-	u32 *reg_state, ctx_ctrl;
+-	u32 inhibit_mask =
+-		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ 	struct engine_mmio *mmio;
+ 	u32 old_v, new_v;
+ 
+@@ -311,10 +470,18 @@ static void switch_mmio(struct intel_vgpu *pre,
+ 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ 		switch_mocs(pre, next, ring_id);
+ 
+-	for (mmio = dev_priv->gvt->engine_mmio_list;
++	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+ 		if (mmio->ring_id != ring_id)
+ 			continue;
++		/*
++		 * No need to do save or restore of the mmio which is in context
++		 * state image on kabylake, it's initialized by lri command and
++		 * save or restore with context together.
++		 */
++		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
++			continue;
++
+ 		// save
+ 		if (pre) {
+ 			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+@@ -328,16 +495,13 @@ static void switch_mmio(struct intel_vgpu *pre,
+ 		// restore
+ 		if (next) {
+ 			s = &next->submission;
+-			reg_state =
+-				s->shadow_ctx->engine[ring_id].lrc_reg_state;
+-			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+ 			/*
+-			 * if it is an inhibit context, load in_context mmio
+-			 * into HW by mmio write. If it is not, skip this mmio
+-			 * write.
++			 * No need to restore the mmio which is in context state
++			 * image if it's not inhibit context, it will restore
++			 * itself.
+ 			 */
+ 			if (mmio->in_context &&
+-			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
++			    !is_inhibit_context(s->shadow_ctx, ring_id))
+ 				continue;
+ 
+ 			if (mmio->mask)
+@@ -408,8 +572,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+  */
+ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+ {
++	struct engine_mmio *mmio;
++
+ 	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+-		gvt->engine_mmio_list = gen9_engine_mmio_list;
++		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
+ 	else
+-		gvt->engine_mmio_list = gen8_engine_mmio_list;
++		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
++
++	for (mmio = gvt->engine_mmio_list.mmio;
++	     i915_mmio_reg_valid(mmio->reg); mmio++) {
++		if (mmio->in_context)
++			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
++	}
+ }
+diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
+index ca2c6a745673..0b1d98536653 100644
+--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
++++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
+@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+ 
+ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
+ 
++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
++
++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
++				       struct drm_i915_gem_request *req);
++
+ #endif
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index d74d6f05c62c..88b7b47695bd 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -275,6 +275,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+ 	struct intel_vgpu *vgpu = workload->vgpu;
+ 	void *shadow_ring_buffer_va;
+ 	u32 *cs;
++	struct drm_i915_gem_request *req = workload->req;
++
++	if (IS_KABYLAKE(req->i915) &&
++	    is_inhibit_context(req->ctx, req->engine->id))
++		intel_vgpu_restore_inhibit_context(vgpu, req);
+ 
+ 	/* allocate shadow ring buffer */
+ 	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 2f5209de0391..f1cd4f0ffc62 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1599,15 +1599,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(dev);
+ 	struct pci_dev *pdev = dev_priv->drm.pdev;
+-	bool fw_csr;
+ 	int ret;
+ 
+ 	disable_rpm_wakeref_asserts(dev_priv);
+ 
+ 	intel_display_set_init_power(dev_priv, false);
+ 
+-	fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
+-		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ 	/*
+ 	 * In case of firmware assisted context save/restore don't manually
+ 	 * deinit the power domains. This also means the CSR/DMC firmware will
+@@ -1615,8 +1612,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+ 	 * also enable deeper system power states that would be blocked if the
+ 	 * firmware was inactive.
+ 	 */
+-	if (!fw_csr)
++	if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
++	    dev_priv->csr.dmc_payload == NULL) {
+ 		intel_power_domains_suspend(dev_priv);
++		dev_priv->power_domains_suspended = true;
++	}
+ 
+ 	ret = 0;
+ 	if (IS_GEN9_LP(dev_priv))
+@@ -1628,8 +1628,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+ 
+ 	if (ret) {
+ 		DRM_ERROR("Suspend complete failed: %d\n", ret);
+-		if (!fw_csr)
++		if (dev_priv->power_domains_suspended) {
+ 			intel_power_domains_init_hw(dev_priv, true);
++			dev_priv->power_domains_suspended = false;
++		}
+ 
+ 		goto out;
+ 	}
+@@ -1650,8 +1652,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+ 	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
+ 		pci_set_power_state(pdev, PCI_D3hot);
+ 
+-	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
+-
+ out:
+ 	enable_rpm_wakeref_asserts(dev_priv);
+ 
+@@ -1818,8 +1818,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 	intel_uncore_resume_early(dev_priv);
+ 
+ 	if (IS_GEN9_LP(dev_priv)) {
+-		if (!dev_priv->suspended_to_idle)
+-			gen9_sanitize_dc_state(dev_priv);
++		gen9_sanitize_dc_state(dev_priv);
+ 		bxt_disable_dc9(dev_priv);
+ 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ 		hsw_disable_pc8(dev_priv);
+@@ -1827,8 +1826,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 
+ 	intel_uncore_sanitize(dev_priv);
+ 
+-	if (IS_GEN9_LP(dev_priv) ||
+-	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
++	if (dev_priv->power_domains_suspended)
+ 		intel_power_domains_init_hw(dev_priv, true);
+ 	else
+ 		intel_display_set_init_power(dev_priv, true);
+@@ -1838,7 +1836,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
+ 	enable_rpm_wakeref_asserts(dev_priv);
+ 
+ out:
+-	dev_priv->suspended_to_idle = false;
++	dev_priv->power_domains_suspended = false;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index d307429a5ae0..55c6d9077a8a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2099,7 +2099,7 @@ struct drm_i915_private {
+ 	u32 bxt_phy_grc;
+ 
+ 	u32 suspend_count;
+-	bool suspended_to_idle;
++	bool power_domains_suspended;
+ 	struct i915_suspend_saved_registers regfile;
+ 	struct vlv_s0ix_state vlv_s0ix_state;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 33eb0c5b1d32..175d552c8bae 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -6236,6 +6236,12 @@ enum {
+ #define _SPATILEOFF		(VLV_DISPLAY_BASE + 0x721a4)
+ #define _SPACONSTALPHA		(VLV_DISPLAY_BASE + 0x721a8)
+ #define   SP_CONST_ALPHA_ENABLE		(1<<31)
++#define _SPACLRC0		(VLV_DISPLAY_BASE + 0x721d0)
++#define   SP_CONTRAST(x)		((x) << 18) /* u3.6 */
++#define   SP_BRIGHTNESS(x)		((x) & 0xff) /* s8 */
++#define _SPACLRC1		(VLV_DISPLAY_BASE + 0x721d4)
++#define   SP_SH_SIN(x)			(((x) & 0x7ff) << 16) /* s4.7 */
++#define   SP_SH_COS(x)			(x) /* u3.7 */
+ #define _SPAGAMC		(VLV_DISPLAY_BASE + 0x721f4)
+ 
+ #define _SPBCNTR		(VLV_DISPLAY_BASE + 0x72280)
+@@ -6249,6 +6255,8 @@ enum {
+ #define _SPBKEYMAXVAL		(VLV_DISPLAY_BASE + 0x722a0)
+ #define _SPBTILEOFF		(VLV_DISPLAY_BASE + 0x722a4)
+ #define _SPBCONSTALPHA		(VLV_DISPLAY_BASE + 0x722a8)
++#define _SPBCLRC0		(VLV_DISPLAY_BASE + 0x722d0)
++#define _SPBCLRC1		(VLV_DISPLAY_BASE + 0x722d4)
+ #define _SPBGAMC		(VLV_DISPLAY_BASE + 0x722f4)
+ 
+ #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+@@ -6265,6 +6273,8 @@ enum {
+ #define SPKEYMAXVAL(pipe, plane_id)	_MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+ #define SPTILEOFF(pipe, plane_id)	_MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+ #define SPCONSTALPHA(pipe, plane_id)	_MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
++#define SPCLRC0(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
++#define SPCLRC1(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
+ #define SPGAMC(pipe, plane_id)		_MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+ 
+ /*
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+index dd485f59eb1d..fb95074a67ff 100644
+--- a/drivers/gpu/drm/i915/intel_sprite.c
++++ b/drivers/gpu/drm/i915/intel_sprite.c
+@@ -346,44 +346,87 @@ skl_plane_get_hw_state(struct intel_plane *plane)
+ }
+ 
+ static void
+-chv_update_csc(struct intel_plane *plane, uint32_t format)
++chv_update_csc(const struct intel_plane_state *plane_state)
+ {
++	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	const struct drm_framebuffer *fb = plane_state->base.fb;
+ 	enum plane_id plane_id = plane->id;
+ 
+ 	/* Seems RGB data bypasses the CSC always */
+-	if (!format_is_yuv(format))
++	if (!format_is_yuv(fb->format->format))
+ 		return;
+ 
+ 	/*
+-	 * BT.601 limited range YCbCr -> full range RGB
++	 * BT.601 full range YCbCr -> full range RGB
+ 	 *
+-	 * |r|   | 6537 4769     0|   |cr  |
+-	 * |g| = |-3330 4769 -1605| x |y-64|
+-	 * |b|   |    0 4769  8263|   |cb  |
++	 * |r|   | 5743 4096     0|   |cr|
++	 * |g| = |-2925 4096 -1410| x |y |
++	 * |b|   |    0 4096  7258|   |cb|
+ 	 *
+-	 * Cb and Cr apparently come in as signed already, so no
+-	 * need for any offset. For Y we need to remove the offset.
++	 * Cb and Cr apparently come in as signed already,
++	 * and we get full range data in on account of CLRC0/1
+ 	 */
+-	I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
++	I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ 	I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ 	I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ 
+-	I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+-	I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+-	I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+-	I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+-	I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
++	I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4096) | SPCSC_C0(5743));
++	I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-2925) | SPCSC_C0(0));
++	I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1410) | SPCSC_C0(4096));
++	I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4096) | SPCSC_C0(0));
++	I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(7258));
+ 
+-	I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+-	I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+-	I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
++	I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
++	I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
++	I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ 
+ 	I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ 	I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ 	I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ }
+ 
++#define SIN_0 0
++#define COS_0 1
++
++static void
++vlv_update_clrc(const struct intel_plane_state *plane_state)
++{
++	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
++	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
++	const struct drm_framebuffer *fb = plane_state->base.fb;
++	enum pipe pipe = plane->pipe;
++	enum plane_id plane_id = plane->id;
++	int contrast, brightness, sh_scale, sh_sin, sh_cos;
++
++	if (format_is_yuv(fb->format->format)) {
++		/*
++		 * Expand limited range to full range:
++		 * Contrast is applied first and is used to expand Y range.
++		 * Brightness is applied second and is used to remove the
++		 * offset from Y. Saturation/hue is used to expand CbCr range.
++		 */
++		contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
++		brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
++		sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
++		sh_sin = SIN_0 * sh_scale;
++		sh_cos = COS_0 * sh_scale;
++	} else {
++		/* Pass-through everything. */
++		contrast = 1 << 6;
++		brightness = 0;
++		sh_scale = 1 << 7;
++		sh_sin = SIN_0 * sh_scale;
++		sh_cos = COS_0 * sh_scale;
++	}
++
++	/* FIXME these register are single buffered :( */
++	I915_WRITE_FW(SPCLRC0(pipe, plane_id),
++		      SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
++	I915_WRITE_FW(SPCLRC1(pipe, plane_id),
++		      SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
++}
++
+ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ 			  const struct intel_plane_state *plane_state)
+ {
+@@ -477,8 +520,10 @@ vlv_update_plane(struct intel_plane *plane,
+ 
+ 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ 
++	vlv_update_clrc(plane_state);
++
+ 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
+-		chv_update_csc(plane, fb->format->format);
++		chv_update_csc(plane_state);
+ 
+ 	if (key->flags) {
+ 		I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 7828a5e10629..0bbc23175d49 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -139,6 +139,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ 	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ 	 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
++	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
++	 */
++	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
+ 	{ 0, 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 97a0a639dad9..90d5b41007bf 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
+ {
+ 	u32 lane_width;
+ 	u32 new_lane_width =
+-		(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 	u32 current_lane_width =
+-		(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 
+ 	if (new_lane_width != current_lane_width) {
+ 		radeon_set_pcie_lanes(rdev, new_lane_width);
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index ba7505292b78..7b224e08cbf1 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1414,6 +1414,9 @@ static int vop_initial(struct vop *vop)
+ 	usleep_range(10, 20);
+ 	reset_control_deassert(ahb_rst);
+ 
++	VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
++	VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
++
+ 	memcpy(vop->regsbak, vop->regs, vop->len);
+ 
+ 	VOP_REG_SET(vop, misc, global_regdone_en, 1);
+@@ -1569,17 +1572,9 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
+ 
+ 	mutex_init(&vop->vsync_mutex);
+ 
+-	ret = devm_request_irq(dev, vop->irq, vop_isr,
+-			       IRQF_SHARED, dev_name(dev), vop);
+-	if (ret)
+-		return ret;
+-
+-	/* IRQ is initially disabled; it gets enabled in power_on */
+-	disable_irq(vop->irq);
+-
+ 	ret = vop_create_crtc(vop);
+ 	if (ret)
+-		goto err_enable_irq;
++		return ret;
+ 
+ 	pm_runtime_enable(&pdev->dev);
+ 
+@@ -1590,13 +1585,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
+ 		goto err_disable_pm_runtime;
+ 	}
+ 
++	ret = devm_request_irq(dev, vop->irq, vop_isr,
++			       IRQF_SHARED, dev_name(dev), vop);
++	if (ret)
++		goto err_disable_pm_runtime;
++
++	/* IRQ is initially disabled; it gets enabled in power_on */
++	disable_irq(vop->irq);
++
+ 	return 0;
+ 
+ err_disable_pm_runtime:
+ 	pm_runtime_disable(&pdev->dev);
+ 	vop_destroy_crtc(vop);
+-err_enable_irq:
+-	enable_irq(vop->irq); /* To balance out the disable_irq above */
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index c2560aae5542..4fc08c38bc0e 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1365,7 +1365,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+ 	 * of implement() working on 8 byte chunks
+ 	 */
+ 
+-	int len = hid_report_len(report) + 7;
++	u32 len = hid_report_len(report) + 7;
+ 
+ 	return kmalloc(len, flags);
+ }
+@@ -1430,7 +1430,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
+ {
+ 	char *buf;
+ 	int ret;
+-	int len;
++	u32 len;
+ 
+ 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ 	if (!buf)
+@@ -1456,14 +1456,14 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
+ }
+ EXPORT_SYMBOL_GPL(__hid_request);
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+ 		int interrupt)
+ {
+ 	struct hid_report_enum *report_enum = hid->report_enum + type;
+ 	struct hid_report *report;
+ 	struct hid_driver *hdrv;
+ 	unsigned int a;
+-	int rsize, csize = size;
++	u32 rsize, csize = size;
+ 	u8 *cdata = data;
+ 	int ret = 0;
+ 
+@@ -1521,7 +1521,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
+  *
+  * This is data entry for lower layers.
+  */
+-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
++int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
+ {
+ 	struct hid_report_enum *report_enum;
+ 	struct hid_driver *hdrv;
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 9454ac134ce2..c631d2c8988d 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -519,6 +519,9 @@
+ #define I2C_VENDOR_ID_HANTICK		0x0911
+ #define I2C_PRODUCT_ID_HANTICK_5288	0x5288
+ 
++#define I2C_VENDOR_ID_RAYD		0x2386
++#define I2C_PRODUCT_ID_RAYD_3118	0x3118
++
+ #define USB_VENDOR_ID_HANWANG		0x0b57
+ #define USB_DEVICE_ID_HANWANG_TABLET_FIRST	0x5000
+ #define USB_DEVICE_ID_HANWANG_TABLET_LAST	0x8fff
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 04d01b57d94c..0b9e06569bf5 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
+ 		break;
+ 
+ 	case POWER_SUPPLY_PROP_CAPACITY:
+-		if (dev->battery_report_type == HID_FEATURE_REPORT) {
++		if (dev->battery_status != HID_BATTERY_REPORTED &&
++		    !dev->battery_avoid_query) {
+ 			value = hidinput_query_battery_capacity(dev);
+ 			if (value < 0)
+ 				return value;
+@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
+ 		break;
+ 
+ 	case POWER_SUPPLY_PROP_STATUS:
+-		if (!dev->battery_reported &&
+-		    dev->battery_report_type == HID_FEATURE_REPORT) {
++		if (dev->battery_status != HID_BATTERY_REPORTED &&
++		    !dev->battery_avoid_query) {
+ 			value = hidinput_query_battery_capacity(dev);
+ 			if (value < 0)
+ 				return value;
+ 
+ 			dev->battery_capacity = value;
+-			dev->battery_reported = true;
++			dev->battery_status = HID_BATTERY_QUERIED;
+ 		}
+ 
+-		if (!dev->battery_reported)
++		if (dev->battery_status == HID_BATTERY_UNKNOWN)
+ 			val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ 		else if (dev->battery_capacity == 100)
+ 			val->intval = POWER_SUPPLY_STATUS_FULL;
+@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ 	dev->battery_report_type = report_type;
+ 	dev->battery_report_id = field->report->id;
+ 
++	/*
++	 * Stylus is normally not connected to the device and thus we
++	 * can't query the device and get meaningful battery strength.
++	 * We have to wait for the device to report it on its own.
++	 */
++	dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
++				   field->physical == HID_DG_STYLUS;
++
+ 	dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
+ 	if (IS_ERR(dev->battery)) {
+ 		error = PTR_ERR(dev->battery);
+@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
+ 
+ 	capacity = hidinput_scale_battery_capacity(dev, value);
+ 
+-	if (!dev->battery_reported || capacity != dev->battery_capacity) {
++	if (dev->battery_status != HID_BATTERY_REPORTED ||
++	    capacity != dev->battery_capacity) {
+ 		dev->battery_capacity = capacity;
+-		dev->battery_reported = true;
++		dev->battery_status = HID_BATTERY_REPORTED;
+ 		power_supply_changed(dev->battery);
+ 	}
+ }
+@@ -1368,7 +1378,8 @@ static void hidinput_led_worker(struct work_struct *work)
+ 					      led_work);
+ 	struct hid_field *field;
+ 	struct hid_report *report;
+-	int len, ret;
++	int ret;
++	u32 len;
+ 	__u8 *buf;
+ 
+ 	field = hidinput_get_led_field(hid);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 3b4739bde05d..2e1736ba2444 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -370,7 +370,8 @@ static const struct attribute_group mt_attribute_group = {
+ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
+ {
+ 	struct mt_device *td = hid_get_drvdata(hdev);
+-	int ret, size = hid_report_len(report);
++	int ret;
++	u32 size = hid_report_len(report);
+ 	u8 *buf;
+ 
+ 	/*
+@@ -1183,7 +1184,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
+ 	struct hid_report_enum *re;
+ 	struct mt_class *cls = &td->mtclass;
+ 	char *buf;
+-	int report_len;
++	u32 report_len;
+ 
+ 	if (td->inputmode < 0)
+ 		return;
+diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
+index c6c05df3e8d2..9c9362149641 100644
+--- a/drivers/hid/hid-rmi.c
++++ b/drivers/hid/hid-rmi.c
+@@ -89,8 +89,8 @@ struct rmi_data {
+ 	u8 *writeReport;
+ 	u8 *readReport;
+ 
+-	int input_report_size;
+-	int output_report_size;
++	u32 input_report_size;
++	u32 output_report_size;
+ 
+ 	unsigned long flags;
+ 
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index fbfcc8009432..b39844adea47 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
+ 	int ret = 0, len;
+ 	unsigned char report_number;
+ 
++	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	dev = hidraw_table[minor]->hid;
+ 
+ 	if (!dev->ll_driver->raw_request) {
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 7230243b94d3..fd9f70a8b813 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -47,6 +47,7 @@
+ /* quirks to control the device */
+ #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
+ #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
++#define I2C_HID_QUIRK_RESEND_REPORT_DESCR	BIT(2)
+ 
+ /* flags */
+ #define I2C_HID_STARTED		0
+@@ -144,10 +145,10 @@ struct i2c_hid {
+ 						   * register of the HID
+ 						   * descriptor. */
+ 	unsigned int		bufsize;	/* i2c buffer size */
+-	char			*inbuf;		/* Input buffer */
+-	char			*rawbuf;	/* Raw Input buffer */
+-	char			*cmdbuf;	/* Command buffer */
+-	char			*argsbuf;	/* Command arguments buffer */
++	u8			*inbuf;		/* Input buffer */
++	u8			*rawbuf;	/* Raw Input buffer */
++	u8			*cmdbuf;	/* Command buffer */
++	u8			*argsbuf;	/* Command arguments buffer */
+ 
+ 	unsigned long		flags;		/* device flags */
+ 	unsigned long		quirks;		/* Various quirks */
+@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
+ 		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
+ 	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
+ 		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
++	{ I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
++		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ 	{ 0, 0 }
+ };
+ 
+@@ -455,7 +458,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+-	int ret, ret_size;
++	int ret;
++	u32 ret_size;
+ 	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+ 
+ 	if (size > ihid->bufsize)
+@@ -480,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ 		return;
+ 	}
+ 
+-	if (ret_size > size) {
++	if ((ret_size > size) || (ret_size <= 2)) {
+ 		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ 			__func__, size, ret_size);
+ 		return;
+@@ -1219,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
++	/* RAYDIUM device (2386:3118) need to re-send report descr cmd
++	 * after resume, after this it will be back normal.
++	 * otherwise it issues too many incomplete reports.
++	 */
++	if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
++		ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
++		if (ret)
++			return ret;
++	}
++
+ 	if (hid->driver && hid->driver->reset_resume) {
+ 		ret = hid->driver->reset_resume(hid);
+ 		return ret;
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 409543160af7..b54ef1ffcbec 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -219,7 +219,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
+ 	unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ 	u8 *data;
+ 	int ret;
+-	int n;
++	u32 n;
+ 
+ 	switch (equivalent_usage) {
+ 	case HID_DG_CONTACTMAX:
+@@ -519,7 +519,7 @@ static int wacom_set_device_mode(struct hid_device *hdev,
+ 	u8 *rep_data;
+ 	struct hid_report *r;
+ 	struct hid_report_enum *re;
+-	int length;
++	u32 length;
+ 	int error = -ENOMEM, limit = 0;
+ 
+ 	if (wacom_wac->mode_report < 0)
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 90c38a0523e9..44b2c7b0838c 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
+ 	return tool_type;
+ }
+ 
++static void wacom_exit_report(struct wacom_wac *wacom)
++{
++	struct input_dev *input = wacom->pen_input;
++	struct wacom_features *features = &wacom->features;
++	unsigned char *data = wacom->data;
++	int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
++
++	/*
++	 * Reset all states otherwise we lose the initial states
++	 * when in-prox next time
++	 */
++	input_report_abs(input, ABS_X, 0);
++	input_report_abs(input, ABS_Y, 0);
++	input_report_abs(input, ABS_DISTANCE, 0);
++	input_report_abs(input, ABS_TILT_X, 0);
++	input_report_abs(input, ABS_TILT_Y, 0);
++	if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
++		input_report_key(input, BTN_LEFT, 0);
++		input_report_key(input, BTN_MIDDLE, 0);
++		input_report_key(input, BTN_RIGHT, 0);
++		input_report_key(input, BTN_SIDE, 0);
++		input_report_key(input, BTN_EXTRA, 0);
++		input_report_abs(input, ABS_THROTTLE, 0);
++		input_report_abs(input, ABS_RZ, 0);
++	} else {
++		input_report_abs(input, ABS_PRESSURE, 0);
++		input_report_key(input, BTN_STYLUS, 0);
++		input_report_key(input, BTN_STYLUS2, 0);
++		input_report_key(input, BTN_TOUCH, 0);
++		input_report_abs(input, ABS_WHEEL, 0);
++		if (features->type >= INTUOS3S)
++			input_report_abs(input, ABS_Z, 0);
++	}
++	input_report_key(input, wacom->tool[idx], 0);
++	input_report_abs(input, ABS_MISC, 0); /* reset tool id */
++	input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
++	wacom->id[idx] = 0;
++}
++
+ static int wacom_intuos_inout(struct wacom_wac *wacom)
+ {
+ 	struct wacom_features *features = &wacom->features;
+@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
+ 		if (!wacom->id[idx])
+ 			return 1;
+ 
+-		/*
+-		 * Reset all states otherwise we lose the initial states
+-		 * when in-prox next time
+-		 */
+-		input_report_abs(input, ABS_X, 0);
+-		input_report_abs(input, ABS_Y, 0);
+-		input_report_abs(input, ABS_DISTANCE, 0);
+-		input_report_abs(input, ABS_TILT_X, 0);
+-		input_report_abs(input, ABS_TILT_Y, 0);
+-		if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
+-			input_report_key(input, BTN_LEFT, 0);
+-			input_report_key(input, BTN_MIDDLE, 0);
+-			input_report_key(input, BTN_RIGHT, 0);
+-			input_report_key(input, BTN_SIDE, 0);
+-			input_report_key(input, BTN_EXTRA, 0);
+-			input_report_abs(input, ABS_THROTTLE, 0);
+-			input_report_abs(input, ABS_RZ, 0);
+-		} else {
+-			input_report_abs(input, ABS_PRESSURE, 0);
+-			input_report_key(input, BTN_STYLUS, 0);
+-			input_report_key(input, BTN_STYLUS2, 0);
+-			input_report_key(input, BTN_TOUCH, 0);
+-			input_report_abs(input, ABS_WHEEL, 0);
+-			if (features->type >= INTUOS3S)
+-				input_report_abs(input, ABS_Z, 0);
+-		}
+-		input_report_key(input, wacom->tool[idx], 0);
+-		input_report_abs(input, ABS_MISC, 0); /* reset tool id */
+-		input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
+-		wacom->id[idx] = 0;
++		wacom_exit_report(wacom);
+ 		return 2;
+ 	}
+ 
+@@ -1226,6 +1236,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
+ 		if (!valid)
+ 			continue;
+ 
++		if (!prox) {
++			wacom->shared->stylus_in_proximity = false;
++			wacom_exit_report(wacom);
++			input_sync(pen_input);
++			return;
++		}
+ 		if (range) {
+ 			/* Fix rotation alignment: userspace expects zero at left */
+ 			int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 692b34125866..e0d59e9ff3c6 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -966,8 +966,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
+ 	if (!(priv->features & FEATURE_HOST_NOTIFY))
+ 		return;
+ 
+-	priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+-
+ 	if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
+ 		outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
+ 		       SMBSLVCMD(priv));
+@@ -1615,6 +1613,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		outb_p(inb_p(SMBAUXCTL(priv)) &
+ 		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ 
++	/* Remember original Host Notify setting */
++	if (priv->features & FEATURE_HOST_NOTIFY)
++		priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
++
+ 	/* Default timeout in interrupt mode: 200 ms */
+ 	priv->adapter.timeout = HZ / 5;
+ 
+@@ -1699,6 +1701,15 @@ static void i801_remove(struct pci_dev *dev)
+ 	 */
+ }
+ 
++static void i801_shutdown(struct pci_dev *dev)
++{
++	struct i801_priv *priv = pci_get_drvdata(dev);
++
++	/* Restore config registers to avoid hard hang on some systems */
++	i801_disable_host_notify(priv);
++	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
++}
++
+ #ifdef CONFIG_PM
+ static int i801_suspend(struct device *dev)
+ {
+@@ -1728,6 +1739,7 @@ static struct pci_driver i801_driver = {
+ 	.id_table	= i801_ids,
+ 	.probe		= i801_probe,
+ 	.remove		= i801_remove,
++	.shutdown	= i801_shutdown,
+ 	.driver		= {
+ 		.pm	= &i801_pm_ops,
+ 	},
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index d933336d7e01..5c21ae237f82 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1241,6 +1241,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+ 	if (!optlen)
+ 		return -EINVAL;
+ 
++	if (!ctx->cm_id->device)
++		return -EINVAL;
++
+ 	memset(&sa_path, 0, sizeof(sa_path));
+ 
+ 	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 93025d2009b8..c715123742a4 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -2194,7 +2194,14 @@ static void __ib_drain_sq(struct ib_qp *qp)
+ 	struct ib_cq *cq = qp->send_cq;
+ 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ 	struct ib_drain_cqe sdrain;
+-	struct ib_send_wr swr = {}, *bad_swr;
++	struct ib_send_wr *bad_swr;
++	struct ib_rdma_wr swr = {
++		.wr = {
++			.next = NULL,
++			{ .wr_cqe	= &sdrain.cqe, },
++			.opcode	= IB_WR_RDMA_WRITE,
++		},
++	};
+ 	int ret;
+ 
+ 	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+@@ -2203,11 +2210,10 @@ static void __ib_drain_sq(struct ib_qp *qp)
+ 		return;
+ 	}
+ 
+-	swr.wr_cqe = &sdrain.cqe;
+ 	sdrain.cqe.done = ib_drain_qp_done;
+ 	init_completion(&sdrain.done);
+ 
+-	ret = ib_post_send(qp, &swr, &bad_swr);
++	ret = ib_post_send(qp, &swr.wr, &bad_swr);
+ 	if (ret) {
+ 		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ 		return;
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 3e0b3f0238d6..6857c61bdee1 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1223,6 +1223,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 			return ERR_PTR(-EINVAL);
+ 
+ 		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
++		if (IS_ERR(mr))
++			return ERR_CAST(mr);
+ 		return &mr->ibmr;
+ 	}
+ #endif
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index f4bab2cd0ec2..45594091353c 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -711,9 +711,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
+ 		memcpy(wqe->dma.sge, ibwr->sg_list,
+ 		       num_sge * sizeof(struct ib_sge));
+ 
+-	wqe->iova		= (mask & WR_ATOMIC_MASK) ?
+-					atomic_wr(ibwr)->remote_addr :
+-					rdma_wr(ibwr)->remote_addr;
++	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
++		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
+ 	wqe->mask		= mask;
+ 	wqe->dma.length		= length;
+ 	wqe->dma.resid		= length;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index b48843833d69..4a1a489ce8bb 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2974,9 +2974,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 		ret = FAST_IO_FAIL;
+ 	else
+ 		ret = FAILED;
+-	srp_free_req(ch, req, scmnd, 0);
+-	scmnd->result = DID_ABORT << 16;
+-	scmnd->scsi_done(scmnd);
++	if (ret == SUCCESS) {
++		srp_free_req(ch, req, scmnd, 0);
++		scmnd->result = DID_ABORT << 16;
++		scmnd->scsi_done(scmnd);
++	}
+ 
+ 	return ret;
+ }
+@@ -3871,12 +3873,10 @@ static ssize_t srp_create_target(struct device *dev,
+ 				      num_online_nodes());
+ 		const int ch_end = ((node_idx + 1) * target->ch_count /
+ 				    num_online_nodes());
+-		const int cv_start = (node_idx * ibdev->num_comp_vectors /
+-				      num_online_nodes() + target->comp_vector)
+-				     % ibdev->num_comp_vectors;
+-		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
+-				    num_online_nodes() + target->comp_vector)
+-				   % ibdev->num_comp_vectors;
++		const int cv_start = node_idx * ibdev->num_comp_vectors /
++				     num_online_nodes();
++		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
++				   num_online_nodes();
+ 		int cpu_idx = 0;
+ 
+ 		for_each_online_cpu(cpu) {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 0373b7c40902..f1be280e701a 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -838,16 +838,20 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
+  */
+ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+ {
+-	struct ib_send_wr wr, *bad_wr;
++	struct ib_send_wr *bad_wr;
++	struct ib_rdma_wr wr = {
++		.wr = {
++			.next		= NULL,
++			{ .wr_cqe	= &ch->zw_cqe, },
++			.opcode		= IB_WR_RDMA_WRITE,
++			.send_flags	= IB_SEND_SIGNALED,
++		}
++	};
+ 
+ 	pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ 		 ch->qp->qp_num);
+ 
+-	memset(&wr, 0, sizeof(wr));
+-	wr.opcode = IB_WR_RDMA_WRITE;
+-	wr.wr_cqe = &ch->zw_cqe;
+-	wr.send_flags = IB_SEND_SIGNALED;
+-	return ib_post_send(ch->qp, &wr, &bad_wr);
++	return ib_post_send(ch->qp, &wr.wr, &bad_wr);
+ }
+ 
+ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index 99bc9bd64b9e..9124a625fe83 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -396,6 +396,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
+ 				pasid_max - 1, GFP_KERNEL);
+ 		if (ret < 0) {
+ 			kfree(svm);
++			kfree(sdev);
+ 			goto out;
+ 		}
+ 		svm->pasid = ret;
+diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
+index 30017df5b54c..01e673c680cd 100644
+--- a/drivers/irqchip/irq-gic-common.c
++++ b/drivers/irqchip/irq-gic-common.c
+@@ -21,6 +21,8 @@
+ 
+ #include "irq-gic-common.h"
+ 
++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
++
+ static const struct gic_kvm_info *gic_kvm_info;
+ 
+ const struct gic_kvm_info *gic_get_kvm_info(void)
+@@ -53,11 +55,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
+ 	u32 confoff = (irq / 16) * 4;
+ 	u32 val, oldval;
+ 	int ret = 0;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * Read current configuration register, and insert the config
+ 	 * for "irq", depending on "type".
+ 	 */
++	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+ 	val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+ 	if (type & IRQ_TYPE_LEVEL_MASK)
+ 		val &= ~confmask;
+@@ -65,8 +69,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
+ 		val |= confmask;
+ 
+ 	/* If the current configuration is the same, then we are done */
+-	if (val == oldval)
++	if (val == oldval) {
++		raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Write back the new configuration, and possibly re-enable
+@@ -84,6 +90,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
+ 			pr_warn("GIC: PPI%d is secure or misconfigured\n",
+ 				irq - 16);
+ 	}
++	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+ 
+ 	if (sync_access)
+ 		sync_access();
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 8168f737590e..e7b4a0256949 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -148,6 +148,8 @@ struct crypt_config {
+ 	mempool_t *tag_pool;
+ 	unsigned tag_pool_max_sectors;
+ 
++	struct percpu_counter n_allocated_pages;
++
+ 	struct bio_set *bs;
+ 	struct mutex bio_alloc_lock;
+ 
+@@ -219,6 +221,12 @@ struct crypt_config {
+ #define MAX_TAG_SIZE	480
+ #define POOL_ENTRY_SIZE	512
+ 
++static DEFINE_SPINLOCK(dm_crypt_clients_lock);
++static unsigned dm_crypt_clients_n = 0;
++static volatile unsigned long dm_crypt_pages_per_client;
++#define DM_CRYPT_MEMORY_PERCENT			2
++#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_PAGES * 16)
++
+ static void clone_init(struct dm_crypt_io *, struct bio *);
+ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
+ static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
+@@ -2155,6 +2163,43 @@ static int crypt_wipe_key(struct crypt_config *cc)
+ 	return r;
+ }
+ 
++static void crypt_calculate_pages_per_client(void)
++{
++	unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
++
++	if (!dm_crypt_clients_n)
++		return;
++
++	pages /= dm_crypt_clients_n;
++	if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
++		pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
++	dm_crypt_pages_per_client = pages;
++}
++
++static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
++{
++	struct crypt_config *cc = pool_data;
++	struct page *page;
++
++	if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
++	    likely(gfp_mask & __GFP_NORETRY))
++		return NULL;
++
++	page = alloc_page(gfp_mask);
++	if (likely(page != NULL))
++		percpu_counter_add(&cc->n_allocated_pages, 1);
++
++	return page;
++}
++
++static void crypt_page_free(void *page, void *pool_data)
++{
++	struct crypt_config *cc = pool_data;
++
++	__free_page(page);
++	percpu_counter_sub(&cc->n_allocated_pages, 1);
++}
++
+ static void crypt_dtr(struct dm_target *ti)
+ {
+ 	struct crypt_config *cc = ti->private;
+@@ -2181,6 +2226,10 @@ static void crypt_dtr(struct dm_target *ti)
+ 	mempool_destroy(cc->req_pool);
+ 	mempool_destroy(cc->tag_pool);
+ 
++	if (cc->page_pool)
++		WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
++	percpu_counter_destroy(&cc->n_allocated_pages);
++
+ 	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+ 		cc->iv_gen_ops->dtr(cc);
+ 
+@@ -2197,6 +2246,12 @@ static void crypt_dtr(struct dm_target *ti)
+ 
+ 	/* Must zero key material before freeing */
+ 	kzfree(cc);
++
++	spin_lock(&dm_crypt_clients_lock);
++	WARN_ON(!dm_crypt_clients_n);
++	dm_crypt_clients_n--;
++	crypt_calculate_pages_per_client();
++	spin_unlock(&dm_crypt_clients_lock);
+ }
+ 
+ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
+@@ -2644,6 +2699,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 
+ 	ti->private = cc;
+ 
++	spin_lock(&dm_crypt_clients_lock);
++	dm_crypt_clients_n++;
++	crypt_calculate_pages_per_client();
++	spin_unlock(&dm_crypt_clients_lock);
++
++	ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
++	if (ret < 0)
++		goto bad;
++
+ 	/* Optional parameters need to be read before cipher constructor */
+ 	if (argc > 5) {
+ 		ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
+@@ -2698,7 +2762,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
+ 		      ARCH_KMALLOC_MINALIGN);
+ 
+-	cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
++	cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+ 	if (!cc->page_pool) {
+ 		ti->error = "Cannot allocate page mempool";
+ 		goto bad;
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index c1d1034ff7b7..335ebd46a986 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3408,7 +3408,8 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+ 		set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ 
+ 	} else {
+-		if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
++		if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
++		    !test_bit(MD_RECOVERY_INTR, &recovery) &&
+ 		    (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ 		     test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ 		     test_bit(MD_RECOVERY_RUNNING, &recovery)))
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 353ea0ede091..038c7572fdd4 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1477,6 +1477,23 @@ static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
+ 	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
+ }
+ 
++static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
++				  int *result)
++{
++	struct bio *bio = ci->bio;
++
++	if (bio_op(bio) == REQ_OP_DISCARD)
++		*result = __send_discard(ci, ti);
++	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
++		*result = __send_write_same(ci, ti);
++	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
++		*result = __send_write_zeroes(ci, ti);
++	else
++		return false;
++
++	return true;
++}
++
+ /*
+  * Select the correct strategy for processing a non-flush bio.
+  */
+@@ -1491,12 +1508,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)
+ 	if (!dm_target_is_valid(ti))
+ 		return -EIO;
+ 
+-	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+-		return __send_discard(ci, ti);
+-	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+-		return __send_write_same(ci, ti);
+-	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+-		return __send_write_zeroes(ci, ti);
++	if (unlikely(__process_abnormal_io(ci, ti, &r)))
++		return r;
+ 
+ 	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
+ 		len = ci->sector_count;
+@@ -1617,9 +1630,12 @@ static blk_qc_t __process_bio(struct mapped_device *md,
+ 			goto out;
+ 		}
+ 
+-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ 		ci.bio = bio;
+ 		ci.sector_count = bio_sectors(bio);
++		if (unlikely(__process_abnormal_io(&ci, ti, &error)))
++			goto out;
++
++		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ 		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+ 	}
+ out:
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index debe35fc66b4..d3f7bb33a54d 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -1696,6 +1696,15 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
+ 	for (i = 0; i < q->num_buffers; ++i) {
+ 		struct vb2_buffer *vb = q->bufs[i];
+ 
++		if (vb->state == VB2_BUF_STATE_PREPARED ||
++		    vb->state == VB2_BUF_STATE_QUEUED) {
++			unsigned int plane;
++
++			for (plane = 0; plane < vb->num_planes; ++plane)
++				call_void_memop(vb, finish,
++						vb->planes[plane].mem_priv);
++		}
++
+ 		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ 			vb->state = VB2_BUF_STATE_PREPARED;
+ 			call_void_vb_qop(vb, buf_finish, vb);
+diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
+index a651527d80db..23888fdb94fb 100644
+--- a/drivers/media/platform/vivid/vivid-vid-common.c
++++ b/drivers/media/platform/vivid/vivid-vid-common.c
+@@ -874,7 +874,8 @@ int vidioc_g_edid(struct file *file, void *_fh,
+ 		return -EINVAL;
+ 	if (edid->start_block + edid->blocks > dev->edid_blocks)
+ 		edid->blocks = dev->edid_blocks - edid->start_block;
+-	cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
++	if (adap)
++		cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
+ 	memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
+ 	return 0;
+ }
+diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
+index f7f3b4b2c2de..8bd6b2f1af15 100644
+--- a/drivers/media/platform/vsp1/vsp1_wpf.c
++++ b/drivers/media/platform/vsp1/vsp1_wpf.c
+@@ -452,7 +452,7 @@ static void wpf_configure(struct vsp1_entity *entity,
+ 			: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
+ 	}
+ 
+-	if (pipe->bru || pipe->num_inputs > 1)
++	if (pipe->bru)
+ 		srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU
+ 			? VI6_WPF_SRCRPF_VIRACT_MST
+ 			: VI6_WPF_SRCRPF_VIRACT2_MST;
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 1db8d38fed7c..9b78818c0282 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1929,12 +1929,12 @@ void rc_unregister_device(struct rc_dev *dev)
+ 	if (!dev)
+ 		return;
+ 
+-	del_timer_sync(&dev->timer_keyup);
+-	del_timer_sync(&dev->timer_repeat);
+-
+ 	if (dev->driver_type == RC_DRIVER_IR_RAW)
+ 		ir_raw_event_unregister(dev);
+ 
++	del_timer_sync(&dev->timer_keyup);
++	del_timer_sync(&dev->timer_repeat);
++
+ 	rc_free_rx_device(dev);
+ 
+ 	mutex_lock(&dev->lock);
+diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
+index 30ccba436b3b..55cd35d1a9cc 100644
+--- a/drivers/misc/cxl/cxllib.c
++++ b/drivers/misc/cxl/cxllib.c
+@@ -208,49 +208,74 @@ int cxllib_get_PE_attributes(struct task_struct *task,
+ }
+ EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
+ 
+-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
++static int get_vma_info(struct mm_struct *mm, u64 addr,
++			u64 *vma_start, u64 *vma_end,
++			unsigned long *page_size)
+ {
+-	int rc;
+-	u64 dar;
+ 	struct vm_area_struct *vma = NULL;
+-	unsigned long page_size;
+-
+-	if (mm == NULL)
+-		return -EFAULT;
++	int rc = 0;
+ 
+ 	down_read(&mm->mmap_sem);
+ 
+ 	vma = find_vma(mm, addr);
+ 	if (!vma) {
+-		pr_err("Can't find vma for addr %016llx\n", addr);
+ 		rc = -EFAULT;
+ 		goto out;
+ 	}
+-	/* get the size of the pages allocated */
+-	page_size = vma_kernel_pagesize(vma);
+-
+-	for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+-		if (dar < vma->vm_start || dar >= vma->vm_end) {
+-			vma = find_vma(mm, addr);
+-			if (!vma) {
+-				pr_err("Can't find vma for addr %016llx\n", addr);
+-				rc = -EFAULT;
+-				goto out;
+-			}
+-			/* get the size of the pages allocated */
+-			page_size = vma_kernel_pagesize(vma);
++	*page_size = vma_kernel_pagesize(vma);
++	*vma_start = vma->vm_start;
++	*vma_end = vma->vm_end;
++out:
++	up_read(&mm->mmap_sem);
++	return rc;
++}
++
++int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
++{
++	int rc;
++	u64 dar, vma_start, vma_end;
++	unsigned long page_size;
++
++	if (mm == NULL)
++		return -EFAULT;
++
++	/*
++	 * The buffer we have to process can extend over several pages
++	 * and may also cover several VMAs.
++	 * We iterate over all the pages. The page size could vary
++	 * between VMAs.
++	 */
++	rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
++	if (rc)
++		return rc;
++
++	for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
++	     dar += page_size) {
++		if (dar < vma_start || dar >= vma_end) {
++			/*
++			 * We don't hold the mm->mmap_sem semaphore
++			 * while iterating, since the semaphore is
++			 * required by one of the lower-level page
++			 * fault processing functions and it could
++			 * create a deadlock.
++			 *
++			 * It means the VMAs can be altered between 2
++			 * loop iterations and we could theoretically
++			 * miss a page (however unlikely). But that's
++			 * not really a problem, as the driver will
++			 * retry access, get another page fault on the
++			 * missing page and call us again.
++			 */
++			rc = get_vma_info(mm, dar, &vma_start, &vma_end,
++					&page_size);
++			if (rc)
++				return rc;
+ 		}
+ 
+ 		rc = cxl_handle_mm_fault(mm, flags, dar);
+-		if (rc) {
+-			pr_err("cxl_handle_mm_fault failed %d", rc);
+-			rc = -EFAULT;
+-			goto out;
+-		}
++		if (rc)
++			return -EFAULT;
+ 	}
+-	rc = 0;
+-out:
+-	up_read(&mm->mmap_sem);
+-	return rc;
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(cxllib_handle_fault);
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 2cfb963d9f37..9c6f639d8a57 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -3087,6 +3087,7 @@ static void __exit mmc_blk_exit(void)
+ 	mmc_unregister_driver(&mmc_driver);
+ 	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
+ 	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
++	bus_unregister(&mmc_rpmb_bus_type);
+ }
+ 
+ module_init(mmc_blk_init);
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index 712e08d9a45e..a0168e9e4fce 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -362,9 +362,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
+ 		host->irq_mask &= ~irq;
+ 	else
+ 		host->irq_mask |= irq;
+-	spin_unlock_irqrestore(&host->lock, flags);
+ 
+ 	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
++	spin_unlock_irqrestore(&host->lock, flags);
+ }
+ 
+ static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 82c4f05f91d8..0a0852524491 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -1318,7 +1318,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
+ 	pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
+ }
+ 
+-static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
++static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
+ {
+ 	struct sdhci_pci_slot *slot = sdhci_priv(host);
+ 	struct pci_dev *pdev = slot->chip->pdev;
+@@ -1357,6 +1357,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
+ 	return 0;
+ }
+ 
++static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
++{
++	struct sdhci_host *host = mmc_priv(mmc);
++
++	/* AMD requires custom HS200 tuning */
++	if (host->timing == MMC_TIMING_MMC_HS200)
++		return amd_execute_tuning_hs200(host, opcode);
++
++	/* Otherwise perform standard SDHCI tuning */
++	return sdhci_execute_tuning(mmc, opcode);
++}
++
++static int amd_probe_slot(struct sdhci_pci_slot *slot)
++{
++	struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
++
++	ops->execute_tuning = amd_execute_tuning;
++
++	return 0;
++}
++
+ static int amd_probe(struct sdhci_pci_chip *chip)
+ {
+ 	struct pci_dev	*smbus_dev;
+@@ -1391,12 +1412,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
+ 	.set_bus_width			= sdhci_set_bus_width,
+ 	.reset				= sdhci_reset,
+ 	.set_uhs_signaling		= sdhci_set_uhs_signaling,
+-	.platform_execute_tuning	= amd_execute_tuning,
+ };
+ 
+ static const struct sdhci_pci_fixes sdhci_amd = {
+ 	.probe		= amd_probe,
+ 	.ops		= &amd_sdhci_pci_ops,
++	.probe_slot	= amd_probe_slot,
+ };
+ 
+ static const struct pci_device_id pci_ids[] = {
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 33494241245a..8fce18253465 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -911,7 +911,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
+ 		host->check_scc_error(host);
+ 
+ 	/* If SET_BLOCK_COUNT, continue with main command */
+-	if (host->mrq) {
++	if (host->mrq && !mrq->cmd->error) {
+ 		tmio_process_mrq(host, mrq);
+ 		return;
+ 	}
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
+index b1fc28f63882..d0b63bbf46a7 100644
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+ 	 * in any case.
+ 	 */
+ 	if (mode & FMODE_WRITE) {
+-		ret = -EPERM;
++		ret = -EROFS;
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index e941395de3ae..753494e042d5 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -854,6 +854,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
++	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
++	 * will die soon and you will lose all your data.
++	 */
++	if (mtd->type == MTD_MLCNANDFLASH) {
++		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
++			mtd->index);
++		return -EINVAL;
++	}
++
+ 	if (ubi_num == UBI_DEV_NUM_AUTO) {
+ 		/* Search for an empty slot in the @ubi_devices array */
+ 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 590d967011bb..98f7d6be8d1f 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ {
+ 	int i;
+ 
+-	flush_work(&ubi->fm_work);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ 
+diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
+index f8913b8124b6..233907889f96 100644
+--- a/drivers/nvdimm/dimm.c
++++ b/drivers/nvdimm/dimm.c
+@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
+ 	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
+ 	nd_label_copy(ndd, to_next_namespace_index(ndd),
+ 			to_current_namespace_index(ndd));
+-	rc = nd_label_reserve_dpa(ndd);
+-	if (ndd->ns_current >= 0)
+-		nvdimm_set_aliasing(dev);
++	if (ndd->ns_current >= 0) {
++		rc = nd_label_reserve_dpa(ndd);
++		if (rc == 0)
++			nvdimm_set_aliasing(dev);
++	}
+ 	nvdimm_clear_locked(dev);
+ 	nvdimm_bus_unlock(dev);
+ 
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
+index 097794d9f786..175e200939b0 100644
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
+ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+ {
+ 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
++	int rc = validate_dimm(ndd), cmd_rc = 0;
+ 	struct nd_cmd_get_config_data_hdr *cmd;
+ 	struct nvdimm_bus_descriptor *nd_desc;
+-	int rc = validate_dimm(ndd);
+ 	u32 max_cmd_size, config_size;
+ 	size_t offset;
+ 
+@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+ 		cmd->in_offset = offset;
+ 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ 				ND_CMD_GET_CONFIG_DATA, cmd,
+-				cmd->in_length + sizeof(*cmd), NULL);
+-		if (rc || cmd->status) {
+-			rc = -ENXIO;
++				cmd->in_length + sizeof(*cmd), &cmd_rc);
++		if (rc < 0)
++			break;
++		if (cmd_rc < 0) {
++			rc = cmd_rc;
+ 			break;
+ 		}
+ 		memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
+ 		void *buf, size_t len)
+ {
+-	int rc = validate_dimm(ndd);
+ 	size_t max_cmd_size, buf_offset;
+ 	struct nd_cmd_set_config_hdr *cmd;
++	int rc = validate_dimm(ndd), cmd_rc = 0;
+ 	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ 	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+ 
+@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
+ 	for (buf_offset = 0; len; len -= cmd->in_length,
+ 			buf_offset += cmd->in_length) {
+ 		size_t cmd_size;
+-		u32 *status;
+ 
+ 		cmd->in_offset = offset + buf_offset;
+ 		cmd->in_length = min(max_cmd_size, len);
+@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
+ 
+ 		/* status is output in the last 4-bytes of the command buffer */
+ 		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
+-		status = ((void *) cmd) + cmd_size - sizeof(u32);
+ 
+ 		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+-				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
+-		if (rc || *status) {
+-			rc = rc ? rc : -ENXIO;
++				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
++		if (rc < 0)
++			break;
++		if (cmd_rc < 0) {
++			rc = cmd_rc;
+ 			break;
+ 		}
+ 	}
+diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
+index 658ada497be0..6747d899f46e 100644
+--- a/drivers/nvdimm/namespace_devs.c
++++ b/drivers/nvdimm/namespace_devs.c
+@@ -1926,7 +1926,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
+ 	}
+ 
+ 	if (i < nd_region->ndr_mappings) {
+-		struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
++		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
+ 
+ 		/*
+ 		 * Give up if we don't find an instance of a uuid at each
+@@ -1934,7 +1934,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
+ 		 * find a dimm with two instances of the same uuid.
+ 		 */
+ 		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
+-				dev_name(ndd->dev), nd_label->uuid);
++				nvdimm_name(nvdimm), nd_label->uuid);
+ 		rc = -EINVAL;
+ 		goto err;
+ 	}
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index e2198a2feeca..b45b375c0e6c 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -541,6 +541,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ {
+ 	unsigned long long sta = 0;
+ 	struct acpiphp_func *func;
++	u32 dvid;
+ 
+ 	list_for_each_entry(func, &slot->funcs, sibling) {
+ 		if (func->flags & FUNC_HAS_STA) {
+@@ -551,19 +552,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ 			if (ACPI_SUCCESS(status) && sta)
+ 				break;
+ 		} else {
+-			u32 dvid;
+-
+-			pci_bus_read_config_dword(slot->bus,
+-						  PCI_DEVFN(slot->device,
+-							    func->function),
+-						  PCI_VENDOR_ID, &dvid);
+-			if (dvid != 0xffffffff) {
++			if (pci_bus_read_dev_vendor_id(slot->bus,
++					PCI_DEVFN(slot->device, func->function),
++					&dvid, 0)) {
+ 				sta = ACPI_STA_ALL;
+ 				break;
+ 			}
+ 		}
+ 	}
+ 
++	if (!sta) {
++		/*
++		 * Check for the slot itself since it may be that the
++		 * ACPI slot is a device below PCIe upstream port so in
++		 * that case it may not even be reachable yet.
++		 */
++		if (pci_bus_read_dev_vendor_id(slot->bus,
++				PCI_DEVFN(slot->device, 0), &dvid, 0)) {
++			sta = ACPI_STA_ALL;
++		}
++	}
++
+ 	return (unsigned int)sta;
+ }
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 46d47bd6ca1f..81241f981ad7 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4815,9 +4815,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
+ 
+ 	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
+ }
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
+ 
+ #ifdef CONFIG_PCI_ATS
+ /*
+diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
+index aa857be692cf..d5ae307ef4e1 100644
+--- a/drivers/phy/allwinner/phy-sun4i-usb.c
++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
+@@ -410,11 +410,13 @@ static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
+ 		return true;
+ 
+ 	/*
+-	 * The A31 companion pmic (axp221) does not generate vbus change
+-	 * interrupts when the board is driving vbus, so we must poll
++	 * The A31/A23/A33 companion pmics (AXP221/AXP223) do not
++	 * generate vbus change interrupts when the board is driving
++	 * vbus using the N_VBUSEN pin on the pmic, so we must poll
+ 	 * when using the pmic for vbus-det _and_ we're driving vbus.
+ 	 */
+-	if (data->cfg->type == sun6i_a31_phy &&
++	if ((data->cfg->type == sun6i_a31_phy ||
++	     data->cfg->type == sun8i_a33_phy) &&
+ 	    data->vbus_power_supply && data->phys[0].regulator_on)
+ 		return true;
+ 
+@@ -885,7 +887,7 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
+ 
+ static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
+ 	.num_phys = 2,
+-	.type = sun4i_a10_phy,
++	.type = sun6i_a31_phy,
+ 	.disc_thresh = 3,
+ 	.phyctl_offset = REG_PHYCTL_A10,
+ 	.dedicated_clocks = true,
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index f5d97e0ad52b..98b0a933a946 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -29,7 +29,9 @@
+ #define PWMGDUR			0x0c
+ #define PWMWAVENUM		0x28
+ #define PWMDWIDTH		0x2c
++#define PWM45DWIDTH_FIXUP	0x30
+ #define PWMTHRES		0x30
++#define PWM45THRES_FIXUP	0x34
+ 
+ #define PWM_CLK_DIV_MAX		7
+ 
+@@ -54,6 +56,7 @@ static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
+ 
+ struct mtk_pwm_platform_data {
+ 	unsigned int num_pwms;
++	bool pwm45_fixup;
+ };
+ 
+ /**
+@@ -66,6 +69,7 @@ struct mtk_pwm_chip {
+ 	struct pwm_chip chip;
+ 	void __iomem *regs;
+ 	struct clk *clks[MTK_CLK_MAX];
++	const struct mtk_pwm_platform_data *soc;
+ };
+ 
+ static const unsigned int mtk_pwm_reg_offset[] = {
+@@ -131,18 +135,25 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ {
+ 	struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
+ 	struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
+-	u32 resolution, clkdiv = 0;
++	u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
++	    reg_thres = PWMTHRES;
++	u64 resolution;
+ 	int ret;
+ 
+ 	ret = mtk_pwm_clk_enable(chip, pwm);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	resolution = NSEC_PER_SEC / clk_get_rate(clk);
++	/* Using resolution in picosecond gets accuracy higher */
++	resolution = (u64)NSEC_PER_SEC * 1000;
++	do_div(resolution, clk_get_rate(clk));
+ 
+-	while (period_ns / resolution > 8191) {
++	cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
++	while (cnt_period > 8191) {
+ 		resolution *= 2;
+ 		clkdiv++;
++		cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
++						   resolution);
+ 	}
+ 
+ 	if (clkdiv > PWM_CLK_DIV_MAX) {
+@@ -151,9 +162,19 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 		return -EINVAL;
+ 	}
+ 
++	if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
++		/*
++		 * PWM[4,5] has distinct offset for PWMDWIDTH and PWMTHRES
++		 * from the other PWMs on MT7623.
++		 */
++		reg_width = PWM45DWIDTH_FIXUP;
++		reg_thres = PWM45THRES_FIXUP;
++	}
++
++	cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
+ 	mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
+-	mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
+-	mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
++	mtk_pwm_writel(pc, pwm->hwpwm, reg_width, cnt_period);
++	mtk_pwm_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
+ 
+ 	mtk_pwm_clk_disable(chip, pwm);
+ 
+@@ -211,6 +232,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
+ 	data = of_device_get_match_data(&pdev->dev);
+ 	if (data == NULL)
+ 		return -EINVAL;
++	pc->soc = data;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	pc->regs = devm_ioremap_resource(&pdev->dev, res);
+@@ -251,14 +273,17 @@ static int mtk_pwm_remove(struct platform_device *pdev)
+ 
+ static const struct mtk_pwm_platform_data mt2712_pwm_data = {
+ 	.num_pwms = 8,
++	.pwm45_fixup = false,
+ };
+ 
+ static const struct mtk_pwm_platform_data mt7622_pwm_data = {
+ 	.num_pwms = 6,
++	.pwm45_fixup = false,
+ };
+ 
+ static const struct mtk_pwm_platform_data mt7623_pwm_data = {
+ 	.num_pwms = 5,
++	.pwm45_fixup = true,
+ };
+ 
+ static const struct of_device_id mtk_pwm_of_match[] = {
+diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
+index 1c85ecc9e7ac..0fcf94ffad32 100644
+--- a/drivers/pwm/pwm-rcar.c
++++ b/drivers/pwm/pwm-rcar.c
+@@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	if (div < 0)
+ 		return div;
+ 
+-	/* Let the core driver set pwm->period if disabled and duty_ns == 0 */
+-	if (!pwm_is_enabled(pwm) && !duty_ns)
++	/*
++	 * Let the core driver set pwm->period if disabled and duty_ns == 0.
++	 * But, this driver should prevent to set the new duty_ns if current
++	 * duty_cycle is not set
++	 */
++	if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
+ 		return 0;
+ 
+ 	rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
+diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
+index 435ce5ec648a..59bd749c2f25 100644
+--- a/drivers/soc/mediatek/mtk-scpsys.c
++++ b/drivers/soc/mediatek/mtk-scpsys.c
+@@ -992,7 +992,7 @@ static int scpsys_probe(struct platform_device *pdev)
+ 
+ 	pd_data = &scp->pd_data;
+ 
+-	for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) {
++	for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) {
+ 		ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
+ 					     pd_data->domains[sd->subdomain]);
+ 		if (ret && IS_ENABLED(CONFIG_PM))
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index 4a11fc0d4136..b7936f815373 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1512,6 +1512,11 @@ static void atmel_spi_init(struct atmel_spi *as)
+ {
+ 	spi_writel(as, CR, SPI_BIT(SWRST));
+ 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
++
++	/* It is recommended to enable FIFOs first thing after reset */
++	if (as->fifo_size)
++		spi_writel(as, CR, SPI_BIT(FIFOEN));
++
+ 	if (as->caps.has_wdrbt) {
+ 		spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
+ 				| SPI_BIT(MSTR));
+@@ -1522,9 +1527,6 @@ static void atmel_spi_init(struct atmel_spi *as)
+ 	if (as->use_pdc)
+ 		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ 	spi_writel(as, CR, SPI_BIT(SPIEN));
+-
+-	if (as->fifo_size)
+-		spi_writel(as, CR, SPI_BIT(FIFOEN));
+ }
+ 
+ static int atmel_spi_probe(struct platform_device *pdev)
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index b33a727a0158..7b213faa0a2b 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -779,8 +779,14 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+ 	for (i = 0; i < sgs; i++) {
+ 
+ 		if (vmalloced_buf || kmap_buf) {
+-			min = min_t(size_t,
+-				    len, desc_len - offset_in_page(buf));
++			/*
++			 * Next scatterlist entry size is the minimum between
++			 * the desc_len and the remaining buffer length that
++			 * fits in a page.
++			 */
++			min = min_t(size_t, desc_len,
++				    min_t(size_t, len,
++					  PAGE_SIZE - offset_in_page(buf)));
+ 			if (vmalloced_buf)
+ 				vm_page = vmalloc_to_page(buf);
+ 			else
+@@ -2254,12 +2260,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ 	mutex_lock(&board_lock);
+ 	found = idr_find(&spi_master_idr, id);
+ 	mutex_unlock(&board_lock);
+-	if (found != ctlr) {
+-		dev_dbg(&ctlr->dev,
+-			"attempting to delete unregistered controller [%s]\n",
+-			dev_name(&ctlr->dev));
+-		return;
+-	}
+ 	if (ctlr->queued) {
+ 		if (spi_destroy_queue(ctlr))
+ 			dev_err(&ctlr->dev, "queue remove failed\n");
+@@ -2272,7 +2272,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ 	device_unregister(&ctlr->dev);
+ 	/* free bus id */
+ 	mutex_lock(&board_lock);
+-	idr_remove(&spi_master_idr, id);
++	if (found == ctlr)
++		idr_remove(&spi_master_idr, id);
+ 	mutex_unlock(&board_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+index 4f9f9dca5e6a..545ef024841d 100644
+--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
++++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+@@ -1279,7 +1279,10 @@ const struct v4l2_file_operations atomisp_fops = {
+ 	.mmap = atomisp_mmap,
+ 	.unlocked_ioctl = video_ioctl2,
+ #ifdef CONFIG_COMPAT
++	/*
++	 * There are problems with this code. Disable this for now.
+ 	.compat_ioctl32 = atomisp_compat_ioctl32,
++	 */
+ #endif
+ 	.poll = atomisp_poll,
+ };
+@@ -1291,7 +1294,10 @@ const struct v4l2_file_operations atomisp_file_fops = {
+ 	.mmap = atomisp_file_mmap,
+ 	.unlocked_ioctl = video_ioctl2,
+ #ifdef CONFIG_COMPAT
++	/*
++	 * There are problems with this code. Disable this for now.
+ 	.compat_ioctl32 = atomisp_compat_ioctl32,
++	 */
+ #endif
+ 	.poll = atomisp_poll,
+ };
+diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
+index a67781b7a0b2..ee3a215b333a 100644
+--- a/drivers/thermal/imx_thermal.c
++++ b/drivers/thermal/imx_thermal.c
+@@ -637,6 +637,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
+ 	regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
+ 	regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
+ 
++	data->irq_enabled = true;
++	data->mode = THERMAL_DEVICE_ENABLED;
++
+ 	ret = devm_request_threaded_irq(&pdev->dev, data->irq,
+ 			imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
+ 			0, "imx_thermal", data);
+@@ -649,9 +652,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	data->irq_enabled = true;
+-	data->mode = THERMAL_DEVICE_ENABLED;
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
+index ab02d13f40b7..3e12cb8a23cc 100644
+--- a/drivers/thunderbolt/icm.c
++++ b/drivers/thunderbolt/icm.c
+@@ -383,6 +383,15 @@ static void remove_switch(struct tb_switch *sw)
+ 	tb_switch_remove(sw);
+ }
+ 
++static void remove_xdomain(struct tb_xdomain *xd)
++{
++	struct tb_switch *sw;
++
++	sw = tb_to_switch(xd->dev.parent);
++	tb_port_at(xd->route, sw)->xdomain = NULL;
++	tb_xdomain_remove(xd);
++}
++
+ static void
+ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+ {
+@@ -391,6 +400,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+ 	struct tb_switch *sw, *parent_sw;
+ 	struct icm *icm = tb_priv(tb);
+ 	bool authorized = false;
++	struct tb_xdomain *xd;
+ 	u8 link, depth;
+ 	u64 route;
+ 	int ret;
+@@ -467,6 +477,13 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+ 		tb_switch_put(sw);
+ 	}
+ 
++	/* Remove existing XDomain connection if found */
++	xd = tb_xdomain_find_by_link_depth(tb, link, depth);
++	if (xd) {
++		remove_xdomain(xd);
++		tb_xdomain_put(xd);
++	}
++
+ 	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+ 	if (!parent_sw) {
+ 		tb_err(tb, "failed to find parent switch for %u.%u\n",
+@@ -529,15 +546,6 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+ 	tb_switch_put(sw);
+ }
+ 
+-static void remove_xdomain(struct tb_xdomain *xd)
+-{
+-	struct tb_switch *sw;
+-
+-	sw = tb_to_switch(xd->dev.parent);
+-	tb_port_at(xd->route, sw)->xdomain = NULL;
+-	tb_xdomain_remove(xd);
+-}
+-
+ static void
+ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+ {
+@@ -728,14 +736,14 @@ static bool icm_ar_is_supported(struct tb *tb)
+ static int icm_ar_get_mode(struct tb *tb)
+ {
+ 	struct tb_nhi *nhi = tb->nhi;
+-	int retries = 5;
++	int retries = 60;
+ 	u32 val;
+ 
+ 	do {
+ 		val = ioread32(nhi->iobase + REG_FW_STS);
+ 		if (val & REG_FW_STS_NVM_AUTH_DONE)
+ 			break;
+-		msleep(30);
++		msleep(50);
+ 	} while (--retries);
+ 
+ 	if (!retries) {
+@@ -915,6 +923,9 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+ 	struct icm *icm = tb_priv(tb);
+ 	u32 val;
+ 
++	if (!icm->upstream_port)
++		return -ENODEV;
++
+ 	/* Put ARC to wait for CIO reset event to happen */
+ 	val = ioread32(nhi->iobase + REG_FW_STS);
+ 	val |= REG_FW_STS_CIO_RESET_REQ;
+@@ -1054,6 +1065,9 @@ static int icm_firmware_init(struct tb *tb)
+ 			break;
+ 
+ 		default:
++			if (ret < 0)
++				return ret;
++
+ 			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+ 			return -ENODEV;
+ 		}
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index f45bcbc63738..80c33c7404f5 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1064,6 +1064,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
+ 					    * we just disable hotplug, the
+ 					    * pci-tunnels stay alive.
+ 					    */
++	.thaw_noirq = nhi_resume_noirq,
+ 	.restore_noirq = nhi_resume_noirq,
+ 	.suspend = nhi_suspend,
+ 	.freeze = nhi_suspend,
+diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
+index da54ace4dd2f..1cc79785ce42 100644
+--- a/drivers/thunderbolt/switch.c
++++ b/drivers/thunderbolt/switch.c
+@@ -716,6 +716,13 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+ 	if (sw->authorized)
+ 		goto unlock;
+ 
++	/*
++	 * Make sure there is no PCIe rescan ongoing when a new PCIe
++	 * tunnel is created. Otherwise the PCIe rescan code might find
++	 * the new tunnel too early.
++	 */
++	pci_lock_rescan_remove();
++
+ 	switch (val) {
+ 	/* Approve switch */
+ 	case 1:
+@@ -735,6 +742,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+ 		break;
+ 	}
+ 
++	pci_unlock_rescan_remove();
++
+ 	if (!ret) {
+ 		sw->authorized = val;
+ 		/* Notify status change to the userspace */
+diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
+index 83c14dda6300..bc8242bc4564 100644
+--- a/drivers/usb/core/generic.c
++++ b/drivers/usb/core/generic.c
+@@ -210,8 +210,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
+ 	if (!udev->parent)
+ 		rc = hcd_bus_suspend(udev, msg);
+ 
+-	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
+-	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++	/*
++	 * Non-root USB2 devices don't need to do anything for FREEZE
++	 * or PRETHAW. USB3 devices don't support global suspend and
++	 * needs to be selectively suspended.
++	 */
++	else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++		 && (udev->speed < USB_SPEED_SUPER))
+ 		rc = 0;
+ 	else
+ 		rc = usb_port_suspend(udev, msg);
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index e94bf91cc58a..df4569df7eaf 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -119,6 +119,9 @@ static void __dwc3_set_mode(struct work_struct *work)
+ 	if (dwc->dr_mode != USB_DR_MODE_OTG)
+ 		return;
+ 
++	if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG)
++		return;
++
+ 	switch (dwc->current_dr_role) {
+ 	case DWC3_GCTL_PRTCAP_HOST:
+ 		dwc3_host_exit(dwc);
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 3ba11136ebf0..c961a94d136b 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -222,7 +222,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
+ 	ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
+ 	if (ret) {
+ 		dev_err(dev, "couldn't add resources to dwc3 device\n");
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	dwc->pci = pci;
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 2bda4eb1e9ac..100454c514d5 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -166,18 +166,8 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+ 	dwc3_ep_inc_trb(&dep->trb_dequeue);
+ }
+ 
+-/**
+- * dwc3_gadget_giveback - call struct usb_request's ->complete callback
+- * @dep: The endpoint to whom the request belongs to
+- * @req: The request we're giving back
+- * @status: completion code for the request
+- *
+- * Must be called with controller's lock held and interrupts disabled. This
+- * function will unmap @req and call its ->complete() callback to notify upper
+- * layers that it has completed.
+- */
+-void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+-		int status)
++void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
++		struct dwc3_request *req, int status)
+ {
+ 	struct dwc3			*dwc = dep->dwc;
+ 
+@@ -190,18 +180,35 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 
+ 	if (req->trb)
+ 		usb_gadget_unmap_request_by_dev(dwc->sysdev,
+-						&req->request, req->direction);
++				&req->request, req->direction);
+ 
+ 	req->trb = NULL;
+-
+ 	trace_dwc3_gadget_giveback(req);
+ 
++	if (dep->number > 1)
++		pm_runtime_put(dwc->dev);
++}
++
++/**
++ * dwc3_gadget_giveback - call struct usb_request's ->complete callback
++ * @dep: The endpoint to whom the request belongs to
++ * @req: The request we're giving back
++ * @status: completion code for the request
++ *
++ * Must be called with controller's lock held and interrupts disabled. This
++ * function will unmap @req and call its ->complete() callback to notify upper
++ * layers that it has completed.
++ */
++void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
++		int status)
++{
++	struct dwc3			*dwc = dep->dwc;
++
++	dwc3_gadget_del_and_unmap_request(dep, req, status);
++
+ 	spin_unlock(&dwc->lock);
+ 	usb_gadget_giveback_request(&dep->endpoint, &req->request);
+ 	spin_lock(&dwc->lock);
+-
+-	if (dep->number > 1)
+-		pm_runtime_put(dwc->dev);
+ }
+ 
+ /**
+@@ -1227,7 +1234,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
+ 		if (req->trb)
+ 			memset(req->trb, 0, sizeof(struct dwc3_trb));
+ 		dep->queued_requests--;
+-		dwc3_gadget_giveback(dep, req, ret);
++		dwc3_gadget_del_and_unmap_request(dep, req, ret);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index 4eb96b91cc40..e8f35db42394 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -404,7 +404,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 		if (err) {
+ 			ERROR(midi, "%s: couldn't enqueue request: %d\n",
+ 				    midi->out_ep->name, err);
+-			free_ep_req(midi->out_ep, req);
++			if (req->buf != NULL)
++				free_ep_req(midi->out_ep, req);
+ 			return err;
+ 		}
+ 	}
+diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
+index c3fbef2bb5db..09f90447fed5 100644
+--- a/drivers/usb/gadget/u_f.h
++++ b/drivers/usb/gadget/u_f.h
+@@ -61,7 +61,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
+ /* Frees a usb_request previously allocated by alloc_ep_req() */
+ static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+ {
++	WARN_ON(req->buf == NULL);
+ 	kfree(req->buf);
++	req->buf = NULL;
+ 	usb_ep_free_request(ep, req);
+ }
+ 
+diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
+index 1f8b19d9cf97..6a266687ca99 100644
+--- a/drivers/usb/gadget/udc/core.c
++++ b/drivers/usb/gadget/udc/core.c
+@@ -238,6 +238,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
+  * arranges to poll once per interval, and the gadget driver usually will
+  * have queued some data to transfer at that time.
+  *
++ * Note that @req's ->complete() callback must never be called from
++ * within usb_ep_queue() as that can create deadlock situations.
++ *
+  * Returns zero, or a negative error code.  Endpoints that are not enabled
+  * report errors; errors will also be
+  * reported when the usb peripheral is disconnected.
+diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
+index 18da4873e52e..91a5027b5c1f 100644
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -89,15 +89,19 @@ static int service_tx_status_request(
+ 		}
+ 
+ 		is_in = epnum & USB_DIR_IN;
+-		if (is_in) {
+-			epnum &= 0x0f;
++		epnum &= 0x0f;
++		if (epnum >= MUSB_C_NUM_EPS) {
++			handled = -EINVAL;
++			break;
++		}
++
++		if (is_in)
+ 			ep = &musb->endpoints[epnum].ep_in;
+-		} else {
++		else
+ 			ep = &musb->endpoints[epnum].ep_out;
+-		}
+ 		regs = musb->endpoints[epnum].regs;
+ 
+-		if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
++		if (!ep->desc) {
+ 			handled = -EINVAL;
+ 			break;
+ 		}
+diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
+index 3a33c5344bd5..9a1c761258ce 100644
+--- a/drivers/watchdog/f71808e_wdt.c
++++ b/drivers/watchdog/f71808e_wdt.c
+@@ -496,7 +496,7 @@ static bool watchdog_is_running(void)
+ 
+ 	is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
+ 		&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
+-			& F71808FG_FLAG_WD_EN);
++			& BIT(F71808FG_FLAG_WD_EN));
+ 
+ 	superio_exit(watchdog.sioaddr);
+ 
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index 82e8f6edfb48..b12e37f27530 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
+ 
+ 	autofs4_del_active(dentry);
+ 
+-	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
++	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
+ 	if (!inode)
+ 		return -ENOMEM;
+ 	d_add(dentry, inode);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index c6ec5aa46100..236313efd347 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -660,13 +660,15 @@ void ceph_fill_file_time(struct inode *inode, int issued,
+ 		      CEPH_CAP_FILE_BUFFER|
+ 		      CEPH_CAP_AUTH_EXCL|
+ 		      CEPH_CAP_XATTR_EXCL)) {
+-		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
++		if (ci->i_version == 0 ||
++		    timespec_compare(ctime, &inode->i_ctime) > 0) {
+ 			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
+ 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ 			     ctime->tv_sec, ctime->tv_nsec);
+ 			inode->i_ctime = *ctime;
+ 		}
+-		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
++		if (ci->i_version == 0 ||
++		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
+ 			/* the MDS did a utimes() */
+ 			dout("mtime %ld.%09ld -> %ld.%09ld "
+ 			     "tw %d -> %d\n",
+@@ -786,7 +788,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ 	new_issued = ~issued & le32_to_cpu(info->cap.caps);
+ 
+ 	/* update inode */
+-	ci->i_version = le64_to_cpu(info->version);
+ 	inode->i_rdev = le32_to_cpu(info->rdev);
+ 	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ 
+@@ -857,6 +858,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ 		xattr_blob = NULL;
+ 	}
+ 
++	/* finally update i_version */
++	ci->i_version = le64_to_cpu(info->version);
++
+ 	inode->i_mapping->a_ops = &ceph_aops;
+ 
+ 	switch (inode->i_mode & S_IFMT) {
+diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
+index 687da62daf4e..e901ef6a4813 100644
+--- a/fs/cifs/Kconfig
++++ b/fs/cifs/Kconfig
+@@ -189,6 +189,7 @@ config CIFS_NFSD_EXPORT
+ config CIFS_SMB311
+ 	bool "SMB3.1.1 network file system support (Experimental)"
+ 	depends on CIFS
++	select CRYPTO_SHA512
+ 
+ 	help
+ 	  This enables experimental support for the newest, SMB3.1.1, dialect.
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index f2b0a7f124da..a6ef088e057b 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -36,37 +36,6 @@
+ #include <crypto/skcipher.h>
+ #include <crypto/aead.h>
+ 
+-static int
+-cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
+-{
+-	int rc;
+-	unsigned int size;
+-
+-	if (server->secmech.sdescmd5 != NULL)
+-		return 0; /* already allocated */
+-
+-	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
+-	if (IS_ERR(server->secmech.md5)) {
+-		cifs_dbg(VFS, "could not allocate crypto md5\n");
+-		rc = PTR_ERR(server->secmech.md5);
+-		server->secmech.md5 = NULL;
+-		return rc;
+-	}
+-
+-	size = sizeof(struct shash_desc) +
+-			crypto_shash_descsize(server->secmech.md5);
+-	server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
+-	if (!server->secmech.sdescmd5) {
+-		crypto_free_shash(server->secmech.md5);
+-		server->secmech.md5 = NULL;
+-		return -ENOMEM;
+-	}
+-	server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
+-	server->secmech.sdescmd5->shash.flags = 0x0;
+-
+-	return 0;
+-}
+-
+ int __cifs_calc_signature(struct smb_rqst *rqst,
+ 			struct TCP_Server_Info *server, char *signature,
+ 			struct shash_desc *shash)
+@@ -132,13 +101,10 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
+ 	if (!rqst->rq_iov || !signature || !server)
+ 		return -EINVAL;
+ 
+-	if (!server->secmech.sdescmd5) {
+-		rc = cifs_crypto_shash_md5_allocate(server);
+-		if (rc) {
+-			cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__);
+-			return -1;
+-		}
+-	}
++	rc = cifs_alloc_hash("md5", &server->secmech.md5,
++			     &server->secmech.sdescmd5);
++	if (rc)
++		return -1;
+ 
+ 	rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
+ 	if (rc) {
+@@ -663,37 +629,6 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
+ 	return rc;
+ }
+ 
+-static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
+-{
+-	int rc;
+-	unsigned int size;
+-
+-	/* check if already allocated */
+-	if (server->secmech.sdeschmacmd5)
+-		return 0;
+-
+-	server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
+-	if (IS_ERR(server->secmech.hmacmd5)) {
+-		cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
+-		rc = PTR_ERR(server->secmech.hmacmd5);
+-		server->secmech.hmacmd5 = NULL;
+-		return rc;
+-	}
+-
+-	size = sizeof(struct shash_desc) +
+-			crypto_shash_descsize(server->secmech.hmacmd5);
+-	server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
+-	if (!server->secmech.sdeschmacmd5) {
+-		crypto_free_shash(server->secmech.hmacmd5);
+-		server->secmech.hmacmd5 = NULL;
+-		return -ENOMEM;
+-	}
+-	server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
+-	server->secmech.sdeschmacmd5->shash.flags = 0x0;
+-
+-	return 0;
+-}
+-
+ int
+ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ {
+@@ -757,9 +692,10 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	mutex_lock(&ses->server->srv_mutex);
+ 
+-	rc = crypto_hmacmd5_alloc(ses->server);
++	rc = cifs_alloc_hash("hmac(md5)",
++			     &ses->server->secmech.hmacmd5,
++			     &ses->server->secmech.sdeschmacmd5);
+ 	if (rc) {
+-		cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
+ 		goto unlock;
+ 	}
+ 
+@@ -893,6 +829,11 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
+ 		server->secmech.md5 = NULL;
+ 	}
+ 
++	if (server->secmech.sha512) {
++		crypto_free_shash(server->secmech.sha512);
++		server->secmech.sha512 = NULL;
++	}
++
+ 	if (server->secmech.hmacmd5) {
+ 		crypto_free_shash(server->secmech.hmacmd5);
+ 		server->secmech.hmacmd5 = NULL;
+@@ -916,4 +857,6 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
+ 	server->secmech.sdeschmacmd5 = NULL;
+ 	kfree(server->secmech.sdescmd5);
+ 	server->secmech.sdescmd5 = NULL;
++	kfree(server->secmech.sdescsha512);
++	server->secmech.sdescsha512 = NULL;
+ }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 32cdea67bbfd..f715609b13f3 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1486,6 +1486,7 @@ MODULE_SOFTDEP("pre: nls");
+ MODULE_SOFTDEP("pre: aes");
+ MODULE_SOFTDEP("pre: cmac");
+ MODULE_SOFTDEP("pre: sha256");
++MODULE_SOFTDEP("pre: sha512");
+ MODULE_SOFTDEP("pre: aead2");
+ MODULE_SOFTDEP("pre: ccm");
+ module_init(init_cifs)
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 48f7c197cd2d..edc640db0842 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -130,10 +130,12 @@ struct cifs_secmech {
+ 	struct crypto_shash *md5; /* md5 hash function */
+ 	struct crypto_shash *hmacsha256; /* hmac-sha256 hash function */
+ 	struct crypto_shash *cmacaes; /* block-cipher based MAC function */
++	struct crypto_shash *sha512; /* sha512 hash function */
+ 	struct sdesc *sdeschmacmd5;  /* ctxt to generate ntlmv2 hash, CR1 */
+ 	struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */
+ 	struct sdesc *sdeschmacsha256;  /* ctxt to generate smb2 signature */
+ 	struct sdesc *sdesccmacaes;  /* ctxt to generate smb3 signature */
++	struct sdesc *sdescsha512; /* ctxt to generate smb3.11 signing key */
+ 	struct crypto_aead *ccmaesencrypt; /* smb3 encryption aead */
+ 	struct crypto_aead *ccmaesdecrypt; /* smb3 decryption aead */
+ };
+@@ -673,7 +675,8 @@ struct TCP_Server_Info {
+ 	unsigned int	max_read;
+ 	unsigned int	max_write;
+ #ifdef CONFIG_CIFS_SMB311
+-	__u8	preauth_sha_hash[64]; /* save initital negprot hash */
++	 /* save initital negprot hash */
++	__u8	preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ #endif /* 3.1.1 */
+ 	struct delayed_work reconnect; /* reconnect workqueue job */
+ 	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+@@ -862,7 +865,7 @@ struct cifs_ses {
+ 	__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+ 	__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+ #ifdef CONFIG_CIFS_SMB311
+-	__u8 preauth_sha_hash[64];
++	__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ #endif /* 3.1.1 */
+ };
+ 
+@@ -1466,6 +1469,7 @@ struct dfs_info3_param {
+ #define CIFS_FATTR_NEED_REVAL		0x4
+ #define CIFS_FATTR_INO_COLLISION	0x8
+ #define CIFS_FATTR_UNKNOWN_NLINK	0x10
++#define CIFS_FATTR_FAKE_ROOT_INO	0x20
+ 
+ struct cifs_fattr {
+ 	u32		cf_flags;
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index 93d565186698..365a414a75e9 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -542,4 +542,9 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
+ struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
+ void cifs_aio_ctx_release(struct kref *refcount);
+ int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
++
++int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
++		    struct sdesc **sdesc);
++void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
++
+ #endif			/* _CIFSPROTO_H */
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 8f9a8cc7cc62..ef8580139cef 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -707,6 +707,18 @@ cifs_get_file_info(struct file *filp)
+ 	return rc;
+ }
+ 
++/* Simple function to return a 64 bit hash of string.  Rarely called */
++static __u64 simple_hashstr(const char *str)
++{
++	const __u64 hash_mult =  1125899906842597L; /* a big enough prime */
++	__u64 hash = 0;
++
++	while (*str)
++		hash = (hash + (__u64) *str++) * hash_mult;
++
++	return hash;
++}
++
+ int
+ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 		    FILE_ALL_INFO *data, struct super_block *sb, int xid,
+@@ -816,6 +828,14 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 						 tmprc);
+ 					fattr.cf_uniqueid = iunique(sb, ROOT_I);
+ 					cifs_autodisable_serverino(cifs_sb);
++				} else if ((fattr.cf_uniqueid == 0) &&
++						strlen(full_path) == 0) {
++					/* some servers ret bad root ino ie 0 */
++					cifs_dbg(FYI, "Invalid (0) inodenum\n");
++					fattr.cf_flags |=
++						CIFS_FATTR_FAKE_ROOT_INO;
++					fattr.cf_uniqueid =
++						simple_hashstr(tcon->treeName);
+ 				}
+ 			}
+ 		} else
+@@ -832,6 +852,16 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 				&fattr.cf_uniqueid, data);
+ 			if (tmprc)
+ 				fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
++			else if ((fattr.cf_uniqueid == 0) &&
++					strlen(full_path) == 0) {
++				/*
++				 * Reuse existing root inode num since
++				 * inum zero for root causes ls of . and .. to
++				 * not be returned
++				 */
++				cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
++				fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
++			}
+ 		} else
+ 			fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+ 	}
+@@ -893,6 +923,9 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
+ 	}
+ 
+ cgii_exit:
++	if ((*inode) && ((*inode)->i_ino == 0))
++		cifs_dbg(FYI, "inode number of zero returned\n");
++
+ 	kfree(buf);
+ 	cifs_put_tlink(tlink);
+ 	return rc;
+diff --git a/fs/cifs/link.c b/fs/cifs/link.c
+index 60b5a11ee11b..889a840172eb 100644
+--- a/fs/cifs/link.c
++++ b/fs/cifs/link.c
+@@ -50,25 +50,12 @@ static int
+ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+ {
+ 	int rc;
+-	unsigned int size;
+-	struct crypto_shash *md5;
+-	struct sdesc *sdescmd5;
+-
+-	md5 = crypto_alloc_shash("md5", 0, 0);
+-	if (IS_ERR(md5)) {
+-		rc = PTR_ERR(md5);
+-		cifs_dbg(VFS, "%s: Crypto md5 allocation error %d\n",
+-			 __func__, rc);
+-		return rc;
+-	}
+-	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
+-	sdescmd5 = kmalloc(size, GFP_KERNEL);
+-	if (!sdescmd5) {
+-		rc = -ENOMEM;
++	struct crypto_shash *md5 = NULL;
++	struct sdesc *sdescmd5 = NULL;
++
++	rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
++	if (rc)
+ 		goto symlink_hash_err;
+-	}
+-	sdescmd5->shash.tfm = md5;
+-	sdescmd5->shash.flags = 0x0;
+ 
+ 	rc = crypto_shash_init(&sdescmd5->shash);
+ 	if (rc) {
+@@ -85,9 +72,7 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+ 		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ 
+ symlink_hash_err:
+-	crypto_free_shash(md5);
+-	kfree(sdescmd5);
+-
++	cifs_free_hash(&md5, &sdescmd5);
+ 	return rc;
+ }
+ 
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index a0dbced4a45c..460084a8eac5 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -848,3 +848,57 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
+ 	iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+ 	return 0;
+ }
++
++/**
++ * cifs_alloc_hash - allocate hash and hash context together
++ *
++ * The caller has to make sure @sdesc is initialized to either NULL or
++ * a valid context. Both can be freed via cifs_free_hash().
++ */
++int
++cifs_alloc_hash(const char *name,
++		struct crypto_shash **shash, struct sdesc **sdesc)
++{
++	int rc = 0;
++	size_t size;
++
++	if (*sdesc != NULL)
++		return 0;
++
++	*shash = crypto_alloc_shash(name, 0, 0);
++	if (IS_ERR(*shash)) {
++		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
++		rc = PTR_ERR(*shash);
++		*shash = NULL;
++		*sdesc = NULL;
++		return rc;
++	}
++
++	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
++	*sdesc = kmalloc(size, GFP_KERNEL);
++	if (*sdesc == NULL) {
++		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
++		crypto_free_shash(*shash);
++		*shash = NULL;
++		return -ENOMEM;
++	}
++
++	(*sdesc)->shash.tfm = *shash;
++	(*sdesc)->shash.flags = 0x0;
++	return 0;
++}
++
++/**
++ * cifs_free_hash - free hash and hash context together
++ *
++ * Freeing a NULL hash or context is safe.
++ */
++void
++cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
++{
++	kfree(*sdesc);
++	*sdesc = NULL;
++	if (*shash)
++		crypto_free_shash(*shash);
++	*shash = NULL;
++}
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 76d03abaa38c..da012c3ab700 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -706,3 +706,67 @@ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+ 
+ 	return 0;
+ }
++
++#ifdef CONFIG_CIFS_SMB311
++/**
++ * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
++ *
++ * Assumes @iov does not contain the rfc1002 length and iov[0] has the
++ * SMB2 header.
++ */
++int
++smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
++{
++	int i, rc;
++	struct sdesc *d;
++	struct smb2_sync_hdr *hdr;
++
++	if (ses->server->tcpStatus == CifsGood) {
++		/* skip non smb311 connections */
++		if (ses->server->dialect != SMB311_PROT_ID)
++			return 0;
++
++		/* skip last sess setup response */
++		hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
++		if (hdr->Flags & SMB2_FLAGS_SIGNED)
++			return 0;
++	}
++
++	rc = smb311_crypto_shash_allocate(ses->server);
++	if (rc)
++		return rc;
++
++	d = ses->server->secmech.sdescsha512;
++	rc = crypto_shash_init(&d->shash);
++	if (rc) {
++		cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
++		return rc;
++	}
++
++	rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
++				 SMB2_PREAUTH_HASH_SIZE);
++	if (rc) {
++		cifs_dbg(VFS, "%s: could not update sha512 shash\n", __func__);
++		return rc;
++	}
++
++	for (i = 0; i < nvec; i++) {
++		rc = crypto_shash_update(&d->shash,
++					 iov[i].iov_base, iov[i].iov_len);
++		if (rc) {
++			cifs_dbg(VFS, "%s: could not update sha512 shash\n",
++				 __func__);
++			return rc;
++		}
++	}
++
++	rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
++	if (rc) {
++		cifs_dbg(VFS, "%s: could not finalize sha512 shash\n",
++			 __func__);
++		return rc;
++	}
++
++	return 0;
++}
++#endif
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index eb68e2fcc500..dfd6fb02b7a3 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2066,6 +2066,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq)
+ 	inc_rfc1001_len(tr_hdr, orig_len);
+ }
+ 
++/* We can not use the normal sg_set_buf() as we will sometimes pass a
++ * stack object as buf.
++ */
++static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
++				   unsigned int buflen)
++{
++	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
++}
++
+ static struct scatterlist *
+ init_sg(struct smb_rqst *rqst, u8 *sign)
+ {
+@@ -2080,16 +2089,16 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
+ 		return NULL;
+ 
+ 	sg_init_table(sg, sg_len);
+-	sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
++	smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
+ 	for (i = 1; i < rqst->rq_nvec; i++)
+-		sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
++		smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+ 						rqst->rq_iov[i].iov_len);
+ 	for (j = 0; i < sg_len - 1; i++, j++) {
+ 		unsigned int len = (j < rqst->rq_npages - 1) ? rqst->rq_pagesz
+ 							: rqst->rq_tailsz;
+ 		sg_set_page(&sg[i], rqst->rq_pages[j], len, 0);
+ 	}
+-	sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
++	smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
+ 	return sg;
+ }
+ 
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 63778ac22fd9..af62c75b17c4 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -453,6 +453,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 		return rc;
+ 
+ 	req->sync_hdr.SessionId = 0;
++#ifdef CONFIG_CIFS_SMB311
++	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
++	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
++#endif
+ 
+ 	if (strcmp(ses->server->vals->version_string,
+ 		   SMB3ANY_VERSION_STRING) == 0) {
+@@ -564,6 +568,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 
+ 	/* BB: add check that dialect was valid given dialect(s) we asked for */
+ 
++#ifdef CONFIG_CIFS_SMB311
++	/*
++	 * Keep a copy of the hash after negprot. This hash will be
++	 * the starting hash value for all sessions made from this
++	 * server.
++	 */
++	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
++	       SMB2_PREAUTH_HASH_SIZE);
++#endif
+ 	/* SMB2 only has an extended negflavor */
+ 	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+ 	/* set it to the maximum buffer size value we can send with 1 credit */
+@@ -621,6 +634,10 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 		return 0;
+ #endif
+ 
++	/* In SMB3.11 preauth integrity supersedes validate negotiate */
++	if (tcon->ses->server->dialect == SMB311_PROT_ID)
++		return 0;
++
+ 	/*
+ 	 * validation ioctl must be signed, so no point sending this if we
+ 	 * can not sign it (ie are not known user).  Even if signing is not
+@@ -1148,6 +1165,14 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	sess_data->buf0_type = CIFS_NO_BUFFER;
+ 	sess_data->nls_cp = (struct nls_table *) nls_cp;
+ 
++#ifdef CONFIG_CIFS_SMB311
++	/*
++	 * Initialize the session hash with the server one.
++	 */
++	memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
++	       SMB2_PREAUTH_HASH_SIZE);
++#endif
++
+ 	while (sess_data->func)
+ 		sess_data->func(sess_data);
+ 
+@@ -1280,6 +1305,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 	iov[1].iov_base = unc_path;
+ 	iov[1].iov_len = unc_path_len;
+ 
++	/* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
++	if ((ses->server->dialect == SMB311_PROT_ID) &&
++	    !encryption_required(tcon))
++		req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
++
+ 	rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ 	cifs_small_buf_release(req);
+ 	rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
+@@ -1738,8 +1768,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+ 						 &name_len,
+ 						 tcon->treeName, path);
+-		if (rc)
++		if (rc) {
++			cifs_small_buf_release(req);
+ 			return rc;
++		}
+ 		req->NameLength = cpu_to_le16(name_len * 2);
+ 		uni_path_len = copy_size;
+ 		path = copy_path;
+@@ -1750,8 +1782,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
+ 		if (uni_path_len % 8 != 0) {
+ 			copy_size = roundup(uni_path_len, 8);
+ 			copy_path = kzalloc(copy_size, GFP_KERNEL);
+-			if (!copy_path)
++			if (!copy_path) {
++				cifs_small_buf_release(req);
+ 				return -ENOMEM;
++			}
+ 			memcpy((char *)copy_path, (const char *)path,
+ 			       uni_path_len);
+ 			uni_path_len = copy_size;
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 2a2b34ccaf49..8b901c69a65a 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -264,6 +264,7 @@ struct smb2_negotiate_req {
+ #define SMB311_SALT_SIZE			32
+ /* Hash Algorithm Types */
+ #define SMB2_PREAUTH_INTEGRITY_SHA512	cpu_to_le16(0x0001)
++#define SMB2_PREAUTH_HASH_SIZE 64
+ 
+ struct smb2_preauth_neg_context {
+ 	__le16	ContextType; /* 1 */
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 05287b01f596..cbcce3f7e86f 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -202,4 +202,9 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
+ 
+ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
+ 					enum securityEnum);
++#ifdef CONFIG_CIFS_SMB311
++extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
++extern int smb311_update_preauth_hash(struct cifs_ses *ses,
++				      struct kvec *iov, int nvec);
++#endif
+ #endif			/* _SMB2PROTO_H */
+diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
+index 99493946e2f9..bf49cb73b9e6 100644
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -43,77 +43,62 @@
+ static int
+ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
+ {
+-	int rc;
+-	unsigned int size;
++	return cifs_alloc_hash("hmac(sha256)",
++			       &server->secmech.hmacsha256,
++			       &server->secmech.sdeschmacsha256);
++}
+ 
+-	if (server->secmech.sdeschmacsha256 != NULL)
+-		return 0; /* already allocated */
++static int
++smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
++{
++	struct cifs_secmech *p = &server->secmech;
++	int rc;
+ 
+-	server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
+-	if (IS_ERR(server->secmech.hmacsha256)) {
+-		cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
+-		rc = PTR_ERR(server->secmech.hmacsha256);
+-		server->secmech.hmacsha256 = NULL;
+-		return rc;
+-	}
++	rc = cifs_alloc_hash("hmac(sha256)",
++			     &p->hmacsha256,
++			     &p->sdeschmacsha256);
++	if (rc)
++		goto err;
+ 
+-	size = sizeof(struct shash_desc) +
+-			crypto_shash_descsize(server->secmech.hmacsha256);
+-	server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
+-	if (!server->secmech.sdeschmacsha256) {
+-		crypto_free_shash(server->secmech.hmacsha256);
+-		server->secmech.hmacsha256 = NULL;
+-		return -ENOMEM;
+-	}
+-	server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
+-	server->secmech.sdeschmacsha256->shash.flags = 0x0;
++	rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
++	if (rc)
++		goto err;
+ 
+ 	return 0;
++err:
++	cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
++	return rc;
+ }
+ 
+-static int
+-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
++#ifdef CONFIG_CIFS_SMB311
++int
++smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
+ {
+-	unsigned int size;
+-	int rc;
+-
+-	if (server->secmech.sdesccmacaes != NULL)
+-		return 0;  /* already allocated */
++	struct cifs_secmech *p = &server->secmech;
++	int rc = 0;
+ 
+-	rc = smb2_crypto_shash_allocate(server);
++	rc = cifs_alloc_hash("hmac(sha256)",
++			     &p->hmacsha256,
++			     &p->sdeschmacsha256);
+ 	if (rc)
+ 		return rc;
+ 
+-	server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
+-	if (IS_ERR(server->secmech.cmacaes)) {
+-		cifs_dbg(VFS, "could not allocate crypto cmac-aes");
+-		kfree(server->secmech.sdeschmacsha256);
+-		server->secmech.sdeschmacsha256 = NULL;
+-		crypto_free_shash(server->secmech.hmacsha256);
+-		server->secmech.hmacsha256 = NULL;
+-		rc = PTR_ERR(server->secmech.cmacaes);
+-		server->secmech.cmacaes = NULL;
+-		return rc;
+-	}
++	rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
++	if (rc)
++		goto err;
+ 
+-	size = sizeof(struct shash_desc) +
+-			crypto_shash_descsize(server->secmech.cmacaes);
+-	server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
+-	if (!server->secmech.sdesccmacaes) {
+-		cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
+-		kfree(server->secmech.sdeschmacsha256);
+-		server->secmech.sdeschmacsha256 = NULL;
+-		crypto_free_shash(server->secmech.hmacsha256);
+-		crypto_free_shash(server->secmech.cmacaes);
+-		server->secmech.hmacsha256 = NULL;
+-		server->secmech.cmacaes = NULL;
+-		return -ENOMEM;
+-	}
+-	server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
+-	server->secmech.sdesccmacaes->shash.flags = 0x0;
++	rc = cifs_alloc_hash("sha512", &p->sha512, &p->sdescsha512);
++	if (rc)
++		goto err;
+ 
+ 	return 0;
++
++err:
++	cifs_free_hash(&p->cmacaes, &p->sdesccmacaes);
++	cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
++	return rc;
+ }
++#endif
+ 
+ static struct cifs_ses *
+ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+@@ -457,7 +442,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+ 		cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
+ 		return rc;
+ 	}
+-	
++
+ 	rc = __cifs_calc_signature(rqst, server, sigptr,
+ 				   &server->secmech.sdesccmacaes->shash);
+ 
+diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
+index 91710eb571fb..52cccdbb7e14 100644
+--- a/fs/cifs/smbdirect.c
++++ b/fs/cifs/smbdirect.c
+@@ -862,6 +862,8 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ 	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+ 		request->sge[0].length, DMA_TO_DEVICE);
+ 
++	smbd_disconnect_rdma_connection(info);
++
+ dma_mapping_failed:
+ 	mempool_free(request, info->request_mempool);
+ 	return rc;
+@@ -1061,6 +1063,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ 			if (atomic_dec_and_test(&info->send_pending))
+ 				wake_up(&info->wait_send_pending);
+ 		}
++		smbd_disconnect_rdma_connection(info);
+ 	} else
+ 		/* Reset timer for idle connection after packet is sent */
+ 		mod_delayed_work(info->workqueue, &info->idle_timer_work,
+@@ -1202,7 +1205,7 @@ static int smbd_post_recv(
+ 	if (rc) {
+ 		ib_dma_unmap_single(info->id->device, response->sge.addr,
+ 				    response->sge.length, DMA_FROM_DEVICE);
+-
++		smbd_disconnect_rdma_connection(info);
+ 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+ 	}
+ 
+@@ -1498,8 +1501,8 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+ 	log_rdma_event(INFO, "reconnecting rdma session\n");
+ 
+ 	if (!server->smbd_conn) {
+-		log_rdma_event(ERR, "rdma session already destroyed\n");
+-		return -EINVAL;
++		log_rdma_event(INFO, "rdma session already destroyed\n");
++		goto create_conn;
+ 	}
+ 
+ 	/*
+@@ -1512,15 +1515,19 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+ 	}
+ 
+ 	/* wait until the transport is destroyed */
+-	wait_event(server->smbd_conn->wait_destroy,
+-		server->smbd_conn->transport_status == SMBD_DESTROYED);
++	if (!wait_event_timeout(server->smbd_conn->wait_destroy,
++		server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
++		return -EAGAIN;
+ 
+ 	destroy_workqueue(server->smbd_conn->workqueue);
+ 	kfree(server->smbd_conn);
+ 
++create_conn:
+ 	log_rdma_event(INFO, "creating rdma session\n");
+ 	server->smbd_conn = smbd_get_connection(
+ 		server, (struct sockaddr *) &server->dstaddr);
++	log_rdma_event(INFO, "created rdma session info=%p\n",
++		server->smbd_conn);
+ 
+ 	return server->smbd_conn ? 0 : -ENOENT;
+ }
+@@ -2542,6 +2549,8 @@ struct smbd_mr *smbd_register_mr(
+ 	if (atomic_dec_and_test(&info->mr_used_count))
+ 		wake_up(&info->wait_for_mr_cleanup);
+ 
++	smbd_disconnect_rdma_connection(info);
++
+ 	return NULL;
+ }
+ 
+diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
+index c12bffefa3c9..a0b80ac651a6 100644
+--- a/fs/cifs/smbencrypt.c
++++ b/fs/cifs/smbencrypt.c
+@@ -121,25 +121,12 @@ int
+ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+ {
+ 	int rc;
+-	unsigned int size;
+-	struct crypto_shash *md4;
+-	struct sdesc *sdescmd4;
+-
+-	md4 = crypto_alloc_shash("md4", 0, 0);
+-	if (IS_ERR(md4)) {
+-		rc = PTR_ERR(md4);
+-		cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n",
+-			 __func__, rc);
+-		return rc;
+-	}
+-	size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
+-	sdescmd4 = kmalloc(size, GFP_KERNEL);
+-	if (!sdescmd4) {
+-		rc = -ENOMEM;
++	struct crypto_shash *md4 = NULL;
++	struct sdesc *sdescmd4 = NULL;
++
++	rc = cifs_alloc_hash("md4", &md4, &sdescmd4);
++	if (rc)
+ 		goto mdfour_err;
+-	}
+-	sdescmd4->shash.tfm = md4;
+-	sdescmd4->shash.flags = 0x0;
+ 
+ 	rc = crypto_shash_init(&sdescmd4->shash);
+ 	if (rc) {
+@@ -156,9 +143,7 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+ 		cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__);
+ 
+ mdfour_err:
+-	crypto_free_shash(md4);
+-	kfree(sdescmd4);
+-
++	cifs_free_hash(&md4, &sdescmd4);
+ 	return rc;
+ }
+ 
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 9779b3292d8e..665661464067 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -37,6 +37,7 @@
+ #include "cifsglob.h"
+ #include "cifsproto.h"
+ #include "cifs_debug.h"
++#include "smb2proto.h"
+ #include "smbdirect.h"
+ 
+ /* Max number of iovectors we can use off the stack when sending requests. */
+@@ -751,6 +752,12 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 	if (rc < 0)
+ 		goto out;
+ 
++#ifdef CONFIG_CIFS_SMB311
++	if (ses->status == CifsNew)
++		smb311_update_preauth_hash(ses, rqst->rq_iov+1,
++					   rqst->rq_nvec-1);
++#endif
++
+ 	if (timeout == CIFS_ASYNC_OP)
+ 		goto out;
+ 
+@@ -789,6 +796,16 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ 	else
+ 		*resp_buf_type = CIFS_SMALL_BUFFER;
+ 
++#ifdef CONFIG_CIFS_SMB311
++	if (ses->status == CifsNew) {
++		struct kvec iov = {
++			.iov_base = buf + 4,
++			.iov_len = get_rfc1002_length(buf)
++		};
++		smb311_update_preauth_hash(ses, &iov, 1);
++	}
++#endif
++
+ 	credits = ses->server->ops->get_credits(midQ);
+ 
+ 	rc = ses->server->ops->check_receive(midQ, ses->server,
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index f9b3e0a83526..f82c4966f4ce 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -243,8 +243,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
+ 	 */
+ 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
+ 			     sb->s_blocksize * 8, bh->b_data);
+-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+-	ext4_group_desc_csum_set(sb, block_group, gdp);
+ 	return 0;
+ }
+ 
+@@ -448,6 +446,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
+ 		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
+ 		set_bitmap_uptodate(bh);
+ 		set_buffer_uptodate(bh);
++		set_buffer_verified(bh);
+ 		ext4_unlock_group(sb, block_group);
+ 		unlock_buffer(bh);
+ 		if (err) {
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index da87cf757f7d..e2902d394f1b 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -365,13 +365,15 @@ static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
+ {
+ 	struct inode *inode = file->f_mapping->host;
+ 	int dx_dir = is_dx_dir(inode);
+-	loff_t htree_max = ext4_get_htree_eof(file);
++	loff_t ret, htree_max = ext4_get_htree_eof(file);
+ 
+ 	if (likely(dx_dir))
+-		return generic_file_llseek_size(file, offset, whence,
++		ret = generic_file_llseek_size(file, offset, whence,
+ 						    htree_max, htree_max);
+ 	else
+-		return ext4_llseek(file, offset, whence);
++		ret = ext4_llseek(file, offset, whence);
++	file->f_version = inode_peek_iversion(inode) - 1;
++	return ret;
+ }
+ 
+ /*
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index 2d593201cf7a..7c70b08d104c 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -166,13 +166,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
+ 	might_sleep();
+ 
+ 	if (ext4_handle_valid(handle)) {
+-		struct super_block *sb;
+-
+-		sb = handle->h_transaction->t_journal->j_private;
+-		if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) {
+-			jbd2_journal_abort_handle(handle);
+-			return -EIO;
+-		}
+ 		err = jbd2_journal_get_write_access(handle, bh);
+ 		if (err)
+ 			ext4_journal_abort_handle(where, line, __func__, bh,
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 7830d28df331..3fa93665b4a3 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -66,44 +66,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
+ 		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
+ }
+ 
+-/* Initializes an uninitialized inode bitmap */
+-static int ext4_init_inode_bitmap(struct super_block *sb,
+-				       struct buffer_head *bh,
+-				       ext4_group_t block_group,
+-				       struct ext4_group_desc *gdp)
+-{
+-	struct ext4_group_info *grp;
+-	struct ext4_sb_info *sbi = EXT4_SB(sb);
+-	J_ASSERT_BH(bh, buffer_locked(bh));
+-
+-	/* If checksum is bad mark all blocks and inodes use to prevent
+-	 * allocation, essentially implementing a per-group read-only flag. */
+-	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
+-		grp = ext4_get_group_info(sb, block_group);
+-		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+-			percpu_counter_sub(&sbi->s_freeclusters_counter,
+-					   grp->bb_free);
+-		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+-		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+-			int count;
+-			count = ext4_free_inodes_count(sb, gdp);
+-			percpu_counter_sub(&sbi->s_freeinodes_counter,
+-					   count);
+-		}
+-		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+-		return -EFSBADCRC;
+-	}
+-
+-	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
+-	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
+-			bh->b_data);
+-	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
+-				   EXT4_INODES_PER_GROUP(sb) / 8);
+-	ext4_group_desc_csum_set(sb, block_group, gdp);
+-
+-	return 0;
+-}
+-
+ void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
+ {
+ 	if (uptodate) {
+@@ -187,17 +149,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ 
+ 	ext4_lock_group(sb, block_group);
+ 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+-		err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
++		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
++		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
++				     sb->s_blocksize * 8, bh->b_data);
+ 		set_bitmap_uptodate(bh);
+ 		set_buffer_uptodate(bh);
+ 		set_buffer_verified(bh);
+ 		ext4_unlock_group(sb, block_group);
+ 		unlock_buffer(bh);
+-		if (err) {
+-			ext4_error(sb, "Failed to init inode bitmap for group "
+-				   "%u: %d", block_group, err);
+-			goto out;
+-		}
+ 		return bh;
+ 	}
+ 	ext4_unlock_group(sb, block_group);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index c94780075b04..3350454fc5a7 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3524,7 +3524,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ 		iomap->flags |= IOMAP_F_DIRTY;
+ 	iomap->bdev = inode->i_sb->s_bdev;
+ 	iomap->dax_dev = sbi->s_daxdev;
+-	iomap->offset = first_block << blkbits;
++	iomap->offset = (u64)first_block << blkbits;
+ 	iomap->length = (u64)map.m_len << blkbits;
+ 
+ 	if (ret == 0) {
+@@ -3658,7 +3658,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+ {
+ 	struct file *file = iocb->ki_filp;
+ 	struct inode *inode = file->f_mapping->host;
+-	struct ext4_inode_info *ei = EXT4_I(inode);
+ 	ssize_t ret;
+ 	loff_t offset = iocb->ki_pos;
+ 	size_t count = iov_iter_count(iter);
+@@ -3682,7 +3681,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+ 			goto out;
+ 		}
+ 		orphan = 1;
+-		ei->i_disksize = inode->i_size;
++		ext4_update_i_disksize(inode, inode->i_size);
+ 		ext4_journal_stop(handle);
+ 	}
+ 
+@@ -3790,7 +3789,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
+ 		if (ret > 0) {
+ 			loff_t end = offset + ret;
+ 			if (end > inode->i_size) {
+-				ei->i_disksize = end;
++				ext4_update_i_disksize(inode, end);
+ 				i_size_write(inode, end);
+ 				/*
+ 				 * We're going to return a positive `ret'
+@@ -4746,6 +4745,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+ 		goto bad_inode;
+ 	raw_inode = ext4_raw_inode(&iloc);
+ 
++	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
++		EXT4_ERROR_INODE(inode, "root inode unallocated");
++		ret = -EFSCORRUPTED;
++		goto bad_inode;
++	}
++
+ 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
+ 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 7e99ad02f1ba..be8d78472ef8 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -492,15 +492,13 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
+ 		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
+ 		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
+ 			(void) ext4_force_commit(sb);
+-			jbd2_journal_abort(sbi->s_journal, 0);
++			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
+ 		}
+ 		break;
+ 	case EXT4_GOING_FLAGS_NOLOGFLUSH:
+ 		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
+-		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
+-			msleep(100);
+-			jbd2_journal_abort(sbi->s_journal, 0);
+-		}
++		if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
++			jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 39bf464c35f1..192c5ad09d71 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2333,6 +2333,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Block bitmap for group %u overlaps "
+ 				 "superblock", i);
++			if (!sb_rdonly(sb))
++				return 0;
+ 		}
+ 		if (block_bitmap < first_block || block_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2345,6 +2347,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Inode bitmap for group %u overlaps "
+ 				 "superblock", i);
++			if (!sb_rdonly(sb))
++				return 0;
+ 		}
+ 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2357,6 +2361,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Inode table for group %u overlaps "
+ 				 "superblock", i);
++			if (!sb_rdonly(sb))
++				return 0;
+ 		}
+ 		if (inode_table < first_block ||
+ 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
+@@ -3490,15 +3496,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	/* Load the checksum driver */
+-	if (ext4_has_feature_metadata_csum(sb) ||
+-	    ext4_has_feature_ea_inode(sb)) {
+-		sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
+-		if (IS_ERR(sbi->s_chksum_driver)) {
+-			ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
+-			ret = PTR_ERR(sbi->s_chksum_driver);
+-			sbi->s_chksum_driver = NULL;
+-			goto failed_mount;
+-		}
++	sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
++	if (IS_ERR(sbi->s_chksum_driver)) {
++		ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
++		ret = PTR_ERR(sbi->s_chksum_driver);
++		sbi->s_chksum_driver = NULL;
++		goto failed_mount;
+ 	}
+ 
+ 	/* Check superblock checksum */
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 63656dbafdc4..499cb4b1fbd2 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -195,10 +195,13 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
+ 
+ 	/* Check the values */
+ 	while (!IS_LAST_ENTRY(entry)) {
+-		if (entry->e_value_size != 0 &&
+-		    entry->e_value_inum == 0) {
++		u32 size = le32_to_cpu(entry->e_value_size);
++
++		if (size > EXT4_XATTR_SIZE_MAX)
++			return -EFSCORRUPTED;
++
++		if (size != 0 && entry->e_value_inum == 0) {
+ 			u16 offs = le16_to_cpu(entry->e_value_offs);
+-			u32 size = le32_to_cpu(entry->e_value_size);
+ 			void *value;
+ 
+ 			/*
+@@ -222,25 +225,36 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
+ }
+ 
+ static inline int
+-ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
++__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
++			 const char *function, unsigned int line)
+ {
+-	int error;
++	int error = -EFSCORRUPTED;
+ 
+ 	if (buffer_verified(bh))
+ 		return 0;
+ 
+ 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
+ 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
+-		return -EFSCORRUPTED;
++		goto errout;
++	error = -EFSBADCRC;
+ 	if (!ext4_xattr_block_csum_verify(inode, bh))
+-		return -EFSBADCRC;
++		goto errout;
+ 	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
+ 					 bh->b_data);
+-	if (!error)
++errout:
++	if (error)
++		__ext4_error_inode(inode, function, line, 0,
++				   "corrupted xattr block %llu",
++				   (unsigned long long) bh->b_blocknr);
++	else
+ 		set_buffer_verified(bh);
+ 	return error;
+ }
+ 
++#define ext4_xattr_check_block(inode, bh) \
++	__ext4_xattr_check_block((inode), (bh),  __func__, __LINE__)
++
++
+ static int
+ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+ 			 void *end, const char *function, unsigned int line)
+@@ -262,18 +276,22 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
+ 	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)
+ 
+ static int
+-ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
+-		      const char *name, int sorted)
++xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
++		 void *end, int name_index, const char *name, int sorted)
+ {
+-	struct ext4_xattr_entry *entry;
++	struct ext4_xattr_entry *entry, *next;
+ 	size_t name_len;
+ 	int cmp = 1;
+ 
+ 	if (name == NULL)
+ 		return -EINVAL;
+ 	name_len = strlen(name);
+-	entry = *pentry;
+-	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
++	for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
++		next = EXT4_XATTR_NEXT(entry);
++		if ((void *) next >= end) {
++			EXT4_ERROR_INODE(inode, "corrupted xattr entries");
++			return -EFSCORRUPTED;
++		}
+ 		cmp = name_index - entry->e_name_index;
+ 		if (!cmp)
+ 			cmp = name_len - entry->e_name_len;
+@@ -495,6 +513,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ 	struct buffer_head *bh = NULL;
+ 	struct ext4_xattr_entry *entry;
+ 	size_t size;
++	void *end;
+ 	int error;
+ 	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
+ 
+@@ -511,20 +530,20 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ 		goto cleanup;
+ 	ea_bdebug(bh, "b_count=%d, refcount=%d",
+ 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+-	if (ext4_xattr_check_block(inode, bh)) {
+-		EXT4_ERROR_INODE(inode, "bad block %llu",
+-				 EXT4_I(inode)->i_file_acl);
+-		error = -EFSCORRUPTED;
++	error = ext4_xattr_check_block(inode, bh);
++	if (error)
+ 		goto cleanup;
+-	}
+ 	ext4_xattr_block_cache_insert(ea_block_cache, bh);
+ 	entry = BFIRST(bh);
+-	error = ext4_xattr_find_entry(&entry, name_index, name, 1);
++	end = bh->b_data + bh->b_size;
++	error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
+ 	if (error)
+ 		goto cleanup;
+ 	size = le32_to_cpu(entry->e_value_size);
++	error = -ERANGE;
++	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
++		goto cleanup;
+ 	if (buffer) {
+-		error = -ERANGE;
+ 		if (size > buffer_size)
+ 			goto cleanup;
+ 		if (entry->e_value_inum) {
+@@ -533,8 +552,12 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ 			if (error)
+ 				goto cleanup;
+ 		} else {
+-			memcpy(buffer, bh->b_data +
+-			       le16_to_cpu(entry->e_value_offs), size);
++			u16 offset = le16_to_cpu(entry->e_value_offs);
++			void *p = bh->b_data + offset;
++
++			if (unlikely(p + size > end))
++				goto cleanup;
++			memcpy(buffer, p, size);
+ 		}
+ 	}
+ 	error = size;
+@@ -568,12 +591,14 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ 	if (error)
+ 		goto cleanup;
+ 	entry = IFIRST(header);
+-	error = ext4_xattr_find_entry(&entry, name_index, name, 0);
++	error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
+ 	if (error)
+ 		goto cleanup;
+ 	size = le32_to_cpu(entry->e_value_size);
++	error = -ERANGE;
++	if (unlikely(size > EXT4_XATTR_SIZE_MAX))
++		goto cleanup;
+ 	if (buffer) {
+-		error = -ERANGE;
+ 		if (size > buffer_size)
+ 			goto cleanup;
+ 		if (entry->e_value_inum) {
+@@ -582,8 +607,12 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
+ 			if (error)
+ 				goto cleanup;
+ 		} else {
+-			memcpy(buffer, (void *)IFIRST(header) +
+-			       le16_to_cpu(entry->e_value_offs), size);
++			u16 offset = le16_to_cpu(entry->e_value_offs);
++			void *p = (void *)IFIRST(header) + offset;
++
++			if (unlikely(p + size > end))
++				goto cleanup;
++			memcpy(buffer, p, size);
+ 		}
+ 	}
+ 	error = size;
+@@ -676,12 +705,9 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 		goto cleanup;
+ 	ea_bdebug(bh, "b_count=%d, refcount=%d",
+ 		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
+-	if (ext4_xattr_check_block(inode, bh)) {
+-		EXT4_ERROR_INODE(inode, "bad block %llu",
+-				 EXT4_I(inode)->i_file_acl);
+-		error = -EFSCORRUPTED;
++	error = ext4_xattr_check_block(inode, bh);
++	if (error)
+ 		goto cleanup;
+-	}
+ 	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
+ 	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
+ 
+@@ -808,10 +834,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
+ 			goto out;
+ 		}
+ 
+-		if (ext4_xattr_check_block(inode, bh)) {
+-			ret = -EFSCORRUPTED;
++		ret = ext4_xattr_check_block(inode, bh);
++		if (ret)
+ 			goto out;
+-		}
+ 
+ 		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
+ 		     entry = EXT4_XATTR_NEXT(entry))
+@@ -1793,19 +1818,16 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
+ 		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
+ 			atomic_read(&(bs->bh->b_count)),
+ 			le32_to_cpu(BHDR(bs->bh)->h_refcount));
+-		if (ext4_xattr_check_block(inode, bs->bh)) {
+-			EXT4_ERROR_INODE(inode, "bad block %llu",
+-					 EXT4_I(inode)->i_file_acl);
+-			error = -EFSCORRUPTED;
++		error = ext4_xattr_check_block(inode, bs->bh);
++		if (error)
+ 			goto cleanup;
+-		}
+ 		/* Find the named attribute. */
+ 		bs->s.base = BHDR(bs->bh);
+ 		bs->s.first = BFIRST(bs->bh);
+ 		bs->s.end = bs->bh->b_data + bs->bh->b_size;
+ 		bs->s.here = bs->s.first;
+-		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
+-					      i->name, 1);
++		error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
++					 i->name_index, i->name, 1);
+ 		if (error && error != -ENODATA)
+ 			goto cleanup;
+ 		bs->s.not_found = error;
+@@ -2164,8 +2186,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ 		if (error)
+ 			return error;
+ 		/* Find the named attribute. */
+-		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
+-					      i->name, 0);
++		error = xattr_find_entry(inode, &is->s.here, is->s.end,
++					 i->name_index, i->name, 0);
+ 		if (error && error != -ENODATA)
+ 			return error;
+ 		is->s.not_found = error;
+@@ -2721,13 +2743,9 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
+ 		error = -EIO;
+ 		if (!bh)
+ 			goto cleanup;
+-		if (ext4_xattr_check_block(inode, bh)) {
+-			EXT4_ERROR_INODE(inode, "bad block %llu",
+-					 EXT4_I(inode)->i_file_acl);
+-			error = -EFSCORRUPTED;
+-			brelse(bh);
++		error = ext4_xattr_check_block(inode, bh);
++		if (error)
+ 			goto cleanup;
+-		}
+ 		base = BHDR(bh);
+ 		end = bh->b_data + bh->b_size;
+ 		min_offs = end - base;
+@@ -2884,11 +2902,8 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
+ 			goto cleanup;
+ 		}
+ 		error = ext4_xattr_check_block(inode, bh);
+-		if (error) {
+-			EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
+-					 EXT4_I(inode)->i_file_acl, error);
++		if (error)
+ 			goto cleanup;
+-		}
+ 
+ 		if (ext4_has_feature_ea_inode(inode->i_sb)) {
+ 			for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
+diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
+index dd54c4f995c8..f39cad2abe2a 100644
+--- a/fs/ext4/xattr.h
++++ b/fs/ext4/xattr.h
+@@ -70,6 +70,17 @@ struct ext4_xattr_entry {
+ 		EXT4_I(inode)->i_extra_isize))
+ #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
+ 
++/*
++ * XATTR_SIZE_MAX is currently 64k, but for the purposes of checking
++ * for file system consistency errors, we use a somewhat bigger value.
++ * This allows XATTR_SIZE_MAX to grow in the future, but by using this
++ * instead of INT_MAX for certain consistency checks, we don't need to
++ * worry about arithmetic overflows.  (Actually XATTR_SIZE_MAX is
++ * defined in include/uapi/linux/limits.h, so changing it is going
++ * not going to be trivial....)
++ */
++#define EXT4_XATTR_SIZE_MAX (1 << 24)
++
+ /*
+  * The minimum size of EA value when you start storing it in an external inode
+  * size of block - size of header - size of 1 entry - 4 null bytes
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index d4d04fee568a..40c34a0ef58a 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
+ 	 */
+ 	if (inode && inode_to_wb_is_valid(inode)) {
+ 		struct bdi_writeback *wb;
+-		bool locked, congested;
++		struct wb_lock_cookie lock_cookie = {};
++		bool congested;
+ 
+-		wb = unlocked_inode_to_wb_begin(inode, &locked);
++		wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
+ 		congested = wb_congested(wb, cong_bits);
+-		unlocked_inode_to_wb_end(inode, locked);
++		unlocked_inode_to_wb_end(inode, &lock_cookie);
+ 		return congested;
+ 	}
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 3fbf48ec2188..dfb057900e79 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -974,7 +974,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ }
+ 
+ /*
+- * This is a variaon of __jbd2_update_log_tail which checks for validity of
++ * This is a variation of __jbd2_update_log_tail which checks for validity of
+  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
+  * with other threads updating log tail.
+  */
+@@ -1417,6 +1417,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 	int ret;
+ 
++	if (is_journal_aborted(journal))
++		return -EIO;
++
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+ 		  tail_block, tail_tid);
+@@ -1483,12 +1486,15 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+ void jbd2_journal_update_sb_errno(journal_t *journal)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
++	int errcode;
+ 
+ 	read_lock(&journal->j_state_lock);
+-	jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
+-		  journal->j_errno);
+-	sb->s_errno    = cpu_to_be32(journal->j_errno);
++	errcode = journal->j_errno;
+ 	read_unlock(&journal->j_state_lock);
++	if (errcode == -ESHUTDOWN)
++		errcode = 0;
++	jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
++	sb->s_errno    = cpu_to_be32(errcode);
+ 
+ 	jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
+ }
+@@ -2105,12 +2111,22 @@ void __jbd2_journal_abort_hard(journal_t *journal)
+  * but don't do any other IO. */
+ static void __journal_abort_soft (journal_t *journal, int errno)
+ {
+-	if (journal->j_flags & JBD2_ABORT)
+-		return;
++	int old_errno;
+ 
+-	if (!journal->j_errno)
++	write_lock(&journal->j_state_lock);
++	old_errno = journal->j_errno;
++	if (!journal->j_errno || errno == -ESHUTDOWN)
+ 		journal->j_errno = errno;
+ 
++	if (journal->j_flags & JBD2_ABORT) {
++		write_unlock(&journal->j_state_lock);
++		if (!old_errno && old_errno != -ESHUTDOWN &&
++		    errno == -ESHUTDOWN)
++			jbd2_journal_update_sb_errno(journal);
++		return;
++	}
++	write_unlock(&journal->j_state_lock);
++
+ 	__jbd2_journal_abort_hard(journal);
+ 
+ 	if (errno) {
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index f60dee7faf03..87bdf0f4cba1 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb)
+ static void jffs2_kill_sb(struct super_block *sb)
+ {
+ 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+-	if (!sb_rdonly(sb))
++	if (c && !sb_rdonly(sb))
+ 		jffs2_stop_garbage_collect_thread(c);
+ 	kill_mtd_super(sb);
+ 	kfree(c);
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 9d1374ab6e06..c3ed9dc78655 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+ 			goto out_free;
+ 	}
+ 
+-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
++	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
++	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+ 	/* Don't allow unprivileged users to change mount flags */
+ 	if (flag & CL_UNPRIVILEGED) {
+ 		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index 6702a6a0bbb5..e0e6a9d627df 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
+ 				       u32 event_mask,
+ 				       const void *data, int data_type)
+ {
+-	__u32 marks_mask, marks_ignored_mask;
++	__u32 marks_mask = 0, marks_ignored_mask = 0;
+ 	const struct path *path = data;
+ 
+ 	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
+@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
+ 	    !d_can_lookup(path->dentry))
+ 		return false;
+ 
+-	if (inode_mark && vfsmnt_mark) {
+-		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
+-		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
+-	} else if (inode_mark) {
+-		/*
+-		 * if the event is for a child and this inode doesn't care about
+-		 * events on the child, don't send it!
+-		 */
+-		if ((event_mask & FS_EVENT_ON_CHILD) &&
+-		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
+-			return false;
+-		marks_mask = inode_mark->mask;
+-		marks_ignored_mask = inode_mark->ignored_mask;
+-	} else if (vfsmnt_mark) {
+-		marks_mask = vfsmnt_mark->mask;
+-		marks_ignored_mask = vfsmnt_mark->ignored_mask;
+-	} else {
+-		BUG();
++	/*
++	 * if the event is for a child and this inode doesn't care about
++	 * events on the child, don't send it!
++	 */
++	if (inode_mark &&
++	    (!(event_mask & FS_EVENT_ON_CHILD) ||
++	     (inode_mark->mask & FS_EVENT_ON_CHILD))) {
++		marks_mask |= inode_mark->mask;
++		marks_ignored_mask |= inode_mark->ignored_mask;
++	}
++
++	if (vfsmnt_mark) {
++		marks_mask |= vfsmnt_mark->mask;
++		marks_ignored_mask |= vfsmnt_mark->ignored_mask;
+ 	}
+ 
+ 	if (d_is_dir(path->dentry) &&
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index 3ae5fdba0225..10796d3fe27d 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb)
+ 	/* provided sb cleanup */
+ 	kill_anon_super(sb);
+ 
++	if (!ORANGEFS_SB(sb)) {
++		mutex_lock(&orangefs_request_mutex);
++		mutex_unlock(&orangefs_request_mutex);
++		return;
++	}
+ 	/*
+ 	 * issue the unmount to userspace to tell it to remove the
+ 	 * dynamic mount info it has for this superblock
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index 70057359fbaf..23148c3ed675 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
+ 	if (IS_ERR(journal->j_dev_bd)) {
+ 		result = PTR_ERR(journal->j_dev_bd);
+ 		journal->j_dev_bd = NULL;
+-		reiserfs_warning(super,
++		reiserfs_warning(super, "sh-457",
+ 				 "journal_init_dev: Cannot open '%s': %i",
+ 				 jdev_name, result);
+ 		return result;
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index b16ef162344a..6c397a389105 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1737,8 +1737,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
+ 
+ 	dbg_save_space_info(c);
+ 
+-	for (i = 0; i < c->jhead_cnt; i++)
+-		ubifs_wbuf_sync(&c->jheads[i].wbuf);
++	for (i = 0; i < c->jhead_cnt; i++) {
++		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++		if (err)
++			ubifs_ro_mode(c, err);
++	}
+ 
+ 	c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
+ 	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+@@ -1804,8 +1807,11 @@ static void ubifs_put_super(struct super_block *sb)
+ 			int err;
+ 
+ 			/* Synchronize write-buffers */
+-			for (i = 0; i < c->jhead_cnt; i++)
+-				ubifs_wbuf_sync(&c->jheads[i].wbuf);
++			for (i = 0; i < c->jhead_cnt; i++) {
++				err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++				if (err)
++					ubifs_ro_mode(c, err);
++			}
+ 
+ 			/*
+ 			 * We are being cleanly unmounted which means the
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index f897e55f2cd0..16a8ad21b77e 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -28,6 +28,9 @@
+ 
+ #include "udf_sb.h"
+ 
++#define SURROGATE_MASK 0xfffff800
++#define SURROGATE_PAIR 0x0000d800
++
+ static int udf_uni2char_utf8(wchar_t uni,
+ 			     unsigned char *out,
+ 			     int boundlen)
+@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
+ 	if (boundlen <= 0)
+ 		return -ENAMETOOLONG;
+ 
++	if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
++		return -EINVAL;
++
+ 	if (uni < 0x80) {
+ 		out[u_len++] = (unsigned char)uni;
+ 	} else if (uni < 0x800) {
+diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
+index 551f7600ab58..24e93dfcee9f 100644
+--- a/include/dt-bindings/clock/mt2701-clk.h
++++ b/include/dt-bindings/clock/mt2701-clk.h
+@@ -176,7 +176,8 @@
+ #define CLK_TOP_AUD_EXT1			156
+ #define CLK_TOP_AUD_EXT2			157
+ #define CLK_TOP_NFI1X_PAD			158
+-#define CLK_TOP_NR				159
++#define CLK_TOP_AXISEL_D4			159
++#define CLK_TOP_NR				160
+ 
+ /* APMIXEDSYS */
+ 
+diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
+index bfe86b54f6c1..0bd432a4d7bd 100644
+--- a/include/linux/backing-dev-defs.h
++++ b/include/linux/backing-dev-defs.h
+@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
+ 	set_wb_congested(bdi->wb.congested, sync);
+ }
+ 
++struct wb_lock_cookie {
++	bool locked;
++	unsigned long flags;
++};
++
+ #ifdef CONFIG_CGROUP_WRITEBACK
+ 
+ /**
+diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
+index 3e4ce54d84ab..82e8b73117d1 100644
+--- a/include/linux/backing-dev.h
++++ b/include/linux/backing-dev.h
+@@ -346,7 +346,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
+ /**
+  * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
+  * @inode: target inode
+- * @lockedp: temp bool output param, to be passed to the end function
++ * @cookie: output param, to be passed to the end function
+  *
+  * The caller wants to access the wb associated with @inode but isn't
+  * holding inode->i_lock, mapping->tree_lock or wb->list_lock.  This
+@@ -354,12 +354,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
+  * association doesn't change until the transaction is finished with
+  * unlocked_inode_to_wb_end().
+  *
+- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
+- * afterwards and can't sleep during transaction.  IRQ may or may not be
+- * disabled on return.
++ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
++ * can't sleep during the transaction.  IRQs may or may not be disabled on
++ * return.
+  */
+ static inline struct bdi_writeback *
+-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
+ {
+ 	rcu_read_lock();
+ 
+@@ -367,10 +367,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+ 	 * Paired with store_release in inode_switch_wb_work_fn() and
+ 	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
+ 	 */
+-	*lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
++	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ 
+-	if (unlikely(*lockedp))
+-		spin_lock_irq(&inode->i_mapping->tree_lock);
++	if (unlikely(cookie->locked))
++		spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
+ 
+ 	/*
+ 	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+@@ -382,12 +382,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
+ /**
+  * unlocked_inode_to_wb_end - end inode wb access transaction
+  * @inode: target inode
+- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
++ * @cookie: @cookie from unlocked_inode_to_wb_begin()
+  */
+-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
++static inline void unlocked_inode_to_wb_end(struct inode *inode,
++					    struct wb_lock_cookie *cookie)
+ {
+-	if (unlikely(locked))
+-		spin_unlock_irq(&inode->i_mapping->tree_lock);
++	if (unlikely(cookie->locked))
++		spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
+ 
+ 	rcu_read_unlock();
+ }
+@@ -434,12 +435,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+ }
+ 
+ static inline struct bdi_writeback *
+-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
+ {
+ 	return inode_to_wb(inode);
+ }
+ 
+-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
++static inline void unlocked_inode_to_wb_end(struct inode *inode,
++					    struct wb_lock_cookie *cookie)
+ {
+ }
+ 
+diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
+index bf18b95ed92d..17b18b91ebac 100644
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *);
+ 
+ /*
+  * Block error status values.  See block/blk-core:blk_errors for the details.
++ * Alpha cannot write a byte atomically, so we need to use 32-bit value.
+  */
++#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
++typedef u32 __bitwise blk_status_t;
++#else
+ typedef u8 __bitwise blk_status_t;
++#endif
+ #define	BLK_STS_OK 0
+ #define BLK_STS_NOTSUPP		((__force blk_status_t)1)
+ #define BLK_STS_TIMEOUT		((__force blk_status_t)2)
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index d3f264a5b04d..ceb96ecab96e 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -17,9 +17,6 @@
+  */
+ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+ 
+-#define randomized_struct_fields_start	struct {
+-#define randomized_struct_fields_end	};
+-
+ /* all clang versions usable with the kernel support KASAN ABI version 5 */
+ #define KASAN_ABI_VERSION 5
+ 
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index e2c7f4369eff..b4bf73f5e38f 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -242,6 +242,9 @@
+ #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
+ #define __randomize_layout __attribute__((randomize_layout))
+ #define __no_randomize_layout __attribute__((no_randomize_layout))
++/* This anon struct can add padding, so only enable it under randstruct. */
++#define randomized_struct_fields_start	struct {
++#define randomized_struct_fields_end	} __randomize_layout;
+ #endif
+ 
+ #endif /* GCC_VERSION >= 40500 */
+@@ -256,15 +259,6 @@
+  */
+ #define __visible	__attribute__((externally_visible))
+ 
+-/*
+- * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
+- * possible since GCC 4.6. To provide as much build testing coverage
+- * as possible, this is used for all GCC 4.6+ builds, and not just on
+- * RANDSTRUCT_PLUGIN builds.
+- */
+-#define randomized_struct_fields_start	struct {
+-#define randomized_struct_fields_end	} __randomize_layout;
+-
+ #endif /* GCC_VERSION >= 40600 */
+ 
+ 
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 091a81cf330f..29b981b1694d 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -515,6 +515,12 @@ enum hid_type {
+ 	HID_TYPE_USBNONE
+ };
+ 
++enum hid_battery_status {
++	HID_BATTERY_UNKNOWN = 0,
++	HID_BATTERY_QUERIED,		/* Kernel explicitly queried battery strength */
++	HID_BATTERY_REPORTED,		/* Device sent unsolicited battery strength report */
++};
++
+ struct hid_driver;
+ struct hid_ll_driver;
+ 
+@@ -557,7 +563,8 @@ struct hid_device {							/* device report descriptor */
+ 	__s32 battery_max;
+ 	__s32 battery_report_type;
+ 	__s32 battery_report_id;
+-	bool battery_reported;
++	enum hid_battery_status battery_status;
++	bool battery_avoid_query;
+ #endif
+ 
+ 	unsigned int status;						/* see STAT flags above */
+@@ -851,7 +858,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
+ extern void hidinput_disconnect(struct hid_device *);
+ 
+ int hid_set_field(struct hid_field *, unsigned, __s32);
+-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
++int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
+ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+ unsigned int hidinput_count_leds(struct hid_device *hid);
+@@ -1102,13 +1109,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
+  *
+  * @report: the report we want to know the length
+  */
+-static inline int hid_report_len(struct hid_report *report)
++static inline u32 hid_report_len(struct hid_report *report)
+ {
+ 	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ 	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ }
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+ 		int interrupt);
+ 
+ /* HID quirks API */
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 325017ad9311..36dd21fe5caf 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -498,23 +498,16 @@ struct hmm_device {
+ struct hmm_device *hmm_device_new(void *drvdata);
+ void hmm_device_put(struct hmm_device *hmm_device);
+ #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
+-#endif /* IS_ENABLED(CONFIG_HMM) */
+ 
+ /* Below are for HMM internal use only! Not to be used by device driver! */
+-#if IS_ENABLED(CONFIG_HMM_MIRROR)
+ void hmm_mm_destroy(struct mm_struct *mm);
+ 
+ static inline void hmm_mm_init(struct mm_struct *mm)
+ {
+ 	mm->hmm = NULL;
+ }
+-#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+-static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+-static inline void hmm_mm_init(struct mm_struct *mm) {}
+-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
+-
+-
+ #else /* IS_ENABLED(CONFIG_HMM) */
+ static inline void hmm_mm_destroy(struct mm_struct *mm) {}
+ static inline void hmm_mm_init(struct mm_struct *mm) {}
++#endif /* IS_ENABLED(CONFIG_HMM) */
+ #endif /* LINUX_HMM_H */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index ad06d42adb1a..95a2d748e978 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2604,6 +2604,7 @@ enum mf_action_page_type {
+ 	MF_MSG_POISONED_HUGE,
+ 	MF_MSG_HUGE,
+ 	MF_MSG_FREE_HUGE,
++	MF_MSG_NON_PMD_HUGE,
+ 	MF_MSG_UNMAP_FAILED,
+ 	MF_MSG_DIRTY_SWAPCACHE,
+ 	MF_MSG_CLEAN_SWAPCACHE,
+diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
+index 760c969d885d..12bbf8c81112 100644
+--- a/include/sound/pcm_oss.h
++++ b/include/sound/pcm_oss.h
+@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
+ 	char *buffer;				/* vmallocated period */
+ 	size_t buffer_used;			/* used length from period buffer */
+ 	struct mutex params_lock;
++	atomic_t rw_ref;		/* concurrent read/write accesses */
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+ 	struct snd_pcm_plugin *plugin_first;
+ 	struct snd_pcm_plugin *plugin_last;
+diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
+index c34f4490d025..26ee91300e3e 100644
+--- a/include/uapi/linux/random.h
++++ b/include/uapi/linux/random.h
+@@ -35,6 +35,9 @@
+ /* Clear the entropy pool and associated counters.  (Superuser only.) */
+ #define RNDCLEARPOOL	_IO( 'R', 0x06 )
+ 
++/* Reseed CRNG.  (Superuser only.) */
++#define RNDRESEEDCRNG	_IO( 'R', 0x07 )
++
+ struct rand_pool_info {
+ 	int	entropy_count;
+ 	int	buf_size;
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 93e0e3a4d009..f68420b1ad93 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -203,6 +203,12 @@ static int __shm_open(struct vm_area_struct *vma)
+ 	if (IS_ERR(shp))
+ 		return PTR_ERR(shp);
+ 
++	if (shp->shm_file != sfd->file) {
++		/* ID was reused */
++		shm_unlock(shp);
++		return -EINVAL;
++	}
++
+ 	shp->shm_atim = ktime_get_real_seconds();
+ 	shp->shm_lprid = task_tgid_vnr(current);
+ 	shp->shm_nattch++;
+@@ -431,8 +437,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
+ 	int ret;
+ 
+ 	/*
+-	 * In case of remap_file_pages() emulation, the file can represent
+-	 * removed IPC ID: propogate shm_lock() error to caller.
++	 * In case of remap_file_pages() emulation, the file can represent an
++	 * IPC ID that was removed, and possibly even reused by another shm
++	 * segment already.  Propagate this case as an error to caller.
+ 	 */
+ 	ret = __shm_open(vma);
+ 	if (ret)
+@@ -456,6 +463,7 @@ static int shm_release(struct inode *ino, struct file *file)
+ 	struct shm_file_data *sfd = shm_file_data(file);
+ 
+ 	put_ipc_ns(sfd->ns);
++	fput(sfd->file);
+ 	shm_file_data(file) = NULL;
+ 	kfree(sfd);
+ 	return 0;
+@@ -1402,7 +1410,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+ 	file->f_mapping = shp->shm_file->f_mapping;
+ 	sfd->id = shp->shm_perm.id;
+ 	sfd->ns = get_ipc_ns(ns);
+-	sfd->file = shp->shm_file;
++	/*
++	 * We need to take a reference to the real shm file to prevent the
++	 * pointer from becoming stale in cases where the lifetime of the outer
++	 * file extends beyond that of the shm segment.  It's not usually
++	 * possible, but it can happen during remap_file_pages() emulation as
++	 * that unmaps the memory, then does ->mmap() via file reference only.
++	 * We'll deny the ->mmap() if the shm segment was since removed, but to
++	 * detect shm ID reuse we need to compare the file pointers.
++	 */
++	sfd->file = get_file(shp->shm_file);
+ 	sfd->vm_ops = NULL;
+ 
+ 	err = security_mmap_file(file, prot, flags);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index e270b5048988..2af6c03858b9 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
+ 			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+ 					size, constraint->align);
+ 			alloc.end = alloc.start + size - 1;
+-			if (resource_contains(&avail, &alloc)) {
++			if (alloc.start <= alloc.end &&
++			    resource_contains(&avail, &alloc)) {
+ 				new->start = alloc.start;
+ 				new->end = alloc.end;
+ 				return 0;
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index dcf1c4dd3efe..7ac7b08b563a 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -1136,6 +1136,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ 	struct buffer_page *bpage, *tmp;
+ 	long i;
+ 
++	/* Check if the available memory is there first */
++	i = si_mem_available();
++	if (i < nr_pages)
++		return -ENOMEM;
++
+ 	for (i = 0; i < nr_pages; i++) {
+ 		struct page *page;
+ 		/*
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 268029ae1be6..df08863e6d53 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -608,7 +608,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
+ 
+ 	/* Don't print "0x  (null)" when offset is 0 */
+ 	if (tu->offset) {
+-		seq_printf(m, "0x%px", (void *)tu->offset);
++		seq_printf(m, "0x%0*lx", (int)(sizeof(void *) * 2), tu->offset);
+ 	} else {
+ 		switch (sizeof(void *)) {
+ 		case 4:
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index c43ec2271469..44f7eb408fdb 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -732,7 +732,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ 		goto out_warn;
+ 
+ 	*dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+-	if (dma_coherent_ok(dev, *dma_handle, size))
++	if (!dma_coherent_ok(dev, *dma_handle, size))
+ 		goto out_unmap;
+ 
+ 	memset(phys_to_virt(phys_addr), 0, size);
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index d7a708f82559..89f8a4a4b770 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -2591,6 +2591,8 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
+ 			case 's':
+ 			case 'F':
+ 			case 'f':
++			case 'x':
++			case 'K':
+ 				save_arg(void *);
+ 				break;
+ 			default:
+@@ -2765,6 +2767,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
+ 			case 's':
+ 			case 'F':
+ 			case 'f':
++			case 'x':
++			case 'K':
+ 				process = true;
+ 				break;
+ 			default:
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 693f62212a59..787ff18663bf 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -785,7 +785,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
+ 	VM_BUG_ON_PAGE(!PageLocked(new), new);
+ 	VM_BUG_ON_PAGE(new->mapping, new);
+ 
+-	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
++	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
+ 	if (!error) {
+ 		struct address_space *mapping = old->mapping;
+ 		void (*freepage)(struct page *);
+@@ -841,7 +841,7 @@ static int __add_to_page_cache_locked(struct page *page,
+ 			return error;
+ 	}
+ 
+-	error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
++	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
+ 	if (error) {
+ 		if (!huge)
+ 			mem_cgroup_cancel_charge(page, memcg, false);
+@@ -1584,8 +1584,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+ 		if (fgp_flags & FGP_ACCESSED)
+ 			__SetPageReferenced(page);
+ 
+-		err = add_to_page_cache_lru(page, mapping, offset,
+-				gfp_mask & GFP_RECLAIM_MASK);
++		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+ 		if (unlikely(err)) {
+ 			put_page(page);
+ 			page = NULL;
+@@ -2388,7 +2387,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
+ 		if (!page)
+ 			return -ENOMEM;
+ 
+-		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
++		ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
+ 		if (ret == 0)
+ 			ret = mapping->a_ops->readpage(file, page);
+ 		else if (ret == -EEXIST)
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 320545b98ff5..91d3f062dd28 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -277,7 +277,8 @@ static int hmm_pfns_bad(unsigned long addr,
+ 			unsigned long end,
+ 			struct mm_walk *walk)
+ {
+-	struct hmm_range *range = walk->private;
++	struct hmm_vma_walk *hmm_vma_walk = walk->private;
++	struct hmm_range *range = hmm_vma_walk->range;
+ 	hmm_pfn_t *pfns = range->pfns;
+ 	unsigned long i;
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 293721f5da70..2d6b35234926 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -1131,6 +1131,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
+ 	} else {
+ 		newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
+ 					       vma->vm_page_prot));
++		/*
++		 * We're replacing an anonymous page with a zero page, which is
++		 * not anonymous. We need to do proper accounting otherwise we
++		 * will get wrong values in /proc, and a BUG message in dmesg
++		 * when tearing down the mm.
++		 */
++		dec_mm_counter(mm, MM_ANONPAGES);
+ 	}
+ 
+ 	flush_cache_page(vma, addr, pte_pfn(*ptep));
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 8291b75f42c8..2d4bf647cf01 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -502,6 +502,7 @@ static const char * const action_page_types[] = {
+ 	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
+ 	[MF_MSG_HUGE]			= "huge page",
+ 	[MF_MSG_FREE_HUGE]		= "free huge page",
++	[MF_MSG_NON_PMD_HUGE]		= "non-pmd-sized huge page",
+ 	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
+ 	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
+ 	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
+@@ -1084,6 +1085,21 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
++	 * simply disable it. In order to make it work properly, we need
++	 * make sure that:
++	 *  - conversion of a pud that maps an error hugetlb into hwpoison
++	 *    entry properly works, and
++	 *  - other mm code walking over page table is aware of pud-aligned
++	 *    hwpoison entries.
++	 */
++	if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
++		action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
++		res = -EBUSY;
++		goto out;
++	}
++
+ 	if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
+ 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
+ 		res = -EBUSY;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index 586f31261c83..8369572e1f7d 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -2501,13 +2501,13 @@ void account_page_redirty(struct page *page)
+ 	if (mapping && mapping_cap_account_dirty(mapping)) {
+ 		struct inode *inode = mapping->host;
+ 		struct bdi_writeback *wb;
+-		bool locked;
++		struct wb_lock_cookie cookie = {};
+ 
+-		wb = unlocked_inode_to_wb_begin(inode, &locked);
++		wb = unlocked_inode_to_wb_begin(inode, &cookie);
+ 		current->nr_dirtied--;
+ 		dec_node_page_state(page, NR_DIRTIED);
+ 		dec_wb_stat(wb, WB_DIRTIED);
+-		unlocked_inode_to_wb_end(inode, locked);
++		unlocked_inode_to_wb_end(inode, &cookie);
+ 	}
+ }
+ EXPORT_SYMBOL(account_page_redirty);
+@@ -2613,15 +2613,15 @@ void __cancel_dirty_page(struct page *page)
+ 	if (mapping_cap_account_dirty(mapping)) {
+ 		struct inode *inode = mapping->host;
+ 		struct bdi_writeback *wb;
+-		bool locked;
++		struct wb_lock_cookie cookie = {};
+ 
+ 		lock_page_memcg(page);
+-		wb = unlocked_inode_to_wb_begin(inode, &locked);
++		wb = unlocked_inode_to_wb_begin(inode, &cookie);
+ 
+ 		if (TestClearPageDirty(page))
+ 			account_page_cleaned(page, mapping, wb);
+ 
+-		unlocked_inode_to_wb_end(inode, locked);
++		unlocked_inode_to_wb_end(inode, &cookie);
+ 		unlock_page_memcg(page);
+ 	} else {
+ 		ClearPageDirty(page);
+@@ -2653,7 +2653,7 @@ int clear_page_dirty_for_io(struct page *page)
+ 	if (mapping && mapping_cap_account_dirty(mapping)) {
+ 		struct inode *inode = mapping->host;
+ 		struct bdi_writeback *wb;
+-		bool locked;
++		struct wb_lock_cookie cookie = {};
+ 
+ 		/*
+ 		 * Yes, Virginia, this is indeed insane.
+@@ -2690,14 +2690,14 @@ int clear_page_dirty_for_io(struct page *page)
+ 		 * always locked coming in here, so we get the desired
+ 		 * exclusion.
+ 		 */
+-		wb = unlocked_inode_to_wb_begin(inode, &locked);
++		wb = unlocked_inode_to_wb_begin(inode, &cookie);
+ 		if (TestClearPageDirty(page)) {
+ 			dec_lruvec_page_state(page, NR_FILE_DIRTY);
+ 			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
+ 			dec_wb_stat(wb, WB_RECLAIMABLE);
+ 			ret = 1;
+ 		}
+-		unlocked_inode_to_wb_end(inode, locked);
++		unlocked_inode_to_wb_end(inode, &cookie);
+ 		return ret;
+ 	}
+ 	return TestClearPageDirty(page);
+diff --git a/mm/slab.c b/mm/slab.c
+index 9095c3945425..a76006aae857 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -4074,7 +4074,8 @@ static void cache_reap(struct work_struct *w)
+ 	next_reap_node();
+ out:
+ 	/* Set up the next iteration */
+-	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
++	schedule_delayed_work_on(smp_processor_id(), work,
++				round_jiffies_relative(REAPTIMEOUT_AC));
+ }
+ 
+ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index fc97fc3ed637..0178ea878b75 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
+ 	struct dentry *clnt_dir = pipe_dentry->d_parent;
+ 	struct dentry *gssd_dir = clnt_dir->d_parent;
+ 
++	dget(pipe_dentry);
+ 	__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
+ 	__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
+ 	__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index f0855a959a27..4bc0f4d94a01 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -1366,7 +1366,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
+ 
+ 	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
+ 
+-	queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
++	queue_work(rpcrdma_receive_wq, &rep->rr_work);
+ 	return;
+ 
+ out_badstatus:
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 4b1ecfe979cf..f86021e3b853 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -52,7 +52,6 @@
+ #include <linux/slab.h>
+ #include <linux/seq_file.h>
+ #include <linux/sunrpc/addr.h>
+-#include <linux/smp.h>
+ 
+ #include "xprt_rdma.h"
+ 
+@@ -651,7 +650,6 @@ xprt_rdma_allocate(struct rpc_task *task)
+ 	if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
+ 		goto out_fail;
+ 
+-	req->rl_cpu = smp_processor_id();
+ 	req->rl_connect_cookie = 0;	/* our reserved value */
+ 	rpcrdma_set_xprtdata(rqst, req);
+ 	rqst->rq_buffer = req->rl_sendbuf->rg_base;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index e6f84a6434a0..25b0ecbd37e2 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -250,7 +250,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ 		wait_for_completion(&ia->ri_remove_done);
+ 
+ 		ia->ri_id = NULL;
+-		ia->ri_pd = NULL;
+ 		ia->ri_device = NULL;
+ 		/* Return 1 to ensure the core destroys the id. */
+ 		return 1;
+@@ -445,7 +444,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
+ 		ia->ri_id->qp = NULL;
+ 	}
+ 	ib_free_cq(ep->rep_attr.recv_cq);
++	ep->rep_attr.recv_cq = NULL;
+ 	ib_free_cq(ep->rep_attr.send_cq);
++	ep->rep_attr.send_cq = NULL;
+ 
+ 	/* The ULP is responsible for ensuring all DMA
+ 	 * mappings and MRs are gone.
+@@ -458,6 +459,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
+ 		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
+ 	}
+ 	rpcrdma_mrs_destroy(buf);
++	ib_dealloc_pd(ia->ri_pd);
++	ia->ri_pd = NULL;
+ 
+ 	/* Allow waiters to continue */
+ 	complete(&ia->ri_remove_done);
+@@ -628,14 +631,16 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
+ {
+ 	cancel_delayed_work_sync(&ep->rep_connect_worker);
+ 
+-	if (ia->ri_id->qp) {
++	if (ia->ri_id && ia->ri_id->qp) {
+ 		rpcrdma_ep_disconnect(ep, ia);
+ 		rdma_destroy_qp(ia->ri_id);
+ 		ia->ri_id->qp = NULL;
+ 	}
+ 
+-	ib_free_cq(ep->rep_attr.recv_cq);
+-	ib_free_cq(ep->rep_attr.send_cq);
++	if (ep->rep_attr.recv_cq)
++		ib_free_cq(ep->rep_attr.recv_cq);
++	if (ep->rep_attr.send_cq)
++		ib_free_cq(ep->rep_attr.send_cq);
+ }
+ 
+ /* Re-establish a connection after a device removal event.
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 69883a960a3f..430a6de8300e 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -334,7 +334,6 @@ enum {
+ struct rpcrdma_buffer;
+ struct rpcrdma_req {
+ 	struct list_head	rl_list;
+-	int			rl_cpu;
+ 	unsigned int		rl_connect_cookie;
+ 	struct rpcrdma_buffer	*rl_buffer;
+ 	struct rpcrdma_rep	*rl_reply;
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 441405081195..1980f68246cb 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -823,8 +823,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
+ 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+ 
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+-				     bool trylock)
++/* parameter locking: returns immediately if tried during streaming */
++static int lock_params(struct snd_pcm_runtime *runtime)
++{
++	if (mutex_lock_interruptible(&runtime->oss.params_lock))
++		return -ERESTARTSYS;
++	if (atomic_read(&runtime->oss.rw_ref)) {
++		mutex_unlock(&runtime->oss.params_lock);
++		return -EBUSY;
++	}
++	return 0;
++}
++
++static void unlock_params(struct snd_pcm_runtime *runtime)
++{
++	mutex_unlock(&runtime->oss.params_lock);
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct snd_pcm_hw_params *params, *sparams;
+@@ -838,11 +855,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ 	const struct snd_mask *sformat_mask;
+ 	struct snd_mask mask;
+ 
+-	if (trylock) {
+-		if (!(mutex_trylock(&runtime->oss.params_lock)))
+-			return -EAGAIN;
+-	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+-		return -ERESTARTSYS;
++	if (!runtime->oss.params)
++		return 0;
+ 	sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
+ 	params = kmalloc(sizeof(*params), GFP_KERNEL);
+ 	sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
+@@ -1068,6 +1082,23 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ 	kfree(sw_params);
+ 	kfree(params);
+ 	kfree(sparams);
++	return err;
++}
++
++/* this one takes the lock by itself */
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++				     bool trylock)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	int err;
++
++	if (trylock) {
++		if (!(mutex_trylock(&runtime->oss.params_lock)))
++			return -EAGAIN;
++	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
++		return -ERESTARTSYS;
++
++	err = snd_pcm_oss_change_params_locked(substream);
+ 	mutex_unlock(&runtime->oss.params_lock);
+ 	return err;
+ }
+@@ -1096,6 +1127,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+ 	return 0;
+ }
+ 
++/* call with params_lock held */
++/* NOTE: this always call PREPARE unconditionally no matter whether
++ * runtime->oss.prepare is set or not
++ */
+ static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
+ {
+ 	int err;
+@@ -1120,14 +1155,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_runtime *runtime;
+ 	int err;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.params) {
+ 		err = snd_pcm_oss_change_params(substream, false);
+ 		if (err < 0)
+ 			return err;
+ 	}
++	if (runtime->oss.prepare) {
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
++		err = snd_pcm_oss_prepare(substream);
++		mutex_unlock(&runtime->oss.params_lock);
++		if (err < 0)
++			return err;
++	}
++	return 0;
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
++{
++	struct snd_pcm_runtime *runtime;
++	int err;
++
++	runtime = substream->runtime;
++	if (runtime->oss.params) {
++		err = snd_pcm_oss_change_params_locked(substream);
++		if (err < 0)
++			return err;
++	}
+ 	if (runtime->oss.prepare) {
+ 		err = snd_pcm_oss_prepare(substream);
+ 		if (err < 0)
+@@ -1332,13 +1388,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 	if (atomic_read(&substream->mmap_count))
+ 		return -ENXIO;
+ 
+-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-		return tmp;
++	atomic_inc(&runtime->oss.rw_ref);
+ 	while (bytes > 0) {
+ 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ 			tmp = -ERESTARTSYS;
+ 			break;
+ 		}
++		tmp = snd_pcm_oss_make_ready_locked(substream);
++		if (tmp < 0)
++			goto err;
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			tmp = bytes;
+ 			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
+@@ -1394,6 +1452,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 		}
+ 		tmp = 0;
+ 	}
++	atomic_dec(&runtime->oss.rw_ref);
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1439,13 +1498,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 	if (atomic_read(&substream->mmap_count))
+ 		return -ENXIO;
+ 
+-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-		return tmp;
++	atomic_inc(&runtime->oss.rw_ref);
+ 	while (bytes > 0) {
+ 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ 			tmp = -ERESTARTSYS;
+ 			break;
+ 		}
++		tmp = snd_pcm_oss_make_ready_locked(substream);
++		if (tmp < 0)
++			goto err;
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			if (runtime->oss.buffer_used == 0) {
+ 				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
+@@ -1486,6 +1547,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 		}
+ 		tmp = 0;
+ 	}
++	atomic_dec(&runtime->oss.rw_ref);
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1501,10 +1563,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
+ 			continue;
+ 		runtime = substream->runtime;
+ 		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.prepare = 1;
+ 		runtime->oss.buffer_used = 0;
+ 		runtime->oss.prev_hw_ptr_period = 0;
+ 		runtime->oss.period_ptr = 0;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 	return 0;
+ }
+@@ -1590,9 +1654,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 			goto __direct;
+ 		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ 			return err;
++		atomic_inc(&runtime->oss.rw_ref);
++		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
++			atomic_dec(&runtime->oss.rw_ref);
++			return -ERESTARTSYS;
++		}
+ 		format = snd_pcm_oss_format_from(runtime->oss.format);
+ 		width = snd_pcm_format_physical_width(format);
+-		mutex_lock(&runtime->oss.params_lock);
+ 		if (runtime->oss.buffer_used > 0) {
+ #ifdef OSS_DEBUG
+ 			pcm_dbg(substream->pcm, "sync: buffer_used\n");
+@@ -1602,10 +1670,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 						   runtime->oss.buffer + runtime->oss.buffer_used,
+ 						   size);
+ 			err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
+-			if (err < 0) {
+-				mutex_unlock(&runtime->oss.params_lock);
+-				return err;
+-			}
++			if (err < 0)
++				goto unlock;
+ 		} else if (runtime->oss.period_ptr > 0) {
+ #ifdef OSS_DEBUG
+ 			pcm_dbg(substream->pcm, "sync: period_ptr\n");
+@@ -1615,10 +1681,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 						   runtime->oss.buffer,
+ 						   size * 8 / width);
+ 			err = snd_pcm_oss_sync1(substream, size);
+-			if (err < 0) {
+-				mutex_unlock(&runtime->oss.params_lock);
+-				return err;
+-			}
++			if (err < 0)
++				goto unlock;
+ 		}
+ 		/*
+ 		 * The ALSA's period might be a bit large than OSS one.
+@@ -1632,7 +1696,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 			else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
+ 				snd_pcm_lib_writev(substream, NULL, size);
+ 		}
++unlock:
+ 		mutex_unlock(&runtime->oss.params_lock);
++		atomic_dec(&runtime->oss.rw_ref);
++		if (err < 0)
++			return err;
+ 		/*
+ 		 * finish sync: drain the buffer
+ 		 */
+@@ -1643,7 +1711,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 		substream->f_flags = saved_f_flags;
+ 		if (err < 0)
+ 			return err;
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.prepare = 1;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 
+ 	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
+@@ -1654,8 +1724,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ 		if (err < 0)
+ 			return err;
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.buffer_used = 0;
+ 		runtime->oss.prepare = 1;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 	return 0;
+ }
+@@ -1667,6 +1739,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ 		struct snd_pcm_runtime *runtime;
++		int err;
++
+ 		if (substream == NULL)
+ 			continue;
+ 		runtime = substream->runtime;
+@@ -1674,10 +1748,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+ 			rate = 1000;
+ 		else if (rate > 192000)
+ 			rate = 192000;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
+ 		if (runtime->oss.rate != rate) {
+ 			runtime->oss.params = 1;
+ 			runtime->oss.rate = rate;
+ 		}
++		unlock_params(runtime);
+ 	}
+ 	return snd_pcm_oss_get_rate(pcm_oss_file);
+ }
+@@ -1702,13 +1780,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ 		struct snd_pcm_runtime *runtime;
++		int err;
++
+ 		if (substream == NULL)
+ 			continue;
+ 		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
+ 		if (runtime->oss.channels != channels) {
+ 			runtime->oss.params = 1;
+ 			runtime->oss.channels = channels;
+ 		}
++		unlock_params(runtime);
+ 	}
+ 	return snd_pcm_oss_get_channels(pcm_oss_file);
+ }
+@@ -1781,6 +1865,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
+ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
+ {
+ 	int formats, idx;
++	int err;
+ 	
+ 	if (format != AFMT_QUERY) {
+ 		formats = snd_pcm_oss_get_formats(pcm_oss_file);
+@@ -1794,10 +1879,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
+ 			if (substream == NULL)
+ 				continue;
+ 			runtime = substream->runtime;
++			err = lock_params(runtime);
++			if (err < 0)
++				return err;
+ 			if (runtime->oss.format != format) {
+ 				runtime->oss.params = 1;
+ 				runtime->oss.format = format;
+ 			}
++			unlock_params(runtime);
+ 		}
+ 	}
+ 	return snd_pcm_oss_get_format(pcm_oss_file);
+@@ -1817,8 +1906,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
+ {
+ 	struct snd_pcm_runtime *runtime;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (subdivide == 0) {
+ 		subdivide = runtime->oss.subdivision;
+@@ -1842,9 +1929,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
+ 
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++		struct snd_pcm_runtime *runtime;
++
+ 		if (substream == NULL)
+ 			continue;
+-		if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
++		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
++		err = snd_pcm_oss_set_subdivide1(substream, subdivide);
++		unlock_params(runtime);
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return err;
+@@ -1854,8 +1949,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
+ {
+ 	struct snd_pcm_runtime *runtime;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.subdivision || runtime->oss.fragshift)
+ 		return -EINVAL;
+@@ -1875,9 +1968,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
+ 
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++		struct snd_pcm_runtime *runtime;
++
+ 		if (substream == NULL)
+ 			continue;
+-		if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
++		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
++		err = snd_pcm_oss_set_fragment1(substream, val);
++		unlock_params(runtime);
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return err;
+@@ -1961,6 +2062,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 	}
+       	if (psubstream) {
+       		runtime = psubstream->runtime;
++		cmd = 0;
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
+ 		if (trigger & PCM_ENABLE_OUTPUT) {
+ 			if (runtime->oss.trigger)
+ 				goto _skip1;
+@@ -1978,13 +2082,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 			cmd = SNDRV_PCM_IOCTL_DROP;
+ 			runtime->oss.prepare = 1;
+ 		}
+-		err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+-		if (err < 0)
+-			return err;
+-	}
+  _skip1:
++		mutex_unlock(&runtime->oss.params_lock);
++		if (cmd) {
++			err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
++			if (err < 0)
++				return err;
++		}
++	}
+ 	if (csubstream) {
+       		runtime = csubstream->runtime;
++		cmd = 0;
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
+ 		if (trigger & PCM_ENABLE_INPUT) {
+ 			if (runtime->oss.trigger)
+ 				goto _skip2;
+@@ -1999,11 +2109,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 			cmd = SNDRV_PCM_IOCTL_DROP;
+ 			runtime->oss.prepare = 1;
+ 		}
+-		err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+-		if (err < 0)
+-			return err;
+-	}
+  _skip2:
++		mutex_unlock(&runtime->oss.params_lock);
++		if (cmd) {
++			err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
++			if (err < 0)
++				return err;
++		}
++	}
+ 	return 0;
+ }
+ 
+@@ -2255,6 +2368,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
+ 	runtime->oss.maxfrags = 0;
+ 	runtime->oss.subdivision = 0;
+ 	substream->pcm_release = snd_pcm_oss_release_substream;
++	atomic_set(&runtime->oss.rw_ref, 0);
+ }
+ 
+ static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index 09ee8c6b9f75..66ac89aad681 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -28,6 +28,7 @@
+ #include <sound/core.h>
+ #include <sound/minors.h>
+ #include <sound/pcm.h>
++#include <sound/timer.h>
+ #include <sound/control.h>
+ #include <sound/info.h>
+ 
+@@ -1054,8 +1055,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
+ 	snd_free_pages((void*)runtime->control,
+ 		       PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
+ 	kfree(runtime->hw_constraints.rules);
+-	kfree(runtime);
++	/* Avoid concurrent access to runtime via PCM timer interface */
++	if (substream->timer)
++		spin_lock_irq(&substream->timer->lock);
+ 	substream->runtime = NULL;
++	if (substream->timer)
++		spin_unlock_irq(&substream->timer->lock);
++	kfree(runtime);
+ 	put_pid(substream->pid);
+ 	substream->pid = NULL;
+ 	substream->pstr->substream_opened--;
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index f69764d7cdd7..e30e30ba6e39 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+ 	struct snd_rawmidi_params params;
+ 	unsigned int val;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(params.stream, &src->stream) ||
+ 	    get_user(params.buffer_size, &src->buffer_size) ||
+ 	    get_user(params.avail_min, &src->avail_min) ||
+@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+ 	params.no_active_sensing = val;
+ 	switch (params.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		return snd_rawmidi_output_params(rfile->output, &params);
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		return snd_rawmidi_input_params(rfile->input, &params);
+ 	}
+ 	return -EINVAL;
+@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+ 	int err;
+ 	struct snd_rawmidi_status status;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(status.stream, &src->stream))
+ 		return -EFAULT;
+ 
+ 	switch (status.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		err = snd_rawmidi_output_status(rfile->output, &status);
+ 		break;
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		err = snd_rawmidi_input_status(rfile->input, &status);
+ 		break;
+ 	default:
+@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+ 	int err;
+ 	struct snd_rawmidi_status status;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(status.stream, &src->stream))
+ 		return -EFAULT;
+ 
+ 	switch (status.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		err = snd_rawmidi_output_status(rfile->output, &status);
+ 		break;
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		err = snd_rawmidi_input_status(rfile->input, &status);
+ 		break;
+ 	default:
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c507c69029e3..738e1fe90312 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1645,7 +1645,8 @@ static void azx_check_snoop_available(struct azx *chip)
+ 		 */
+ 		u8 val;
+ 		pci_read_config_byte(chip->pci, 0x42, &val);
+-		if (!(val & 0x80) && chip->pci->revision == 0x30)
++		if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
++				      chip->pci->revision == 0x20))
+ 			snoop = false;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index aef1f52db7d9..fc77bf7a1544 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6370,6 +6370,8 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ 			{ }
+ 		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MIC
+ 	},
+ };
+ 
+@@ -6573,6 +6575,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++	SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ 	SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
+index 9b341c23f62b..5e80867d09ef 100644
+--- a/sound/soc/codecs/ssm2602.c
++++ b/sound/soc/codecs/ssm2602.c
+@@ -54,10 +54,17 @@ struct ssm2602_priv {
+  * using 2 wire for device control, so we cache them instead.
+  * There is no point in caching the reset register
+  */
+-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
+-	0x0097, 0x0097, 0x0079, 0x0079,
+-	0x000a, 0x0008, 0x009f, 0x000a,
+-	0x0000, 0x0000
++static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
++	{ .reg = 0x00, .def = 0x0097 },
++	{ .reg = 0x01, .def = 0x0097 },
++	{ .reg = 0x02, .def = 0x0079 },
++	{ .reg = 0x03, .def = 0x0079 },
++	{ .reg = 0x04, .def = 0x000a },
++	{ .reg = 0x05, .def = 0x0008 },
++	{ .reg = 0x06, .def = 0x009f },
++	{ .reg = 0x07, .def = 0x000a },
++	{ .reg = 0x08, .def = 0x0000 },
++	{ .reg = 0x09, .def = 0x0000 }
+ };
+ 
+ 
+@@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = {
+ 	.volatile_reg = ssm2602_register_volatile,
+ 
+ 	.cache_type = REGCACHE_RBTREE,
+-	.reg_defaults_raw = ssm2602_reg,
+-	.num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
++	.reg_defaults = ssm2602_reg,
++	.num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
+ };
+ EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
+ 
+diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
+index 01a50413c66f..782c580b7aa3 100644
+--- a/sound/soc/soc-topology.c
++++ b/sound/soc/soc-topology.c
+@@ -523,6 +523,7 @@ static void remove_widget(struct snd_soc_component *comp,
+ 				kfree(se->dobj.control.dtexts[j]);
+ 
+ 			kfree(se);
++			kfree(w->kcontrol_news[i].name);
+ 		}
+ 		kfree(w->kcontrol_news);
+ 	} else {
+@@ -540,6 +541,7 @@ static void remove_widget(struct snd_soc_component *comp,
+ 			 */
+ 			kfree((void *)kcontrol->private_value);
+ 			snd_ctl_remove(card, kcontrol);
++			kfree(w->kcontrol_news[i].name);
+ 		}
+ 		kfree(w->kcontrol_news);
+ 	}
+@@ -1233,7 +1235,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ 		dev_dbg(tplg->dev, " adding DAPM widget mixer control %s at %d\n",
+ 			mc->hdr.name, i);
+ 
+-		kc[i].name = mc->hdr.name;
++		kc[i].name = kstrdup(mc->hdr.name, GFP_KERNEL);
++		if (kc[i].name == NULL)
++			goto err_str;
+ 		kc[i].private_value = (long)sm;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ 		kc[i].access = mc->hdr.access;
+@@ -1278,8 +1282,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
+ err_str:
+ 	kfree(sm);
+ err:
+-	for (--i; i >= 0; i--)
++	for (--i; i >= 0; i--) {
+ 		kfree((void *)kc[i].private_value);
++		kfree(kc[i].name);
++	}
+ 	kfree(kc);
+ 	return NULL;
+ }
+@@ -1310,7 +1316,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
+ 		dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
+ 			ec->hdr.name);
+ 
+-		kc[i].name = ec->hdr.name;
++		kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL);
++		if (kc[i].name == NULL)
++			goto err_se;
+ 		kc[i].private_value = (long)se;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ 		kc[i].access = ec->hdr.access;
+@@ -1386,6 +1394,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
+ 			kfree(se->dobj.control.dtexts[j]);
+ 
+ 		kfree(se);
++		kfree(kc[i].name);
+ 	}
+ err:
+ 	kfree(kc);
+@@ -1424,7 +1433,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
+ 			"ASoC: adding bytes kcontrol %s with access 0x%x\n",
+ 			be->hdr.name, be->hdr.access);
+ 
+-		kc[i].name = be->hdr.name;
++		kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL);
++		if (kc[i].name == NULL)
++			goto err;
+ 		kc[i].private_value = (long)sbe;
+ 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ 		kc[i].access = be->hdr.access;
+@@ -1454,8 +1465,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
+ 	return kc;
+ 
+ err:
+-	for (--i; i >= 0; i--)
++	for (--i; i >= 0; i--) {
+ 		kfree((void *)kc[i].private_value);
++		kfree(kc[i].name);
++	}
+ 
+ 	kfree(kc);
+ 	return NULL;
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index 6d7cde56a355..e2cf55c53ea8 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
+ 	}
+ 
+ 	usb_fill_int_urb(urb, line6->usbdev,
+-			 usb_sndbulkpipe(line6->usbdev,
++			 usb_sndintpipe(line6->usbdev,
+ 					 line6->properties->ep_ctrl_w),
+ 			 transfer_buffer, length, midi_sent, line6,
+ 			 line6->interval);
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index 465095355666..a8f07243aa9f 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -316,21 +316,24 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
+ 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ 	struct vgic_irq *irq;
+ 	u32 *intids;
+-	int irq_count = dist->lpi_list_count, i = 0;
++	int irq_count, i = 0;
+ 
+ 	/*
+-	 * We use the current value of the list length, which may change
+-	 * after the kmalloc. We don't care, because the guest shouldn't
+-	 * change anything while the command handling is still running,
+-	 * and in the worst case we would miss a new IRQ, which one wouldn't
+-	 * expect to be covered by this command anyway.
++	 * There is an obvious race between allocating the array and LPIs
++	 * being mapped/unmapped. If we ended up here as a result of a
++	 * command, we're safe (locks are held, preventing another
++	 * command). If coming from another path (such as enabling LPIs),
++	 * we must be careful not to overrun the array.
+ 	 */
++	irq_count = READ_ONCE(dist->lpi_list_count);
+ 	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
+ 	if (!intids)
+ 		return -ENOMEM;
+ 
+ 	spin_lock(&dist->lpi_list_lock);
+ 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
++		if (i == irq_count)
++			break;
+ 		/* We don't need to "get" the IRQ, as we hold the list lock. */
+ 		if (irq->target_vcpu != vcpu)
+ 			continue;


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-12 12:21 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-12 12:21 UTC (permalink / raw
  To: gentoo-commits

commit:     c4ffccb9e10f09f2ccef41fbdaecef944b8fca90
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 12 12:21:13 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 12 12:21:13 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c4ffccb9

Linux patch 4.16.2

 0000_README             |   4 +
 1001_linux-4.16.2.patch | 369 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 373 insertions(+)

diff --git a/0000_README b/0000_README
index c464ac8..d6bf216 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.16.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.16.1
 
+Patch:  1001_linux-4.16.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.16.2.patch b/1001_linux-4.16.2.patch
new file mode 100644
index 0000000..b36457a
--- /dev/null
+++ b/1001_linux-4.16.2.patch
@@ -0,0 +1,369 @@
+diff --git a/Makefile b/Makefile
+index 1773c718074e..f0040b05df30 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 9fe85300e7b6..5754116a6a4d 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&hw->restart_work, sky2_restart);
+ 
+ 	pci_set_drvdata(pdev, hw);
+-	pdev->d3_delay = 150;
++	pdev->d3_delay = 200;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+index 39abac678b71..99bb679a9801 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+@@ -71,10 +71,11 @@
+ /* CPP address to retrieve the data from */
+ #define NSP_BUFFER		0x10
+ #define   NSP_BUFFER_CPP	GENMASK_ULL(63, 40)
+-#define   NSP_BUFFER_PCIE	GENMASK_ULL(39, 38)
+-#define   NSP_BUFFER_ADDRESS	GENMASK_ULL(37, 0)
++#define   NSP_BUFFER_ADDRESS	GENMASK_ULL(39, 0)
+ 
+ #define NSP_DFLT_BUFFER		0x18
++#define   NSP_DFLT_BUFFER_CPP	GENMASK_ULL(63, 40)
++#define   NSP_DFLT_BUFFER_ADDRESS	GENMASK_ULL(39, 0)
+ 
+ #define NSP_DFLT_BUFFER_CONFIG	0x20
+ #define   NSP_DFLT_BUFFER_SIZE_MB	GENMASK_ULL(7, 0)
+@@ -427,8 +428,8 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+ 	if (err < 0)
+ 		return err;
+ 
+-	cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+-	cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
++	cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
++	cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);
+ 
+ 	if (in_buf && in_size) {
+ 		err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 6dde9a0cfe76..9b70a3af678e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -464,7 +464,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	po->chan.mtu = dst_mtu(&rt->dst);
+ 	if (!po->chan.mtu)
+ 		po->chan.mtu = PPP_MRU;
+-	ip_rt_put(rt);
+ 	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+ 
+ 	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+diff --git a/drivers/sbus/char/Kconfig b/drivers/sbus/char/Kconfig
+index a785aa7660c3..bf3c5f735614 100644
+--- a/drivers/sbus/char/Kconfig
++++ b/drivers/sbus/char/Kconfig
+@@ -72,7 +72,8 @@ config DISPLAY7SEG
+ 
+ config ORACLE_DAX
+ 	tristate "Oracle Data Analytics Accelerator"
+-	default m if SPARC64
++	depends on SPARC64
++	default m
+ 	help
+ 	 Driver for Oracle Data Analytics Accelerator, which is
+ 	 a coprocessor that performs database operations in hardware.
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index f7e83f6d2e64..236452ebbd9e 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -29,6 +29,7 @@
+ #include <linux/net_tstamp.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
++#include <linux/phy.h>
+ #include <net/arp.h>
+ #include <net/switchdev.h>
+ 
+@@ -665,8 +666,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ {
+ 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ 	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
++	struct phy_device *phydev = vlan->real_dev->phydev;
+ 
+-	if (ops->get_ts_info) {
++	if (phydev && phydev->drv && phydev->drv->ts_info) {
++		 return phydev->drv->ts_info(phydev, info);
++	} else if (ops->get_ts_info) {
+ 		return ops->get_ts_info(vlan->real_dev, info);
+ 	} else {
+ 		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ef0cc6ea5f8d..c4aa2941dbfd 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1027,7 +1027,7 @@ bool dev_valid_name(const char *name)
+ {
+ 	if (*name == '\0')
+ 		return false;
+-	if (strlen(name) >= IFNAMSIZ)
++	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
+ 		return false;
+ 	if (!strcmp(name, ".") || !strcmp(name, ".."))
+ 		return false;
+diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
+index 70de7895e5b8..053731473c99 100644
+--- a/net/dsa/dsa_priv.h
++++ b/net/dsa/dsa_priv.h
+@@ -126,6 +126,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+ 	struct dsa_port *cpu_dp = dev->dsa_ptr;
+ 	struct dsa_switch_tree *dst = cpu_dp->dst;
+ 	struct dsa_switch *ds;
++	struct dsa_port *slave_port;
+ 
+ 	if (device < 0 || device >= DSA_MAX_SWITCHES)
+ 		return NULL;
+@@ -137,7 +138,12 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
+ 	if (port < 0 || port >= ds->num_ports)
+ 		return NULL;
+ 
+-	return ds->ports[port].slave;
++	slave_port = &ds->ports[port];
++
++	if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
++		return NULL;
++
++	return slave_port->slave;
+ }
+ 
+ /* port.c */
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index f28f06c91ead..7333db17c581 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
+ 	/*unsigned long now; */
+ 	struct net *net = dev_net(dev);
+ 
+-	rt = ip_route_output(net, sip, tip, 0, 0);
++	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
+ 	if (IS_ERR(rt))
+ 		return 1;
+ 	if (rt->dst.dev != dev) {
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index a7fd1c5a2a14..57478d68248d 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
+ 	struct net_device *dev;
+ 	char name[IFNAMSIZ];
+ 
+-	if (parms->name[0])
++	err = -E2BIG;
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else {
+-		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
+-			err = -E2BIG;
++	} else {
++		if (strlen(ops->kind) > (IFNAMSIZ - 3))
+ 			goto failed;
+-		}
+ 		strlcpy(name, ops->kind, IFNAMSIZ);
+ 		strncat(name, "%d", 2);
+ 	}
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 1bbd0930063e..197fcae855ca 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -335,11 +335,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 	if (t || !create)
+ 		return t;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			return NULL;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "ip6gre%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6gre_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 5cb18c8ba9b2..4065ae0c32a0 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -375,6 +375,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
+ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
+ 				     struct sk_buff *skb)
+ {
++	struct dst_entry *dst = skb_dst(skb);
++
++	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
++	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
++
+ 	return dst_output(net, sk, skb);
+ }
+ 
+@@ -569,8 +574,6 @@ int ip6_forward(struct sk_buff *skb)
+ 
+ 	hdr->hop_limit--;
+ 
+-	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+-	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+ 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
+ 		       net, NULL, skb, skb->dev, dst->dev,
+ 		       ip6_forward_finish);
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 6e0f21eed88a..179313b0926c 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -297,13 +297,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+ 	struct net_device *dev;
+ 	struct ip6_tnl *t;
+ 	char name[IFNAMSIZ];
+-	int err = -ENOMEM;
++	int err = -E2BIG;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6tnl%%d");
+-
++	}
++	err = -ENOMEM;
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6_tnl_dev_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index ce18cd20389d..3726dc797847 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
+ 	char name[IFNAMSIZ];
+ 	int err;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6_vti%%d");
++	}
+ 
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 0195598f7bb5..e85791854c87 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -250,11 +250,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ 	if (!create)
+ 		goto failed;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "sit%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ipip6_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index 9d2cabf1dc7e..f3eee5326307 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -248,10 +248,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 
+ static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+ {
+-	if (cfg->is_ebpf)
+-		bpf_prog_put(cfg->filter);
+-	else
+-		bpf_prog_destroy(cfg->filter);
++	struct bpf_prog *filter = cfg->filter;
++
++	if (filter) {
++		if (cfg->is_ebpf)
++			bpf_prog_put(filter);
++		else
++			bpf_prog_destroy(filter);
++	}
+ 
+ 	kfree(cfg->bpf_ops);
+ 	kfree(cfg->bpf_name);
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index ed8b6a24b9e9..bac47b5d18fd 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -489,6 +489,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
+ 				RCU_INIT_POINTER(*kp, key->next);
+ 
+ 				tcf_unbind_filter(tp, &key->res);
++				idr_remove(&ht->handle_idr, key->handle);
+ 				tcf_exts_get_net(&key->exts);
+ 				call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
+ 				return 0;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index e35d4f73d2df..f6d3d0c1e133 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -728,8 +728,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ 			sctp_v6_map_v4(addr);
+ 	}
+ 
+-	if (addr->sa.sa_family == AF_INET)
++	if (addr->sa.sa_family == AF_INET) {
++		memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ 		return sizeof(struct sockaddr_in);
++	}
+ 	return sizeof(struct sockaddr_in6);
+ }
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index bf271f8c2dc9..7160c2e9b768 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -354,11 +354,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+ 		return NULL;
+ 
+-	/* V4 mapped address are really of AF_INET family */
+-	if (addr->sa.sa_family == AF_INET6 &&
+-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+-	    !opt->pf->af_supported(AF_INET, opt))
+-		return NULL;
++	if (addr->sa.sa_family == AF_INET6) {
++		if (len < SIN6_LEN_RFC2133)
++			return NULL;
++		/* V4 mapped address are really of AF_INET family */
++		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++		    !opt->pf->af_supported(AF_INET, opt))
++			return NULL;
++	}
+ 
+ 	/* If we get this far, af is valid. */
+ 	af = sctp_get_af_specific(addr->sa.sa_family);


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-04-08 14:33 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-04-08 14:33 UTC (permalink / raw
  To: gentoo-commits

commit:     54a987e8d15fbef1c13b71b424ebf95aa5905d57
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Apr  8 14:33:13 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Apr  8 14:33:13 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54a987e8

Linux patch 4.16.1

 0000_README             |    4 +
 1000_linux-4.16.1.patch | 1197 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1201 insertions(+)

diff --git a/0000_README b/0000_README
index 01553d4..c464ac8 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-4.16.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.16.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-4.16.1.patch b/1000_linux-4.16.1.patch
new file mode 100644
index 0000000..e02e1ee
--- /dev/null
+++ b/1000_linux-4.16.1.patch
@@ -0,0 +1,1197 @@
+diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
+index dad3b2ec66d4..aeb6db4e35c3 100644
+--- a/Documentation/devicetree/bindings/serial/8250.txt
++++ b/Documentation/devicetree/bindings/serial/8250.txt
+@@ -24,6 +24,7 @@ Required properties:
+ 	- "ti,da830-uart"
+ 	- "aspeed,ast2400-vuart"
+ 	- "aspeed,ast2500-vuart"
++	- "nuvoton,npcm750-uart"
+ 	- "serial" if the port type is unknown.
+ - reg : offset and length of the register set for the device.
+ - interrupts : should contain uart interrupt.
+diff --git a/Makefile b/Makefile
+index 363dd096e46e..1773c718074e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+ 
+diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
+index 30ef8e291271..c9919c2b7ad1 100644
+--- a/arch/arm/crypto/Makefile
++++ b/arch/arm/crypto/Makefile
+@@ -54,6 +54,7 @@ crct10dif-arm-ce-y	:= crct10dif-ce-core.o crct10dif-ce-glue.o
+ crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
+ chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
+ 
++ifdef REGENERATE_ARM_CRYPTO
+ quiet_cmd_perl = PERL    $@
+       cmd_perl = $(PERL) $(<) > $(@)
+ 
+@@ -62,5 +63,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
+ 
+ $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
+ 	$(call cmd,perl)
++endif
+ 
+ .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
+diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
+index cee9b8d9830b..dfe651bdf993 100644
+--- a/arch/arm64/crypto/Makefile
++++ b/arch/arm64/crypto/Makefile
+@@ -67,6 +67,7 @@ CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
+ $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
+ 	$(call if_changed_rule,cc_o_c)
+ 
++ifdef REGENERATE_ARM64_CRYPTO
+ quiet_cmd_perlasm = PERLASM $@
+       cmd_perlasm = $(PERL) $(<) void $(@)
+ 
+@@ -75,5 +76,6 @@ $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+ 
+ $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
+ 	$(call cmd,perlasm)
++endif
+ 
+ .PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index dbea6020ffe7..575292a33bdf 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ 	void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+ 	int err;
+ 
+-	fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
+-
+ 	err = blkcipher_walk_virt(desc, walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+@@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ 
+ 		/* Process multi-block batch */
+ 		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
++			fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
+ 			do {
+ 				fn(ctx, wdst, wsrc);
+ 
+diff --git a/block/bio.c b/block/bio.c
+index e1708db48258..53e0f0a1ed94 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -43,9 +43,9 @@
+  * break badly! cannot be bigger than what you can fit into an
+  * unsigned short
+  */
+-#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
++#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
+ static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
+-	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
++	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
+ };
+ #undef BV
+ 
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 266fc1d64f61..c03cc177870b 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -92,13 +92,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+ 
+ 	if (nbytes && walk->offset & alignmask && !err) {
+ 		walk->offset = ALIGN(walk->offset, alignmask + 1);
+-		walk->data += walk->offset;
+-
+ 		nbytes = min(nbytes,
+ 			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
+ 		walk->entrylen -= nbytes;
+ 
+-		return nbytes;
++		if (nbytes) {
++			walk->data += walk->offset;
++			return nbytes;
++		}
+ 	}
+ 
+ 	if (walk->flags & CRYPTO_ALG_ASYNC)
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index cbbd7c50ad19..1d813a6d3fec 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -313,7 +313,7 @@ static void exit_crypt(struct skcipher_request *req)
+ 	rctx->left = 0;
+ 
+ 	if (rctx->ext)
+-		kfree(rctx->ext);
++		kzfree(rctx->ext);
+ }
+ 
+ static int do_encrypt(struct skcipher_request *req, int err)
+diff --git a/crypto/testmgr.h b/crypto/testmgr.h
+index 6044f6906bd6..69fb51e7b6f1 100644
+--- a/crypto/testmgr.h
++++ b/crypto/testmgr.h
+@@ -548,7 +548,7 @@ static const struct akcipher_testvec rsa_tv_template[] = {
+ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
+ 	{
+ 	.key =
+-	"\x30\x82\x03\x1f\x02\x01\x10\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
++	"\x30\x82\x03\x1f\x02\x01\x00\x02\x82\x01\x01\x00\xd7\x1e\x77\x82"
+ 	"\x8c\x92\x31\xe7\x69\x02\xa2\xd5\x5c\x78\xde\xa2\x0c\x8f\xfe\x28"
+ 	"\x59\x31\xdf\x40\x9c\x60\x61\x06\xb9\x2f\x62\x40\x80\x76\xcb\x67"
+ 	"\x4a\xb5\x59\x56\x69\x17\x07\xfa\xf9\x4c\xbd\x6c\x37\x7a\x46\x7d"
+@@ -597,8 +597,8 @@ static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
+ 	"\xfe\xf8\x27\x1b\xd6\x55\x60\x5e\x48\xb7\x6d\x9a\xa8\x37\xf9\x7a"
+ 	"\xde\x1b\xcd\x5d\x1a\x30\xd4\xe9\x9e\x5b\x3c\x15\xf8\x9c\x1f\xda"
+ 	"\xd1\x86\x48\x55\xce\x83\xee\x8e\x51\xc7\xde\x32\x12\x47\x7d\x46"
+-	"\xb8\x35\xdf\x41\x02\x01\x30\x02\x01\x30\x02\x01\x30\x02\x01\x30"
+-	"\x02\x01\x30",
++	"\xb8\x35\xdf\x41\x02\x01\x00\x02\x01\x00\x02\x01\x00\x02\x01\x00"
++	"\x02\x01\x00",
+ 	.key_len = 804,
+ 	/*
+ 	 * m is SHA256 hash of following message:
+diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
+index 52ec5174bcb1..e7cb0c6ade81 100644
+--- a/drivers/base/arch_topology.c
++++ b/drivers/base/arch_topology.c
+@@ -169,11 +169,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+ }
+ 
+ #ifdef CONFIG_CPU_FREQ
+-static cpumask_var_t cpus_to_visit __initdata;
+-static void __init parsing_done_workfn(struct work_struct *work);
+-static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
++static cpumask_var_t cpus_to_visit;
++static void parsing_done_workfn(struct work_struct *work);
++static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+ 
+-static int __init
++static int
+ init_cpu_capacity_callback(struct notifier_block *nb,
+ 			   unsigned long val,
+ 			   void *data)
+@@ -209,7 +209,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
+ 	return 0;
+ }
+ 
+-static struct notifier_block init_cpu_capacity_notifier __initdata = {
++static struct notifier_block init_cpu_capacity_notifier = {
+ 	.notifier_call = init_cpu_capacity_callback,
+ };
+ 
+@@ -242,7 +242,7 @@ static int __init register_cpufreq_notifier(void)
+ }
+ core_initcall(register_cpufreq_notifier);
+ 
+-static void __init parsing_done_workfn(struct work_struct *work)
++static void parsing_done_workfn(struct work_struct *work)
+ {
+ 	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+ 					 CPUFREQ_POLICY_NOTIFIER);
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 40b9fb247010..47a4127a6067 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -1080,6 +1080,7 @@ static const struct hci_uart_proto bcm_proto = {
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id bcm_acpi_match[] = {
+ 	{ "BCM2E1A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
++	{ "BCM2E38", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E39", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E3A", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E3D", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+@@ -1092,12 +1093,17 @@ static const struct acpi_device_id bcm_acpi_match[] = {
+ 	{ "BCM2E67", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E71", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E72", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
++	{ "BCM2E74", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E7B", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E7C", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E7E", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
++	{ "BCM2E83", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
++	{ "BCM2E84", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
++	{ "BCM2E90", (kernel_ulong_t)&acpi_bcm_int_last_gpios },
+ 	{ "BCM2E95", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+ 	{ "BCM2E96", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+ 	{ "BCM2EA4", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
++	{ "BCM2EAA", (kernel_ulong_t)&acpi_bcm_int_first_gpios },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 052011bcf100..ffeb60d3434c 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -137,7 +137,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ 
+ 	while (count > 0) {
+ 		unsigned long remaining;
+-		int allowed;
++		int allowed, probe;
+ 
+ 		sz = size_inside_page(p, count);
+ 
+@@ -160,9 +160,9 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ 			if (!ptr)
+ 				goto failed;
+ 
+-			err = probe_kernel_read(bounce, ptr, sz);
++			probe = probe_kernel_read(bounce, ptr, sz);
+ 			unxlate_dev_mem_ptr(p, ptr);
+-			if (err)
++			if (probe)
+ 				goto failed;
+ 
+ 			remaining = copy_to_user(buf, bounce, sz);
+diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
+index e843cf410373..361e750f9cba 100644
+--- a/drivers/crypto/caam/ctrl.c
++++ b/drivers/crypto/caam/ctrl.c
+@@ -815,9 +815,6 @@ static int caam_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ caam_remove:
+-#ifdef CONFIG_DEBUG_FS
+-	debugfs_remove_recursive(ctrlpriv->dfs_root);
+-#endif
+ 	caam_remove(pdev);
+ 	return ret;
+ 
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 60fc0fa26fd3..26687f318de6 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+ 	}
+ 
+ 	/* Update result area if supplied */
+-	if (req->result)
++	if (req->result && rctx->final)
+ 		memcpy(req->result, rctx->iv, digest_size);
+ 
+ e_free:
+diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
+index e6db8672d89c..05850dfd7940 100644
+--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
++++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
+@@ -60,10 +60,9 @@ static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+ 
+ static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+ {
+-	if (ccp_version() > CCP_VERSION(3, 0))
+-		return CCP5_RSA_MAXMOD;
+-	else
+-		return CCP_RSA_MAXMOD;
++	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
++
++	return ctx->u.rsa.n_len;
+ }
+ 
+ static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8b9b16d433f7..871c9628a2ee 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+ 	}
+ 
+ 	/* Update result area if supplied */
+-	if (req->result)
++	if (req->result && rctx->final)
+ 		memcpy(req->result, rctx->ctx, digest_size);
+ 
+ e_free:
+diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
+index 225e74a7f724..0dd3a7ac1dd1 100644
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -894,7 +894,7 @@ static int safexcel_probe(struct platform_device *pdev)
+ 		return PTR_ERR(priv->base);
+ 	}
+ 
+-	priv->clk = of_clk_get(dev->of_node, 0);
++	priv->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (!IS_ERR(priv->clk)) {
+ 		ret = clk_prepare_enable(priv->clk);
+ 		if (ret) {
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 6882fa2f8bad..c805d0122c0b 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -832,8 +832,6 @@ struct talitos_ctx {
+ 	unsigned int keylen;
+ 	unsigned int enckeylen;
+ 	unsigned int authkeylen;
+-	dma_addr_t dma_buf;
+-	dma_addr_t dma_hw_context;
+ };
+ 
+ #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+@@ -1130,10 +1128,10 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ 	return count;
+ }
+ 
+-static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+-		   unsigned int len, struct talitos_edesc *edesc,
+-		   struct talitos_ptr *ptr,
+-		   int sg_count, unsigned int offset, int tbl_off)
++static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
++			      unsigned int len, struct talitos_edesc *edesc,
++			      struct talitos_ptr *ptr, int sg_count,
++			      unsigned int offset, int tbl_off, int elen)
+ {
+ 	struct talitos_private *priv = dev_get_drvdata(dev);
+ 	bool is_sec1 = has_ftr_sec1(priv);
+@@ -1142,6 +1140,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ 		to_talitos_ptr(ptr, 0, 0, is_sec1);
+ 		return 1;
+ 	}
++	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+ 	if (sg_count == 1) {
+ 		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+ 		return sg_count;
+@@ -1150,7 +1149,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ 		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+ 		return sg_count;
+ 	}
+-	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
++	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
+ 					 &edesc->link_tbl[tbl_off]);
+ 	if (sg_count == 1) {
+ 		/* Only one segment now, so no link tbl needed*/
+@@ -1164,6 +1163,15 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ 	return sg_count;
+ }
+ 
++static int talitos_sg_map(struct device *dev, struct scatterlist *src,
++			  unsigned int len, struct talitos_edesc *edesc,
++			  struct talitos_ptr *ptr, int sg_count,
++			  unsigned int offset, int tbl_off)
++{
++	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
++				  tbl_off, 0);
++}
++
+ /*
+  * fill in and submit ipsec_esp descriptor
+  */
+@@ -1181,7 +1189,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	unsigned int ivsize = crypto_aead_ivsize(aead);
+ 	int tbl_off = 0;
+ 	int sg_count, ret;
+-	int sg_link_tbl_len;
++	int elen = 0;
+ 	bool sync_needed = false;
+ 	struct talitos_private *priv = dev_get_drvdata(dev);
+ 	bool is_sec1 = has_ftr_sec1(priv);
+@@ -1223,17 +1231,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ 	 * extent is bytes of HMAC postpended to ciphertext,
+ 	 * typically 12 for ipsec
+ 	 */
+-	sg_link_tbl_len = cryptlen;
+-
+-	if (is_ipsec_esp) {
+-		to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
+-
+-		if (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)
+-			sg_link_tbl_len += authsize;
+-	}
++	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
++		elen = authsize;
+ 
+-	ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
+-			     &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
++	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
++				 sg_count, areq->assoclen, tbl_off, elen);
+ 
+ 	if (ret > 1) {
+ 		tbl_off += ret;
+@@ -1690,9 +1692,30 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
+ 				       struct ahash_request *areq)
+ {
+ 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
++	struct talitos_private *priv = dev_get_drvdata(dev);
++	bool is_sec1 = has_ftr_sec1(priv);
++	struct talitos_desc *desc = &edesc->desc;
++	struct talitos_desc *desc2 = desc + 1;
++
++	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
++	if (desc->next_desc &&
++	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
++		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+ 
+ 	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+ 
++	/* When using hashctx-in, must unmap it. */
++	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
++		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
++					 DMA_TO_DEVICE);
++	else if (desc->next_desc)
++		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
++					 DMA_TO_DEVICE);
++
++	if (is_sec1 && req_ctx->nbuf)
++		unmap_single_talitos_ptr(dev, &desc->ptr[3],
++					 DMA_TO_DEVICE);
++
+ 	if (edesc->dma_len)
+ 		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+ 				 DMA_BIDIRECTIONAL);
+@@ -1766,8 +1789,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 
+ 	/* hash context in */
+ 	if (!req_ctx->first || req_ctx->swinit) {
+-		to_talitos_ptr(&desc->ptr[1], ctx->dma_hw_context,
+-			       req_ctx->hw_context_size, is_sec1);
++		map_single_talitos_ptr(dev, &desc->ptr[1],
++				       req_ctx->hw_context_size,
++				       (char *)req_ctx->hw_context,
++				       DMA_TO_DEVICE);
+ 		req_ctx->swinit = 0;
+ 	}
+ 	/* Indicate next op is not the first. */
+@@ -1793,10 +1818,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 	 * data in
+ 	 */
+ 	if (is_sec1 && req_ctx->nbuf) {
+-		dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+-						    HASH_MAX_BLOCK_SIZE;
+-
+-		to_talitos_ptr(&desc->ptr[3], dma_buf, req_ctx->nbuf, is_sec1);
++		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
++				       req_ctx->buf[req_ctx->buf_idx],
++				       DMA_TO_DEVICE);
+ 	} else {
+ 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ 					  &desc->ptr[3], sg_count, offset, 0);
+@@ -1812,8 +1836,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 				       crypto_ahash_digestsize(tfm),
+ 				       areq->result, DMA_FROM_DEVICE);
+ 	else
+-		to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+-			       req_ctx->hw_context_size, is_sec1);
++		map_single_talitos_ptr(dev, &desc->ptr[5],
++				       req_ctx->hw_context_size,
++				       req_ctx->hw_context, DMA_FROM_DEVICE);
+ 
+ 	/* last DWORD empty */
+ 
+@@ -1832,9 +1857,14 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
+ 		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
+ 
+-		to_talitos_ptr(&desc2->ptr[1], ctx->dma_hw_context,
+-			       req_ctx->hw_context_size, is_sec1);
+-
++		if (desc->ptr[1].ptr)
++			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
++					 is_sec1);
++		else
++			map_single_talitos_ptr(dev, &desc2->ptr[1],
++					       req_ctx->hw_context_size,
++					       req_ctx->hw_context,
++					       DMA_TO_DEVICE);
+ 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+ 					  &desc2->ptr[3], sg_count, offset, 0);
+@@ -1842,8 +1872,10 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 			sync_needed = true;
+ 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+ 		if (req_ctx->last)
+-			to_talitos_ptr(&desc->ptr[5], ctx->dma_hw_context,
+-				       req_ctx->hw_context_size, is_sec1);
++			map_single_talitos_ptr(dev, &desc->ptr[5],
++					       req_ctx->hw_context_size,
++					       req_ctx->hw_context,
++					       DMA_FROM_DEVICE);
+ 
+ 		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
+ 					   DMA_BIDIRECTIONAL);
+@@ -1881,12 +1913,8 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
+ static int ahash_init(struct ahash_request *areq)
+ {
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+-	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+-	struct device *dev = ctx->dev;
+ 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ 	unsigned int size;
+-	struct talitos_private *priv = dev_get_drvdata(dev);
+-	bool is_sec1 = has_ftr_sec1(priv);
+ 
+ 	/* Initialize the context */
+ 	req_ctx->buf_idx = 0;
+@@ -1898,18 +1926,6 @@ static int ahash_init(struct ahash_request *areq)
+ 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ 	req_ctx->hw_context_size = size;
+ 
+-	if (ctx->dma_hw_context)
+-		dma_unmap_single(dev, ctx->dma_hw_context, size,
+-				 DMA_BIDIRECTIONAL);
+-	ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+-					     DMA_BIDIRECTIONAL);
+-	if (ctx->dma_buf)
+-		dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+-				 DMA_TO_DEVICE);
+-	if (is_sec1)
+-		ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+-					      sizeof(req_ctx->buf),
+-					      DMA_TO_DEVICE);
+ 	return 0;
+ }
+ 
+@@ -1920,9 +1936,6 @@ static int ahash_init(struct ahash_request *areq)
+ static int ahash_init_sha224_swinit(struct ahash_request *areq)
+ {
+ 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+-	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+-	struct device *dev = ctx->dev;
+ 
+ 	ahash_init(areq);
+ 	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
+@@ -1940,9 +1953,6 @@ static int ahash_init_sha224_swinit(struct ahash_request *areq)
+ 	req_ctx->hw_context[8] = 0;
+ 	req_ctx->hw_context[9] = 0;
+ 
+-	dma_sync_single_for_device(dev, ctx->dma_hw_context,
+-				   req_ctx->hw_context_size, DMA_TO_DEVICE);
+-
+ 	return 0;
+ }
+ 
+@@ -2046,13 +2056,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ 	/* request SEC to INIT hash. */
+ 	if (req_ctx->first && !req_ctx->swinit)
+ 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
+-	if (is_sec1) {
+-		dma_addr_t dma_buf = ctx->dma_buf + req_ctx->buf_idx *
+-						    HASH_MAX_BLOCK_SIZE;
+-
+-		dma_sync_single_for_device(dev, dma_buf,
+-					   req_ctx->nbuf, DMA_TO_DEVICE);
+-	}
+ 
+ 	/* When the tfm context has a keylen, it's an HMAC.
+ 	 * A first or last (ie. not middle) descriptor must request HMAC.
+@@ -2106,12 +2109,7 @@ static int ahash_export(struct ahash_request *areq, void *out)
+ {
+ 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ 	struct talitos_export_state *export = out;
+-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+-	struct talitos_ctx *ctx = crypto_ahash_ctx(ahash);
+-	struct device *dev = ctx->dev;
+ 
+-	dma_sync_single_for_cpu(dev, ctx->dma_hw_context,
+-				req_ctx->hw_context_size, DMA_FROM_DEVICE);
+ 	memcpy(export->hw_context, req_ctx->hw_context,
+ 	       req_ctx->hw_context_size);
+ 	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
+@@ -2130,31 +2128,14 @@ static int ahash_import(struct ahash_request *areq, const void *in)
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ 	const struct talitos_export_state *export = in;
+ 	unsigned int size;
+-	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+-	struct device *dev = ctx->dev;
+-	struct talitos_private *priv = dev_get_drvdata(dev);
+-	bool is_sec1 = has_ftr_sec1(priv);
+ 
+ 	memset(req_ctx, 0, sizeof(*req_ctx));
+ 	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ 			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ 			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ 	req_ctx->hw_context_size = size;
+-	if (ctx->dma_hw_context)
+-		dma_unmap_single(dev, ctx->dma_hw_context, size,
+-				 DMA_BIDIRECTIONAL);
+-
+ 	memcpy(req_ctx->hw_context, export->hw_context, size);
+-	ctx->dma_hw_context = dma_map_single(dev, req_ctx->hw_context, size,
+-					     DMA_BIDIRECTIONAL);
+-	if (ctx->dma_buf)
+-		dma_unmap_single(dev, ctx->dma_buf, sizeof(req_ctx->buf),
+-				 DMA_TO_DEVICE);
+ 	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
+-	if (is_sec1)
+-		ctx->dma_buf = dma_map_single(dev, req_ctx->buf,
+-					      sizeof(req_ctx->buf),
+-					      DMA_TO_DEVICE);
+ 	req_ctx->swinit = export->swinit;
+ 	req_ctx->first = export->first;
+ 	req_ctx->last = export->last;
+@@ -3064,27 +3045,6 @@ static void talitos_cra_exit(struct crypto_tfm *tfm)
+ 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+ }
+ 
+-static void talitos_cra_exit_ahash(struct crypto_tfm *tfm)
+-{
+-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+-	struct device *dev = ctx->dev;
+-	unsigned int size;
+-
+-	talitos_cra_exit(tfm);
+-
+-	size = (crypto_ahash_digestsize(__crypto_ahash_cast(tfm)) <=
+-		SHA256_DIGEST_SIZE)
+-	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+-	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+-
+-	if (ctx->dma_hw_context)
+-		dma_unmap_single(dev, ctx->dma_hw_context, size,
+-				 DMA_BIDIRECTIONAL);
+-	if (ctx->dma_buf)
+-		dma_unmap_single(dev, ctx->dma_buf, HASH_MAX_BLOCK_SIZE * 2,
+-				 DMA_TO_DEVICE);
+-}
+-
+ /*
+  * given the alg's descriptor header template, determine whether descriptor
+  * type and primary/secondary execution units required match the hw
+@@ -3183,7 +3143,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 	case CRYPTO_ALG_TYPE_AHASH:
+ 		alg = &t_alg->algt.alg.hash.halg.base;
+ 		alg->cra_init = talitos_cra_init_ahash;
+-		alg->cra_exit = talitos_cra_exit_ahash;
++		alg->cra_exit = talitos_cra_exit;
+ 		alg->cra_type = &crypto_ahash_type;
+ 		t_alg->algt.alg.hash.init = ahash_init;
+ 		t_alg->algt.alg.hash.update = ahash_update;
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index dbe57da8c1a1..4a3bc168a4a7 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -2544,13 +2544,31 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4],
+ }
+ 
+ static int alps_update_dual_info_ss4_v2(unsigned char otp[][4],
+-				       struct alps_data *priv)
++					struct alps_data *priv,
++					struct psmouse *psmouse)
+ {
+ 	bool is_dual = false;
++	int reg_val = 0;
++	struct ps2dev *ps2dev = &psmouse->ps2dev;
+ 
+-	if (IS_SS4PLUS_DEV(priv->dev_id))
++	if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ 		is_dual = (otp[0][0] >> 4) & 0x01;
+ 
++		if (!is_dual) {
++			/* For support TrackStick of Thinkpad L/E series */
++			if (alps_exit_command_mode(psmouse) == 0 &&
++				alps_enter_command_mode(psmouse) == 0) {
++				reg_val = alps_command_mode_read_reg(psmouse,
++									0xD7);
++			}
++			alps_exit_command_mode(psmouse);
++			ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
++
++			if (reg_val == 0x0C || reg_val == 0x1D)
++				is_dual = true;
++		}
++	}
++
+ 	if (is_dual)
+ 		priv->flags |= ALPS_DUALPOINT |
+ 					ALPS_DUALPOINT_WITH_PRESSURE;
+@@ -2573,7 +2591,7 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
+ 
+ 	alps_update_btn_info_ss4_v2(otp, priv);
+ 
+-	alps_update_dual_info_ss4_v2(otp, priv);
++	alps_update_dual_info_ss4_v2(otp, priv, psmouse);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 6cbbdc6e9687..b353d494ad40 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 	{ }
+ };
+ 
++static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = {
++	{
++		/*
++		 * Sony Vaio VGN-CS series require MUX or the touch sensor
++		 * buttons will disturb touchpad operation
++		 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"),
++		},
++	},
++	{ }
++};
++
+ /*
+  * On some Asus laptops, just running self tests cause problems.
+  */
+@@ -620,6 +634,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ 		},
+ 	},
++	{
++		/* Lenovo ThinkPad L460 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
++		},
++	},
+ 	{
+ 		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ 		.matches = {
+@@ -1163,6 +1184,9 @@ static int __init i8042_platform_init(void)
+ 	if (dmi_check_system(i8042_dmi_nomux_table))
+ 		i8042_nomux = true;
+ 
++	if (dmi_check_system(i8042_dmi_forcemux_table))
++		i8042_nomux = false;
++
+ 	if (dmi_check_system(i8042_dmi_notimeout_table))
+ 		i8042_notimeout = true;
+ 
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index 127f8a0c098b..0c2e628e8723 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -112,6 +112,8 @@ static int usbtv_probe(struct usb_interface *intf,
+ 	return 0;
+ 
+ usbtv_audio_fail:
++	/* we must not free at this point */
++	usb_get_dev(usbtv->udev);
+ 	usbtv_video_free(usbtv);
+ 
+ usbtv_video_fail:
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 758dc73602d5..7bb013644aeb 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -507,7 +507,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+ 		break;
+ 
+ 	default:
+-		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
+ 		rets = -ENOIOCTLCMD;
+ 	}
+ 
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 489492b608cf..380916bff9e0 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards {
+ 	netmos_9901,
+ 	netmos_9865,
+ 	quatech_sppxp100,
++	wch_ch382l,
+ };
+ 
+ 
+@@ -2708,6 +2709,7 @@ static struct parport_pc_pci {
+ 	/* netmos_9901 */               { 1, { { 0, -1 }, } },
+ 	/* netmos_9865 */               { 1, { { 0, -1 }, } },
+ 	/* quatech_sppxp100 */		{ 1, { { 0, 1 }, } },
++	/* wch_ch382l */		{ 1, { { 2, -1 }, } },
+ };
+ 
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ 	/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
+ 	{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
+ 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
++	/* WCH CH382L PCI-E single parallel port card */
++	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+ 	{ 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
+index fdfcdea25867..16590dfaafa4 100644
+--- a/drivers/siox/siox-core.c
++++ b/drivers/siox/siox-core.c
+@@ -594,7 +594,7 @@ static ssize_t device_add_store(struct device *dev,
+ 	size_t inbytes = 0, outbytes = 0;
+ 	u8 statustype = 0;
+ 
+-	ret = sscanf(buf, "%20s %zu %zu %hhu", type, &inbytes,
++	ret = sscanf(buf, "%19s %zu %zu %hhu", type, &inbytes,
+ 		     &outbytes, &statustype);
+ 	if (ret != 3 && ret != 4)
+ 		return -EINVAL;
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 5d610af6799f..9753042b7e1f 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -1275,6 +1275,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
+ 		ack |= NISTC_INTA_ACK_AI_START;
+ 	if (a_status & NISTC_AI_STATUS1_STOP)
+ 		ack |= NISTC_INTA_ACK_AI_STOP;
++	if (a_status & NISTC_AI_STATUS1_OVER)
++		ack |= NISTC_INTA_ACK_AI_ERR;
+ 	if (ack)
+ 		ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
+ }
+diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
+index 160b8906d9b9..9835b1c1cbe1 100644
+--- a/drivers/tty/serial/8250/8250_of.c
++++ b/drivers/tty/serial/8250/8250_of.c
+@@ -316,6 +316,7 @@ static const struct of_device_id of_platform_serial_table[] = {
+ 	{ .compatible = "mrvl,mmp-uart",
+ 		.data = (void *)PORT_XSCALE, },
+ 	{ .compatible = "ti,da830-uart", .data = (void *)PORT_DA830, },
++	{ .compatible = "nuvoton,npcm750-uart", .data = (void *)PORT_NPCM, },
+ 	{ /* end of list */ },
+ };
+ MODULE_DEVICE_TABLE(of, of_platform_serial_table);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 1328c7e70108..804c1af6fd33 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -47,6 +47,10 @@
+ #define UART_EXAR_SLEEP		0x8b	/* Sleep mode */
+ #define UART_EXAR_DVID		0x8d	/* Device identification */
+ 
++/* Nuvoton NPCM timeout register */
++#define UART_NPCM_TOR          7
++#define UART_NPCM_TOIE         BIT(7)  /* Timeout Interrupt Enable */
++
+ /*
+  * Debugging.
+  */
+@@ -293,6 +297,15 @@ static const struct serial8250_config uart_config[] = {
+ 				  UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ 		.flags		= UART_CAP_FIFO,
+ 	},
++	[PORT_NPCM] = {
++		.name		= "Nuvoton 16550",
++		.fifo_size	= 16,
++		.tx_loadsz	= 16,
++		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
++				  UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
++		.rxtrig_bytes	= {1, 4, 8, 14},
++		.flags		= UART_CAP_FIFO,
++	},
+ };
+ 
+ /* Uart divisor latch read */
+@@ -2140,6 +2153,15 @@ int serial8250_do_startup(struct uart_port *port)
+ 				UART_DA830_PWREMU_MGMT_FREE);
+ 	}
+ 
++	if (port->type == PORT_NPCM) {
++		/*
++		 * Nuvoton calls the scratch register 'UART_TOR' (timeout
++		 * register). Enable it, and set TIOC (timeout interrupt
++		 * comparator) to be 0x20 for correct operation.
++		 */
++		serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
++	}
++
+ #ifdef CONFIG_SERIAL_8250_RSA
+ 	/*
+ 	 * If this is an RSA port, see if we can kick it up to the
+@@ -2462,6 +2484,15 @@ static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
+ 	return quot_16 >> 4;
+ }
+ 
++/* Nuvoton NPCM UARTs have a custom divisor calculation */
++static unsigned int npcm_get_divisor(struct uart_8250_port *up,
++		unsigned int baud)
++{
++	struct uart_port *port = &up->port;
++
++	return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
++}
++
+ static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
+ 					   unsigned int baud,
+ 					   unsigned int *frac)
+@@ -2482,6 +2513,8 @@ static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
+ 		quot = 0x8002;
+ 	else if (up->port.type == PORT_XR17V35X)
+ 		quot = xr17v35x_get_divisor(up, baud, frac);
++	else if (up->port.type == PORT_NPCM)
++		quot = npcm_get_divisor(up, baud);
+ 	else
+ 		quot = uart_get_divisor(port, baud);
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index b4e57c5a8bba..f97251f39c26 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1354,6 +1354,11 @@ static void csi_m(struct vc_data *vc)
+ 		case 3:
+ 			vc->vc_italic = 1;
+ 			break;
++		case 21:
++			/*
++			 * No console drivers support double underline, so
++			 * convert it to a single underline.
++			 */
+ 		case 4:
+ 			vc->vc_underline = 1;
+ 			break;
+@@ -1389,7 +1394,6 @@ static void csi_m(struct vc_data *vc)
+ 			vc->vc_disp_ctrl = 1;
+ 			vc->vc_toggle_meta = 1;
+ 			break;
+-		case 21:
+ 		case 22:
+ 			vc->vc_intensity = 1;
+ 			break;
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 06d502b3e913..de1e759dd512 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
++	{ USB_DEVICE(0x155A, 0x1006) },	/* ELDAT Easywave RX09 */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+ 	{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
+ 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index f58c4ff6b387..87202ad5a50d 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -769,6 +769,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ 	{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
++	{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+@@ -931,6 +932,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 8b4ecd2bd297..975d02666c5a 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -923,6 +923,9 @@
+ /*
+  * RT Systems programming cables for various ham radios
+  */
++/* This device uses the VID of FTDI */
++#define RTSYSTEMS_USB_VX8_PID   0x9e50  /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
++
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+ #define RTSYSTEMS_USB_S03_PID	0x9001	/* RTS-03 USB to Serial Adapter */
+ #define RTSYSTEMS_USB_59_PID	0x9e50	/* USB-59 USB to 8 pin plug */
+@@ -1441,6 +1444,12 @@
+  */
+ #define FTDI_CINTERION_MC55I_PID	0xA951
+ 
++/*
++ * Product: FirmwareHubEmulator
++ * Manufacturer: Harman Becker Automotive Systems
++ */
++#define FTDI_FHE_PID		0xA9A0
++
+ /*
+  * Product: Comet Caller ID decoder
+  * Manufacturer: Crucible Technologies
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f53470112670..c7b75dd58fad 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1262,6 +1262,8 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
+ 		list_del(&sums->list);
+ 		kfree(sums);
+ 	}
++	if (ret < 0)
++		return ret;
+ 	return 1;
+ }
+ 
+@@ -1394,10 +1396,23 @@ static noinline int run_delalloc_nocow(struct inode *inode,
+ 				goto out_check;
+ 			if (btrfs_extent_readonly(fs_info, disk_bytenr))
+ 				goto out_check;
+-			if (btrfs_cross_ref_exist(root, ino,
+-						  found_key.offset -
+-						  extent_offset, disk_bytenr))
++			ret = btrfs_cross_ref_exist(root, ino,
++						    found_key.offset -
++						    extent_offset, disk_bytenr);
++			if (ret) {
++				/*
++				 * ret could be -EIO if the above fails to read
++				 * metadata.
++				 */
++				if (ret < 0) {
++					if (cow_start != (u64)-1)
++						cur_offset = cow_start;
++					goto error;
++				}
++
++				WARN_ON_ONCE(nolock);
+ 				goto out_check;
++			}
+ 			disk_bytenr += extent_offset;
+ 			disk_bytenr += cur_offset - found_key.offset;
+ 			num_bytes = min(end + 1, extent_end) - cur_offset;
+@@ -1415,10 +1430,22 @@ static noinline int run_delalloc_nocow(struct inode *inode,
+ 			 * this ensure that csum for a given extent are
+ 			 * either valid or do not exist.
+ 			 */
+-			if (csum_exist_in_range(fs_info, disk_bytenr,
+-						num_bytes)) {
++			ret = csum_exist_in_range(fs_info, disk_bytenr,
++						  num_bytes);
++			if (ret) {
+ 				if (!nolock)
+ 					btrfs_end_write_no_snapshotting(root);
++
++				/*
++				 * ret could be -EIO if the above fails to read
++				 * metadata.
++				 */
++				if (ret < 0) {
++					if (cow_start != (u64)-1)
++						cur_offset = cow_start;
++					goto error;
++				}
++				WARN_ON_ONCE(nolock);
+ 				goto out_check;
+ 			}
+ 			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
+diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
+index 5f11fbdc27f8..1ee46f492267 100644
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -302,12 +302,20 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
+ 		__bitmap_complement(dst, src, nbits);
+ }
+ 
++#ifdef __LITTLE_ENDIAN
++#define BITMAP_MEM_ALIGNMENT 8
++#else
++#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
++#endif
++#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
++
+ static inline int bitmap_equal(const unsigned long *src1,
+ 			const unsigned long *src2, unsigned int nbits)
+ {
+ 	if (small_const_nbits(nbits))
+ 		return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+-	if (__builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
++	if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
++	    IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+ 		return !memcmp(src1, src2, nbits / 8);
+ 	return __bitmap_equal(src1, src2, nbits);
+ }
+@@ -358,8 +366,10 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
+ {
+ 	if (__builtin_constant_p(nbits) && nbits == 1)
+ 		__set_bit(start, map);
+-	else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
+-		 __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
++	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
++		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
++		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
++		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+ 		memset((char *)map + start / 8, 0xff, nbits / 8);
+ 	else
+ 		__bitmap_set(map, start, nbits);
+@@ -370,8 +380,10 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
+ {
+ 	if (__builtin_constant_p(nbits) && nbits == 1)
+ 		__clear_bit(start, map);
+-	else if (__builtin_constant_p(start & 7) && IS_ALIGNED(start, 8) &&
+-		 __builtin_constant_p(nbits & 7) && IS_ALIGNED(nbits, 8))
++	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
++		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
++		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
++		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
+ 		memset((char *)map + start / 8, 0, nbits / 8);
+ 	else
+ 		__bitmap_clear(map, start, nbits);
+diff --git a/include/linux/compat.h b/include/linux/compat.h
+index 16c3027074a2..6970e7922c69 100644
+--- a/include/linux/compat.h
++++ b/include/linux/compat.h
+@@ -222,6 +222,8 @@ typedef struct compat_siginfo {
+ #ifdef __ARCH_SI_TRAPNO
+ 			int _trapno;	/* TRAP # which caused the signal */
+ #endif
++#define __COMPAT_ADDR_BND_PKEY_PAD  (__alignof__(compat_uptr_t) < sizeof(short) ? \
++				     sizeof(short) : __alignof__(compat_uptr_t))
+ 			union {
+ 				/*
+ 				 * used when si_code=BUS_MCEERR_AR or
+@@ -230,13 +232,13 @@ typedef struct compat_siginfo {
+ 				short int _addr_lsb;	/* Valid LSB of the reported address. */
+ 				/* used when si_code=SEGV_BNDERR */
+ 				struct {
+-					compat_uptr_t _dummy_bnd;
++					char _dummy_bnd[__COMPAT_ADDR_BND_PKEY_PAD];
+ 					compat_uptr_t _lower;
+ 					compat_uptr_t _upper;
+ 				} _addr_bnd;
+ 				/* used when si_code=SEGV_PKUERR */
+ 				struct {
+-					compat_uptr_t _dummy_pkey;
++					char _dummy_pkey[__COMPAT_ADDR_BND_PKEY_PAD];
+ 					u32 _pkey;
+ 				} _addr_pkey;
+ 			};
+diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
+index 99c902e460c2..65d405770b95 100644
+--- a/include/uapi/asm-generic/siginfo.h
++++ b/include/uapi/asm-generic/siginfo.h
+@@ -94,6 +94,9 @@ typedef struct siginfo {
+ 			unsigned int _flags;	/* see ia64 si_flags */
+ 			unsigned long _isr;	/* isr */
+ #endif
++
++#define __ADDR_BND_PKEY_PAD  (__alignof__(void *) < sizeof(short) ? \
++			      sizeof(short) : __alignof__(void *))
+ 			union {
+ 				/*
+ 				 * used when si_code=BUS_MCEERR_AR or
+@@ -102,13 +105,13 @@ typedef struct siginfo {
+ 				short _addr_lsb; /* LSB of the reported address */
+ 				/* used when si_code=SEGV_BNDERR */
+ 				struct {
+-					void *_dummy_bnd;
++					char _dummy_bnd[__ADDR_BND_PKEY_PAD];
+ 					void __user *_lower;
+ 					void __user *_upper;
+ 				} _addr_bnd;
+ 				/* used when si_code=SEGV_PKUERR */
+ 				struct {
+-					void *_dummy_pkey;
++					char _dummy_pkey[__ADDR_BND_PKEY_PAD];
+ 					__u32 _pkey;
+ 				} _addr_pkey;
+ 			};
+diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
+index 1c8413f93e3d..dce5f9dae121 100644
+--- a/include/uapi/linux/serial_core.h
++++ b/include/uapi/linux/serial_core.h
+@@ -76,6 +76,9 @@
+ #define PORT_SUNZILOG	38
+ #define PORT_SUNSAB	39
+ 
++/* Nuvoton UART */
++#define PORT_NPCM	40
++
+ /* Intel EG20 */
+ #define PORT_PCH_8LINE	44
+ #define PORT_PCH_2LINE	45


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-03-09 19:24 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-03-09 19:24 UTC (permalink / raw
  To: gentoo-commits

commit:     f26ecf0e5e9fa5427d934a4ef32bad161bf73070
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Mar  9 19:24:47 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Mar  9 19:24:47 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f26ecf0e

Update cpu optimization patch for 4.16

 ...able-additional-cpu-optimizations-for-gcc.patch | 96 +++++++++++-----------
 1 file changed, 48 insertions(+), 48 deletions(-)

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc.patch b/5010_enable-additional-cpu-optimizations-for-gcc.patch
index c68d072..1aba143 100644
--- a/5010_enable-additional-cpu-optimizations-for-gcc.patch
+++ b/5010_enable-additional-cpu-optimizations-for-gcc.patch
@@ -43,7 +43,7 @@ changes. Note that upstream is using the deprecated 'match=atom' flags when I
 believe it should use the newer 'march=bonnell' flag for atom processors.[2]
 
 It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
-recommendation is use to the 'atom' option instead.
+recommendation is to use the 'atom' option instead.
 
 BENEFITS
 Small but real speed increases are measurable using a make endpoint comparing
@@ -66,9 +66,9 @@ REFERENCES
 4. https://github.com/graysky2/kernel_gcc_patch/issues/15
 5. http://www.linuxforge.net/docs/linux/linux-gcc.php
 
---- a/arch/x86/include/asm/module.h	2017-08-02 11:41:47.442200461 -0400
-+++ b/arch/x86/include/asm/module.h	2017-08-02 12:14:21.204358744 -0400
-@@ -15,6 +15,24 @@
+--- a/arch/x86/include/asm/module.h	2018-02-25 21:50:41.000000000 -0500
++++ b/arch/x86/include/asm/module.h	2018-02-26 15:37:52.684596240 -0500
+@@ -25,6 +25,24 @@ struct mod_arch_specific {
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -93,7 +93,7 @@ REFERENCES
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -33,6 +51,26 @@
+@@ -43,6 +61,26 @@ struct mod_arch_specific {
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -120,9 +120,9 @@ REFERENCES
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu	2017-08-02 11:41:47.443200463 -0400
-+++ b/arch/x86/Kconfig.cpu	2017-08-02 12:14:37.108956741 -0400
-@@ -115,6 +115,7 @@ config MPENTIUMM
+--- a/arch/x86/Kconfig.cpu	2018-02-25 21:50:41.000000000 -0500
++++ b/arch/x86/Kconfig.cpu	2018-02-26 15:46:09.886742109 -0500
+@@ -116,6 +116,7 @@ config MPENTIUMM
  config MPENTIUM4
  	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
  	depends on X86_32
@@ -130,7 +130,7 @@ REFERENCES
  	---help---
  	  Select this for Intel Pentium 4 chips.  This includes the
  	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
-@@ -147,9 +148,8 @@ config MPENTIUM4
+@@ -148,9 +149,8 @@ config MPENTIUM4
  		-Paxville
  		-Dempsey
  
@@ -141,7 +141,7 @@ REFERENCES
  	depends on X86_32
  	---help---
  	  Select this for an AMD K6-family processor.  Enables use of
-@@ -157,7 +157,7 @@ config MK6
+@@ -158,7 +158,7 @@ config MK6
  	  flags to GCC.
  
  config MK7
@@ -150,7 +150,7 @@ REFERENCES
  	depends on X86_32
  	---help---
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -165,12 +165,83 @@ config MK7
+@@ -166,12 +166,83 @@ config MK7
  	  flags to GCC.
  
  config MK8
@@ -235,7 +235,7 @@ REFERENCES
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -252,6 +323,7 @@ config MVIAC7
+@@ -253,6 +324,7 @@ config MVIAC7
  
  config MPSC
  	bool "Intel P4 / older Netburst based Xeon"
@@ -243,7 +243,7 @@ REFERENCES
  	depends on X86_64
  	---help---
  	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
-@@ -261,8 +333,19 @@ config MPSC
+@@ -262,8 +334,19 @@ config MPSC
  	  using the cpu family field
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
@@ -264,7 +264,7 @@ REFERENCES
  	---help---
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -270,14 +353,79 @@ config MCORE2
+@@ -271,14 +354,79 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -350,7 +350,7 @@ REFERENCES
  
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -286,6 +434,19 @@ config GENERIC_CPU
+@@ -287,6 +435,19 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
@@ -359,7 +359,7 @@ REFERENCES
 + ---help---
 +
 +   GCC 4.2 and above support -march=native, which automatically detects
-+   the optimum settings to use based on your processor. -march=native 
++   the optimum settings to use based on your processor. -march=native
 +   also detects and applies additional settings beyond -march specific
 +   to your CPU, (eg. -msse4). Unless you have a specific reason not to
 +   (e.g. distcc cross-compiling), you should probably be using
@@ -370,7 +370,7 @@ REFERENCES
  endchoice
  
  config X86_GENERIC
-@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -311,7 +472,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
@@ -379,7 +379,7 @@ REFERENCES
  	default "4" if MELAN || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16
+@@ -342,35 +503,36 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
@@ -433,10 +433,7 @@ REFERENCES
  
  config X86_CMPXCHG64
  	def_bool y
--	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
-+	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
- 
- # this should be set for all -march=.. options where the compiler
+@@ -380,7 +542,7 @@ config X86_CMPXCHG64
  # generates cmov.
  config X86_CMOV
  	def_bool y
@@ -445,9 +442,9 @@ REFERENCES
  
  config X86_MINIMUM_CPU_FAMILY
  	int
---- a/arch/x86/Makefile	2017-08-02 11:41:47.443200463 -0400
-+++ b/arch/x86/Makefile	2017-08-02 12:14:46.373727353 -0400
-@@ -121,13 +121,40 @@ else
+--- a/arch/x86/Makefile	2018-02-25 21:50:41.000000000 -0500
++++ b/arch/x86/Makefile	2018-02-26 15:37:52.685596255 -0500
+@@ -124,13 +124,40 @@ else
  	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
  
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
@@ -491,40 +488,43 @@ REFERENCES
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
---- a/arch/x86/Makefile_32.cpu	2017-08-02 11:41:47.444200464 -0400
-+++ b/arch/x86/Makefile_32.cpu	2017-08-02 12:23:41.636760695 -0400
-@@ -22,7 +22,18 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+--- a/arch/x86/Makefile_32.cpu	2018-02-25 21:50:41.000000000 -0500
++++ b/arch/x86/Makefile_32.cpu	2018-02-26 15:37:52.686596269 -0500
+@@ -23,7 +23,18 @@ cflags-$(CONFIG_MK6)		+= -march=k6
  # Please note, that patches that add -march=athlon-xp and friends are pointless.
  # They make zero difference whatsosever to performance at this time.
  cflags-$(CONFIG_MK7)		+= -march=athlon
-+cflags-$(CONFIG_MNATIVE) 	+= $(call cc-option,-march=native)
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
  cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
-+cflags-$(CONFIG_MK8SSE3) 	+= $(call cc-option,-march=k8-sse3,-march=athlon)
-+cflags-$(CONFIG_MK10) 		+= $(call cc-option,-march=amdfam10,-march=athlon)
-+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
-+cflags-$(CONFIG_MBOBCAT) 	+= $(call cc-option,-march=btver1,-march=athlon)
-+cflags-$(CONFIG_MJAGUAR) 	+= $(call cc-option,-march=btver2,-march=athlon)
-+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
-+cflags-$(CONFIG_MPILEDRIVER) 	+= $(call cc-option,-march=bdver2,-march=athlon)
-+cflags-$(CONFIG_MSTEAMROLLER) 	+= $(call cc-option,-march=bdver3,-march=athlon)
-+cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
-+cflags-$(CONFIG_MZEN) 		+= $(call cc-option,-march=znver1,-march=athlon)
++cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
++cflags-$(CONFIG_MEXCAVATOR)	+= $(call cc-option,-march=bdver4,-march=athlon)
++cflags-$(CONFIG_MZEN)	+= $(call cc-option,-march=znver1,-march=athlon)
  cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
  cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
  cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -31,9 +42,12 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+@@ -32,8 +43,16 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
  cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
  cflags-$(CONFIG_MVIAC7)		+= -march=i686
  cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
 -cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
 -	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
--
-+cflags-$(CONFIG_MNEHALEM) 	+= -march=i686 $(call tune,nehalem)
-+cflags-$(CONFIG_MWESTMERE) 	+= -march=i686 $(call tune,westmere)
-+cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
-+cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
-+cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
-+cflags-$(CONFIG_MHASWELL) 	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
++cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ 
  # AMD Elan support
  cflags-$(CONFIG_MELAN)		+= -march=i486
- 


^ permalink raw reply related	[flat|nested] 20+ messages in thread
* [gentoo-commits] proj/linux-patches:4.16 commit in: /
@ 2018-02-12 20:46 Mike Pagano
  0 siblings, 0 replies; 20+ messages in thread
From: Mike Pagano @ 2018-02-12 20:46 UTC (permalink / raw
  To: gentoo-commits

commit:     d454fc1494839a049aac49e61b5de4dd3c45891c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Feb 12 20:44:07 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Feb 12 20:44:07 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d454fc14

Patch to support for namespace user.pax.* on tmpfs.
Patch to enable link security restrictions by default.
Patch to enable poweroff on Mac Pro 11. See bug #601964.
Patch to add UAS disable quirk. See bug #640082.
Patch that enables swapping of the FN and left Control keys and some
additional keys on some Apple keyboards. See bug #622902.
Patch to ensure that /dev/root doesn't appear in /proc/mounts when
booting without an initramfs. Bootsplash patch ported by Conrad Kostecki.
(Bug #637434).
Patch to enable control of the unaligned access control policy from sysctl
Patch that adds Gentoo Linux support config settings and defaults.
Patch that enables gcc >= v4.9 optimizations for additional CPUs.

 0000_README                                        |   36 +
 1500_XATTR_USER_PREFIX.patch                       |   69 +
 ...ble-link-security-restrictions-by-default.patch |   22 +
 2300_enable-poweroff-on-Mac-Pro-11.patch           |   76 +
 ...age-Disable-UAS-on-JMicron-SATA-enclosure.patch |   40 +
 2600_enable-key-swapping-for-apple-mac.patch       |  114 ++
 2900_dev-root-proc-mount-fix.patch                 |   38 +
 4200_fbcondecor.patch                              | 2095 ++++++++++++++++++++
 4400_alpha-sysctl-uac.patch                        |  142 ++
 ...able-additional-cpu-optimizations-for-gcc.patch |  530 +++++
 10 files changed, 3162 insertions(+)

diff --git a/0000_README b/0000_README
index 9018993..01553d4 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,42 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1500_XATTR_USER_PREFIX.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
+Desc:   Support for namespace user.pax.* on tmpfs.
+
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
+Patch:  2300_enable-poweroff-on-Mac-Pro-11.patch
+From:   http://kernel.ubuntu.com/git/ubuntu/ubuntu-xenial.git/patch/drivers/pci/quirks.c?id=5080ff61a438f3dd80b88b423e1a20791d8a774c
+Desc:   Workaround to enable poweroff on Mac Pro 11. See bug #601964.
+
+Patch:  2500_usb-storage-Disable-UAS-on-JMicron-SATA-enclosure.patch
+From:   https://bugzilla.redhat.com/show_bug.cgi?id=1260207#c5
+Desc:   Add UAS disable quirk. See bug #640082.
+
+Patch:  2600_enable-key-swapping-for-apple-mac.patch
+From:   https://github.com/free5lot/hid-apple-patched
+Desc:   This hid-apple patch enables swapping of the FN and left Control keys and some additional on some apple keyboards. See bug #622902
+
+Patch:  2900_dev-root-proc-mount-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc:   Ensure that /dev/root doesn't appear in /proc/mounts when bootint without an initramfs.
+
+Patch:  4200_fbcondecor.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Conrad Kostecki. (Bug #637434)
+
+Patch:  4400_alpha-sysctl-uac.patch
+From:   Tobias Klausmann (klausman@gentoo.org) and http://bugs.gentoo.org/show_bug.cgi?id=217323 
+Desc:   Enable control of the unaligned access control policy from sysctl
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
+
+Patch:  5010_enable-additional-cpu-optimizations-for-gcc.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/1500_XATTR_USER_PREFIX.patch b/1500_XATTR_USER_PREFIX.patch
new file mode 100644
index 0000000..bacd032
--- /dev/null
+++ b/1500_XATTR_USER_PREFIX.patch
@@ -0,0 +1,69 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+This patch adds support for a restricted user-controlled namespace on
+tmpfs filesystem used to house PaX flags.  The namespace must be of the
+form user.pax.* and its value cannot exceed a size of 8 bytes.
+
+This is needed even on all Gentoo systems so that XATTR_PAX flags
+are preserved for users who might build packages using portage on
+a tmpfs system with a non-hardened kernel and then switch to a
+hardened kernel with XATTR_PAX enabled.
+
+The namespace is added to any user with Extended Attribute support
+enabled for tmpfs.  Users who do not enable xattrs will not have
+the XATTR_PAX flags preserved.
+
+diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
+index 1590c49..5eab462 100644
+--- a/include/uapi/linux/xattr.h
++++ b/include/uapi/linux/xattr.h
+@@ -73,5 +73,9 @@
+ #define XATTR_POSIX_ACL_DEFAULT  "posix_acl_default"
+ #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+ 
++/* User namespace */
++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
++#define XATTR_PAX_FLAGS_SUFFIX "flags"
++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
+ 
+ #endif /* _UAPI_LINUX_XATTR_H */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 440e2a7..c377172 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2667,6 +2667,14 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
+ 	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+ 
+ 	name = xattr_full_name(handler, name);
++
++	if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
++		if (strcmp(name, XATTR_NAME_PAX_FLAGS))
++			return -EOPNOTSUPP;
++		if (size > 8)
++			return -EINVAL;
++	}
++
+ 	return simple_xattr_set(&info->xattrs, name, value, size, flags);
+ }
+ 
+@@ -2682,6 +2690,12 @@ static const struct xattr_handler shmem_trusted_xattr_handler = {
+ 	.set = shmem_xattr_handler_set,
+ };
+ 
++static const struct xattr_handler shmem_user_xattr_handler = {
++	.prefix = XATTR_USER_PREFIX,
++	.get = shmem_xattr_handler_get,
++	.set = shmem_xattr_handler_set,
++};
++
+ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #ifdef CONFIG_TMPFS_POSIX_ACL
+ 	&posix_acl_access_xattr_handler,
+@@ -2689,6 +2703,7 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+ #endif
+ 	&shmem_security_xattr_handler,
+ 	&shmem_trusted_xattr_handler,
++	&shmem_user_xattr_handler,
+ 	NULL
+ };
+ 

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations

diff --git a/2300_enable-poweroff-on-Mac-Pro-11.patch b/2300_enable-poweroff-on-Mac-Pro-11.patch
new file mode 100644
index 0000000..063f2a1
--- /dev/null
+++ b/2300_enable-poweroff-on-Mac-Pro-11.patch
@@ -0,0 +1,76 @@
+From 5080ff61a438f3dd80b88b423e1a20791d8a774c Mon Sep 17 00:00:00 2001
+From: Chen Yu <yu.c.chen@intel.com>
+Date: Fri, 19 Aug 2016 10:25:57 -0700
+Subject: UBUNTU: SAUCE: PCI: Workaround to enable poweroff on Mac Pro 11
+
+BugLink: http://bugs.launchpad.net/bugs/1587714
+
+People reported that they can not do a poweroff nor a
+suspend to ram on their Mac Pro 11. After some investigations
+it was found that, once the PCI bridge 0000:00:1c.0 reassigns its
+mm windows to ([mem 0x7fa00000-0x7fbfffff] and
+[mem 0x7fc00000-0x7fdfffff 64bit pref]), the region of ACPI
+io resource 0x1804 becomes unaccessible immediately, where the
+ACPI Sleep register is located, as a result neither poweroff(S5)
+nor suspend to ram(S3) works.
+
+As suggested by Bjorn, further testing shows that, there is an
+unreported device may be (using) conflict with above aperture,
+which brings unpredictable result such as the failure of accessing
+the io port, which blocks the poweroff(S5). Besides if we reassign
+the memory aperture to the other place, the poweroff works again.
+
+As we do not find any resource declared in _CRS which contain above
+memory aperture, and Mac OS does not use this pci bridge neither, we
+choose a simple workaround to clear the hotplug flag(suggested by
+Yinghai Lu), thus do not allocate any resource for this pci bridge,
+and thereby no conflict anymore.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=103211
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Rafael J. Wysocki <rafael@kernel.org>
+Cc: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Chen Yu <yu.c.chen@intel.com>
+Reference: https://patchwork.kernel.org/patch/9289777/
+Signed-off-by: Kamal Mostafa <kamal@canonical.com>
+Acked-by: Brad Figg <brad.figg@canonical.com>
+Acked-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
+---
+ drivers/pci/quirks.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 48cfaa0..23968b6 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -2750,6 +2750,26 @@ static void quirk_hotplug_bridge(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+ 
+ /*
++ * Apple: Avoid programming the memory/io aperture of 00:1c.0
++ *
++ * BIOS does not declare any resource for 00:1c.0, but with
++ * hotplug flag set, thus the OS allocates:
++ * [mem 0x7fa00000 - 0x7fbfffff]
++ * [mem 0x7fc00000-0x7fdfffff 64bit pref]
++ * which is conflict with an unreported device, which
++ * causes unpredictable result such as accessing io port.
++ * So clear the hotplug flag to work around it.
++ */
++static void quirk_apple_mbp_poweroff(struct pci_dev *dev)
++{
++   if (dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") ||
++       dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5"))
++       dev->is_hotplug_bridge = 0;
++}
++
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
++
++/*
+  * This is a quirk for the Ricoh MMC controller found as a part of
+  * some mulifunction chips.
+ 
+-- 
+cgit v0.11.2
+

diff --git a/2500_usb-storage-Disable-UAS-on-JMicron-SATA-enclosure.patch b/2500_usb-storage-Disable-UAS-on-JMicron-SATA-enclosure.patch
new file mode 100644
index 0000000..0dd93ef
--- /dev/null
+++ b/2500_usb-storage-Disable-UAS-on-JMicron-SATA-enclosure.patch
@@ -0,0 +1,40 @@
+From d02a55182307c01136b599fd048b4679f259a84e Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@fedoraproject.org>
+Date: Tue, 8 Sep 2015 09:53:38 -0700
+Subject: [PATCH] usb-storage: Disable UAS on JMicron SATA enclosure
+
+Steve Ellis reported incorrect block sizes and alignement
+offsets with a SATA enclosure. Adding a quirk to disable
+UAS fixes the problems.
+
+Reported-by: Steven Ellis <sellis@redhat.com>
+Signed-off-by: Laura Abbott <labbott@fedoraproject.org>
+---
+ drivers/usb/storage/unusual_uas.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index c85ea53..216d93d 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -141,12 +141,15 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
+-/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
++/*
++ * Initially Reported-by: Takeo Nakayama <javhera@gmx.com>
++ * UAS Ignore Reported by Steven Ellis <sellis@redhat.com>
++ */
+ UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+ 		"JMicron",
+ 		"JMS566",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+-		US_FL_NO_REPORT_OPCODES),
++		US_FL_NO_REPORT_OPCODES | US_FL_IGNORE_UAS),
+ 
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
+-- 
+2.4.3
+

diff --git a/2600_enable-key-swapping-for-apple-mac.patch b/2600_enable-key-swapping-for-apple-mac.patch
new file mode 100644
index 0000000..ab228d3
--- /dev/null
+++ b/2600_enable-key-swapping-for-apple-mac.patch
@@ -0,0 +1,114 @@
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -52,6 +52,22 @@
+ 		"(For people who want to keep Windows PC keyboard muscle memory. "
+ 		"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+ 
++static unsigned int swap_fn_leftctrl;
++module_param(swap_fn_leftctrl, uint, 0644);
++MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
++		"(For people who want to keep PC keyboard muscle memory. "
++		"[0] = as-is, Mac layout, 1 = swapped, PC layout)");
++
++static unsigned int rightalt_as_rightctrl;
++module_param(rightalt_as_rightctrl, uint, 0644);
++MODULE_PARM_DESC(rightalt_as_rightctrl, "Use the right Alt key as a right Ctrl key. "
++		"[0] = as-is, Mac layout. 1 = Right Alt is right Ctrl");
++
++static unsigned int ejectcd_as_delete;
++module_param(ejectcd_as_delete, uint, 0644);
++MODULE_PARM_DESC(ejectcd_as_delete, "Use Eject-CD key as Delete key. "
++		"([0] = disabled, 1 = enabled)");
++
+ struct apple_sc {
+ 	unsigned long quirks;
+ 	unsigned int fn_on;
+@@ -164,6 +180,21 @@
+ 	{ }
+ };
+ 
++static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
++	{ KEY_FN, KEY_LEFTCTRL },
++	{ }
++};
++
++static const struct apple_key_translation rightalt_as_rightctrl_keys[] = {
++	{ KEY_RIGHTALT, KEY_RIGHTCTRL },
++	{ }
++};
++
++static const struct apple_key_translation ejectcd_as_delete_keys[] = {
++	{ KEY_EJECTCD,	KEY_DELETE },
++	{ }
++};
++
+ static const struct apple_key_translation *apple_find_translation(
+ 		const struct apple_key_translation *table, u16 from)
+ {
+@@ -183,9 +214,11 @@
+ 	struct apple_sc *asc = hid_get_drvdata(hid);
+ 	const struct apple_key_translation *trans, *table;
+ 
+-	if (usage->code == KEY_FN) {
++	u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
++
++	if (usage->code == fn_keycode) {
+ 		asc->fn_on = !!value;
+-		input_event(input, usage->type, usage->code, value);
++		input_event(input, usage->type, KEY_FN, value);
+ 		return 1;
+ 	}
+ 
+@@ -264,6 +297,30 @@
+ 		}
+ 	}
+ 
++	if (swap_fn_leftctrl) {
++		trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
++	if (ejectcd_as_delete) {
++		trans = apple_find_translation(ejectcd_as_delete_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
++	if (rightalt_as_rightctrl) {
++		trans = apple_find_translation(rightalt_as_rightctrl_keys, usage->code);
++		if (trans) {
++			input_event(input, usage->type, trans->to, value);
++			return 1;
++		}
++	}
++
+ 	return 0;
+ }
+ 
+@@ -327,6 +384,21 @@
+ 
+ 	for (trans = apple_iso_keyboard; trans->from; trans++)
+ 		set_bit(trans->to, input->keybit);
++
++	if (swap_fn_leftctrl) {
++		for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
++
++	if (ejectcd_as_delete) {
++		for (trans = ejectcd_as_delete_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
++
++        if (rightalt_as_rightctrl) {
++		for (trans = rightalt_as_rightctrl_keys; trans->from; trans++)
++			set_bit(trans->to, input->keybit);
++	}
+ }
+ 
+ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 0000000..60af1eb
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,38 @@
+--- a/init/do_mounts.c	2015-08-19 10:27:16.753852576 -0400
++++ b/init/do_mounts.c	2015-08-19 10:34:25.473850353 -0400
+@@ -490,7 +490,11 @@ void __init change_floppy(char *fmt, ...
+ 	va_start(args, fmt);
+ 	vsprintf(buf, fmt, args);
+ 	va_end(args);
+-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++	if (saved_root_name[0])
++		fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++	else
++		fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++
+ 	if (fd >= 0) {
+ 		sys_ioctl(fd, FDEJECT, 0);
+ 		sys_close(fd);
+@@ -534,11 +538,17 @@ void __init mount_root(void)
+ #endif
+ #ifdef CONFIG_BLOCK
+ 	{
+-		int err = create_dev("/dev/root", ROOT_DEV);
+-
+-		if (err < 0)
+-			pr_emerg("Failed to create /dev/root: %d\n", err);
+-		mount_block_root("/dev/root", root_mountflags);
++		if (saved_root_name[0] == '/') {
++	       	int err = create_dev(saved_root_name, ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create %s: %d\n", saved_root_name, err);
++			mount_block_root(saved_root_name, root_mountflags);
++		} else {
++			int err = create_dev("/dev/root", ROOT_DEV);
++			if (err < 0)
++				pr_emerg("Failed to create /dev/root: %d\n", err);
++			mount_block_root("/dev/root", root_mountflags);
++		}
+ 	}
+ #endif
+ }

diff --git a/4200_fbcondecor.patch b/4200_fbcondecor.patch
new file mode 100644
index 0000000..7151d0f
--- /dev/null
+++ b/4200_fbcondecor.patch
@@ -0,0 +1,2095 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c5907a..22309308ba56 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 000000000000..637209e11ccd
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->helper.
++
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the
++ userspace  helper to find a background image appropriate for the specified
++ theme and the current resolution. The userspace helper should respond by
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes:
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in a 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ingored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc:
++Virtual console number.
++
++origin:
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data:
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 1d034b680431..9f41f2ea0c8b 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -23,6 +23,10 @@ obj-y				+= pci/dwc/
+ 
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -53,11 +57,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # iommu/ comes before gpu as gpu are using iommu controllers
+ obj-$(CONFIG_IOMMU_SUPPORT)	+= iommu/
+ 
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 7f1f1fbcef9e..8439b618dfc0 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -151,6 +151,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index db07b784bd2c..3e369bd120b8 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -9,4 +9,5 @@ obj-$(CONFIG_STI_CONSOLE)         += sticon.o sticore.o
+ obj-$(CONFIG_VGA_CONSOLE)         += vgacon.o
+ obj-$(CONFIG_MDA_CONSOLE)         += mdacon.o
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 000000000000..b00960803edc
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,473 @@
++/*
++ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootdecor" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "../fbdev/core/fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift, bpp, type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)								\
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++			((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8 *src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8 *decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8 *dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
++#define cc2cx(a)						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR ||		\
++		info->fix.visual == FB_VISUAL_DIRECTCOLOR) ?	\
++			((u32 *)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++	bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data *vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++			break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8 *)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++				u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i = 0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i = 0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff), (u16 *)p); p += 2;
++				fb_writeb((bgx >> 16), p++);
++#else
++				fb_writew((bgx >> 8), (u16 *)p); p += 2;
++				fb_writeb((bgx & 0xff), p++);
++#endif
++			}
++			break;
++		case 16:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(bgx, p); p += 4;
++				fb_writel(bgx, p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx, p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx, (u16 *)p);
++			break;
++		case 8:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx, p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx, (u8 *)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i = 0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i = 0; i < (width * 3 / 4); i++)
++				fb_writel(*q++, p++);
++			if ((width * 3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8 *)q, (u8 *)p);
++				} else if (width & 1) {
++					fb_writew(*(u16 *)q, (u16 *)p);
++					fb_writeb(*(u8 *)((u16 *)q + 1),
++							(u8 *)((u16 *)p + 2));
++				}
++			}
++			break;
++		case 16:
++			for (i = 0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16 *)q, (u16 *)p);
++			break;
++		case 8:
++			for (i = 0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16 *)q, (u16 *)p);
++				q = (u32 *) ((u16 *)q + 1);
++				p = (u32 *) ((u16 *)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8 *)q, (u8 *)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th,
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0,
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y,
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 000000000000..78288a497a60
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,549 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++#include <linux/binfmts.h>
++#include <linux/uaccess.h>
++#include <asm/irq.h>
++
++#include "../fbdev/core/fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++
++static int initialized;
++
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++EXPORT_SYMBOL(fbcon_decor_path);
++
++int fbcon_decor_call_helper(char *cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb, 5, "%d", fb);
++	snprintf(tcons, 5, "%d", vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info *info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info *info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++	console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++	console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strnlen_user(cfg->theme, MAX_ARG_STRLEN);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len))
++		return -EFAULT;
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	console_lock();
++	if (vc->vc_decor.state)
++		fbcon_decor_disable(vc, 1);
++	kfree(vc->vc_decor.theme);
++	vc->vc_decor = *cfg;
++	console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc,
++					struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme,
++					strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img,
++						unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height,
++				info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++				(void __user *)img->cmap.red,
++						(img->cmap.len << 1)) ||
++			copy_from_user(tmp + (img->cmap.len << 1),
++				(void __user *)img->cmap.green,
++						(img->cmap.len << 1)) ||
++			copy_from_user(tmp + (img->cmap.len << 2),
++				(void __user *)img->cmap.blue,
++						(img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16 *)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user *)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8 *)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	console_unlock();
++
++	return 0;
++
++out:
++	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user *) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 000000000000..c49386c16695
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,77 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char *cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme)
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x, y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x, y) (fbcon_decor_active_nores(x, y) &&	\
++				x->bgdecor.width == x->var.xres &&	\
++				x->bgdecor.height == x->var.yres &&	\
++				x->bgdecor.depth == x->var.bits_per_pixel)
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char *cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x, y) (0)
++#define fbcon_decor_active(x, y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 5e58f5ec0a28..1daa8c2cb2d8 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1226,7 +1226,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
+index 790900d646c0..3f940c93752c 100644
+--- a/drivers/video/fbdev/core/bitblit.c
++++ b/drivers/video/fbdev/core/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "../../console/fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++		area.sx += vc->vc_decor.tx;
++		area.sy += vc->vc_decor.ty;
++		area.dx += vc->vc_decor.tx;
++		area.dy += vc->vc_decor.ty;
++	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -379,11 +387,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index 68a113594808..21f977cb59d2 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -256,9 +258,12 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 04612f938bab..95c349200078 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -80,6 +80,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "../../console/fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -95,7 +96,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -282,7 +283,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1013,6 +1017,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1042,7 +1052,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1275,6 +1285,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_clear(vc, info, sy, sx, height, width);
++		return;
++	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1294,10 +1309,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1313,8 +1333,12 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, margin_color, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_clear_margins(vc, info, bottom_only);
++		else
++			ops->clear_margins(vc, info, margin_color, bottom_only);
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1835,7 +1859,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1928,6 +1952,8 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2076,6 +2102,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++		/* must use slower redraw bmove to keep background pic intact */
++		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++		return;
++	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2146,8 +2179,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2183,6 +2216,22 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			// Clear the screen to avoid displaying funky colors
++			// during palette updates.
++			memset((u8 *)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2201,9 +2250,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2247,6 +2293,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2355,15 +2413,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2546,13 +2609,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 		set_vc_hi_font(vc, true);
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (con_is_visible(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (con_is_visible(vc)
+@@ -2681,7 +2753,11 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return;
+ 
+ 	if (!con_is_visible(vc))
+@@ -2707,7 +2783,47 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++)
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++
++		fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+@@ -2932,7 +3048,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2977,7 +3100,8 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc))
++			vc_resize(vc, cols, rows);
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3618,6 +3742,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index f741ba8df01b..b0141433d249 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1253,15 +1253,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 000000000000..15143556c2aa
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char *theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index c0ec478ea5bf..8bfed6b21fc9 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -21,6 +21,7 @@ struct vt_struct;
+ struct uni_pagedir;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ /*
+  * Example: vc_data of a console that was scrolled 3 lines down.
+@@ -141,6 +142,8 @@ struct vc_data {
+ 	struct uni_pagedir *vc_uni_pagedir;
+ 	struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index bc24e48e396d..ad7d182c7545 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -239,6 +239,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -509,6 +537,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index 6cd9b198b7c6..a228440649fa 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -9,6 +9,23 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper {
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32 {
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -36,6 +53,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -278,6 +314,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index d9c31bc2eaea..e33ac56cc32a 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -150,6 +150,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -283,6 +287,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
+ 	{ }
+ };
+ 

diff --git a/4400_alpha-sysctl-uac.patch b/4400_alpha-sysctl-uac.patch
new file mode 100644
index 0000000..d42b4ed
--- /dev/null
+++ b/4400_alpha-sysctl-uac.patch
@@ -0,0 +1,142 @@
+diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
+index 7f312d8..1eb686b 100644
+--- a/arch/alpha/Kconfig
++++ b/arch/alpha/Kconfig
+@@ -697,6 +697,33 @@ config HZ
+ 	default 1200 if HZ_1200
+ 	default 1024
+
++config ALPHA_UAC_SYSCTL
++       bool "Configure UAC policy via sysctl"
++       depends on SYSCTL
++       default y
++       ---help---
++         Configuring the UAC (unaligned access control) policy on a Linux
++         system usually involves setting a compile time define. If you say
++         Y here, you will be able to modify the UAC policy at runtime using
++         the /proc interface.
++
++         The UAC policy defines the action Linux should take when an
++         unaligned memory access occurs. The action can include printing a
++         warning message (NOPRINT), sending a signal to the offending
++         program to help developers debug their applications (SIGBUS), or
++         disabling the transparent fixing (NOFIX).
++
++         The sysctls will be initialized to the compile-time defined UAC
++         policy. You can change these manually, or with the sysctl(8)
++         userspace utility.
++
++         To disable the warning messages at runtime, you would use
++
++           echo 1 > /proc/sys/kernel/uac/noprint
++
++         This is pretty harmless. Say Y if you're not sure.
++
++
+ source "drivers/pci/Kconfig"
+ source "drivers/eisa/Kconfig"
+
+diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
+index 74aceea..cb35d80 100644
+--- a/arch/alpha/kernel/traps.c
++++ b/arch/alpha/kernel/traps.c
+@@ -103,6 +103,49 @@ static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ 			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
+ #endif
+
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++
++#include <linux/sysctl.h>
++
++static int enabled_noprint = 0;
++static int enabled_sigbus = 0;
++static int enabled_nofix = 0;
++
++struct ctl_table uac_table[] = {
++       {
++               .procname       = "noprint",
++               .data           = &enabled_noprint,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       {
++               .procname       = "sigbus",
++               .data           = &enabled_sigbus,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       {
++               .procname       = "nofix",
++               .data           = &enabled_nofix,
++               .maxlen         = sizeof (int),
++               .mode           = 0644,
++               .proc_handler = &proc_dointvec,
++       },
++       { }
++};
++
++static int __init init_uac_sysctl(void)
++{
++   /* Initialize sysctls with the #defined UAC policy */
++   enabled_noprint = (test_thread_flag (TS_UAC_NOPRINT)) ? 1 : 0;
++   enabled_sigbus = (test_thread_flag (TS_UAC_SIGBUS)) ? 1 : 0;
++   enabled_nofix = (test_thread_flag (TS_UAC_NOFIX)) ? 1 : 0;
++   return 0;
++}
++#endif
++
+ static void
+ dik_show_code(unsigned int *pc)
+ {
+@@ -785,7 +828,12 @@ do_entUnaUser(void __user * va, unsigned long opcode,
+ 	/* Check the UAC bits to decide what the user wants us to do
+ 	   with the unaliged access.  */
+
++#ifndef CONFIG_ALPHA_UAC_SYSCTL
+ 	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
++#else  /* CONFIG_ALPHA_UAC_SYSCTL */
++	if (!(current_thread_info()->status & TS_UAC_NOPRINT) &&
++	    !(enabled_noprint)) {
++#endif /* CONFIG_ALPHA_UAC_SYSCTL */
+ 		if (__ratelimit(&ratelimit)) {
+ 			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
+ 			       current->comm, task_pid_nr(current),
+@@ -1090,3 +1138,6 @@ trap_init(void)
+ 	wrent(entSys, 5);
+ 	wrent(entDbg, 6);
+ }
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++       __initcall(init_uac_sysctl);
++#endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 87b2fc3..55021a8 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -152,6 +152,11 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
++
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++extern struct ctl_table uac_table[];
++#endif
++
+ #ifdef CONFIG_SPARC
+ #endif
+
+@@ -1844,6 +1849,13 @@ static struct ctl_table debug_table[] = {
+ 		.extra2		= &one,
+ 	},
+ #endif
++#ifdef CONFIG_ALPHA_UAC_SYSCTL
++	{
++	        .procname   = "uac",
++		.mode       = 0555,
++	        .child      = uac_table,
++	 },
++#endif /* CONFIG_ALPHA_UAC_SYSCTL */
+ 	{ }
+ };
+

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc.patch b/5010_enable-additional-cpu-optimizations-for-gcc.patch
new file mode 100644
index 0000000..c68d072
--- /dev/null
+++ b/5010_enable-additional-cpu-optimizations-for-gcc.patch
@@ -0,0 +1,530 @@
+WARNING
+This patch works with gcc versions 4.9+ and with kernel version 3.15+ and should
+NOT be applied when compiling on older versions of gcc due to key name changes
+of the march flags introduced with the version 4.9 release of gcc.[1]
+
+Use the older version of this patch hosted on the same github for older
+versions of gcc.
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features  --->
+  Processor family --->
+
+The expanded microarchitectures include:
+* AMD Improved K8-family
+* AMD K10-family
+* AMD Family 10h (Barcelona)
+* AMD Family 14h (Bobcat)
+* AMD Family 16h (Jaguar)
+* AMD Family 15h (Bulldozer)
+* AMD Family 15h (Piledriver)
+* AMD Family 15h (Steamroller)
+* AMD Family 15h (Excavator)
+* AMD Family 17h (Zen)
+* Intel Silvermont low-power processors
+* Intel 1st Gen Core i3/i5/i7 (Nehalem)
+* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+* Intel 4th Gen Core i3/i5/i7 (Haswell)
+* Intel 5th Gen Core i3/i5/i7 (Broadwell)
+* Intel 6th Gen Core i3/i5.i7 (Skylake)
+
+It also offers to compile passing the 'native' option which, "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[3]
+
+MINOR NOTES
+This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
+changes. Note that upstream is using the deprecated 'match=atom' flags when I
+believe it should use the newer 'march=bonnell' flag for atom processors.[2]
+
+It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
+recommendation is use to the 'atom' option instead.
+
+BENEFITS
+Small but real speed increases are measurable using a make endpoint comparing
+a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version >=4.9
+
+ACKNOWLEDGMENTS
+This patch builds on the seminal work by Jeroen.[5]
+
+REFERENCES
+1. https://gcc.gnu.org/gcc-4.9/changes.html
+2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
+3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
+4. https://github.com/graysky2/kernel_gcc_patch/issues/15
+5. http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+--- a/arch/x86/include/asm/module.h	2017-08-02 11:41:47.442200461 -0400
++++ b/arch/x86/include/asm/module.h	2017-08-02 12:14:21.204358744 -0400
+@@ -15,6 +15,24 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +51,26 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2017-08-02 11:41:47.443200463 -0400
++++ b/arch/x86/Kconfig.cpu	2017-08-02 12:14:37.108956741 -0400
+@@ -115,6 +115,7 @@ config MPENTIUMM
+ config MPENTIUM4
+ 	bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
+ 	depends on X86_32
++	select X86_P6_NOP
+ 	---help---
+ 	  Select this for Intel Pentium 4 chips.  This includes the
+ 	  Pentium 4, Pentium D, P4-based Celeron and Xeon, and
+@@ -147,9 +148,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -157,7 +157,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -165,12 +165,83 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	---help---
++	  Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++		Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Family 10h Barcelona processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Family 14h Bobcat processors.
++
++	  Enables -march=btver1
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Family 16h Jaguar processors.
++
++	  Enables -march=btver2
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Family 15h Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Family 15h Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MSTEAMROLLER
++	bool "AMD Steamroller"
++	---help---
++	  Select this for AMD Family 15h Steamroller processors.
++
++	  Enables -march=bdver3
++
++config MEXCAVATOR
++	bool "AMD Excavator"
++	---help---
++	  Select this for AMD Family 15h Excavator processors.
++
++	  Enables -march=bdver4
++
++config MZEN
++	bool "AMD Zen"
++	---help---
++	  Select this for AMD Family 17h Zen processors.
++
++	  Enables -march=znver1
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -252,6 +323,7 @@ config MVIAC7
+ 
+ config MPSC
+ 	bool "Intel P4 / older Netburst based Xeon"
++	select X86_P6_NOP
+ 	depends on X86_64
+ 	---help---
+ 	  Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
+@@ -261,8 +333,19 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
++	select X86_P6_NOP
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -270,14 +353,79 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
++	select X86_P6_NOP
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Westmere formerly Nehalem-C family.
++
++	  Enables -march=westmere
++
++config MSILVERMONT
++	bool "Intel Silvermont"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
++
++config MSKYLAKE
++	bool "Intel Skylake"
++	select X86_P6_NOP
++	---help---
++
++	  Select this for 6th Gen Core processors in the Skylake family.
++
++	  Enables -march=skylake
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -286,6 +434,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native 
++   also detects and applies additional settings beyond -march specific
++   to your CPU, (eg. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+ 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
+ 
+-#
+-# P6_NOPs are a relatively minor optimization that require a family >=
+-# 6 processor, except that it is broken on certain VIA chips.
+-# Furthermore, AMD chips prefer a totally different sequence of NOPs
+-# (which work on all CPUs).  In addition, it looks like Virtual PC
+-# does not understand them.
+-#
+-# As a result, disallow these if we're not compiling for X86_64 (these
+-# NOPs do work on all x86-64 capable chips); the list of processors in
+-# the right-hand clause are the cores that benefit from this optimization.
+-#
+ config X86_P6_NOP
+-	def_bool y
+-	depends on X86_64
+-	depends on (MCORE2 || MPENTIUM4 || MPSC)
++	default n
++	bool "Support for P6_NOPs on Intel chips"
++	depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT  || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE)
++	---help---
++	P6_NOPs are a relatively minor optimization that require a family >=
++	6 processor, except that it is broken on certain VIA chips.
++	Furthermore, AMD chips prefer a totally different sequence of NOPs
++	(which work on all CPUs).  In addition, it looks like Virtual PC
++	does not understand them.
++
++	As a result, disallow these if we're not compiling for X86_64 (these
++	NOPs do work on all x86-64 capable chips); the list of processors in
++	the right-hand clause are the cores that benefit from this optimization.
++
++	Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2017-08-02 11:41:47.443200463 -0400
++++ b/arch/x86/Makefile	2017-08-02 12:14:46.373727353 -0400
+@@ -121,13 +121,40 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
++        cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
++        cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MSKYLAKE) += \
++                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2017-08-02 11:41:47.444200464 -0400
++++ b/arch/x86/Makefile_32.cpu	2017-08-02 12:23:41.636760695 -0400
+@@ -22,7 +22,18 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) 	+= $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3) 	+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10) 		+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT) 	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MJAGUAR) 	+= $(call cc-option,-march=btver2,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER) 	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER) 	+= $(call cc-option,-march=bdver3,-march=athlon)
++cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
++cflags-$(CONFIG_MZEN) 		+= $(call cc-option,-march=znver1,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -31,9 +42,12 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+-
++cflags-$(CONFIG_MNEHALEM) 	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE) 	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL) 	+= -march=i686 $(call tune,haswell)
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486
+ 


^ permalink raw reply related	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2018-06-26 16:13 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-19 10:45 [gentoo-commits] proj/linux-patches:4.16 commit in: / Mike Pagano
  -- strict thread matches above, loose matches on Subject: below --
2018-06-26 16:13 Alice Ferrazzi
2018-06-20 19:44 Mike Pagano
2018-06-16 15:45 Mike Pagano
2018-06-11 21:48 Mike Pagano
2018-06-05 11:23 Mike Pagano
2018-05-30 11:44 Mike Pagano
2018-05-25 15:37 Mike Pagano
2018-05-22 19:13 Mike Pagano
2018-05-20 22:22 Mike Pagano
2018-05-16 10:25 Mike Pagano
2018-05-09 10:57 Mike Pagano
2018-05-02 16:15 Mike Pagano
2018-04-30 10:30 Mike Pagano
2018-04-26 10:22 Mike Pagano
2018-04-24 11:31 Mike Pagano
2018-04-12 12:21 Mike Pagano
2018-04-08 14:33 Mike Pagano
2018-03-09 19:24 Mike Pagano
2018-02-12 20:46 Mike Pagano

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox