From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Sat, 15 Sep 2018 10:12:54 +0000 (UTC)
Message-ID: <1537006366.f69bd2c4a51cabfc16f5a44334d1cd82613e7157.mpagano@gentoo>

commit:     f69bd2c4a51cabfc16f5a44334d1cd82613e7157
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 15 10:12:46 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 15 10:12:46 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f69bd2c4

Linux patch 4.18.8

 0000_README             |    4 +
 1007_linux-4.18.8.patch | 6654 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6658 insertions(+)

diff --git a/0000_README b/0000_README
index f3682ca..597262e 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-4.18.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.18.7
 
+Patch:  1007_linux-4.18.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.18.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.18.8.patch b/1007_linux-4.18.8.patch
new file mode 100644
index 0000000..8a888c7
--- /dev/null
+++ b/1007_linux-4.18.8.patch
@@ -0,0 +1,6654 @@
+diff --git a/Makefile b/Makefile
+index 711b04d00e49..0d73431f66cd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 18
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Merciless Moray
+ 
+diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
+index fafd3d7f9f8c..8ca926522026 100644
+--- a/arch/arm/mach-rockchip/Kconfig
++++ b/arch/arm/mach-rockchip/Kconfig
+@@ -17,6 +17,7 @@ config ARCH_ROCKCHIP
+ 	select ARM_GLOBAL_TIMER
+ 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
+ 	select ZONE_DMA if ARM_LPAE
++	select PM
+ 	help
+ 	  Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
+ 	  containing the RK2928, RK30xx and RK31xx series.
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index d5aeac351fc3..21a715ad8222 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -151,6 +151,7 @@ config ARCH_ROCKCHIP
+ 	select GPIOLIB
+ 	select PINCTRL
+ 	select PINCTRL_ROCKCHIP
++	select PM
+ 	select ROCKCHIP_TIMER
+ 	help
+ 	  This enables support for the ARMv8 based Rockchip chipsets,
+diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
+index 16b077801a5f..a4a718dbfec6 100644
+--- a/arch/powerpc/include/asm/topology.h
++++ b/arch/powerpc/include/asm/topology.h
+@@ -92,6 +92,7 @@ extern int stop_topology_update(void);
+ extern int prrn_is_enabled(void);
+ extern int find_and_online_cpu_nid(int cpu);
+ extern int timed_topology_update(int nsecs);
++extern void __init shared_proc_topology_init(void);
+ #else
+ static inline int start_topology_update(void)
+ {
+@@ -113,6 +114,10 @@ static inline int timed_topology_update(int nsecs)
+ {
+ 	return 0;
+ }
++
++#ifdef CONFIG_SMP
++static inline void shared_proc_topology_init(void) {}
++#endif
+ #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+ 
+ #include <asm-generic/topology.h>
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 468653ce844c..327f6112fe8e 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -250,10 +250,17 @@ do {								\
+ 	}							\
+ } while (0)
+ 
++/*
++ * This is a type: either unsigned long, if the argument fits into
++ * that type, or otherwise unsigned long long.
++ */
++#define __long_type(x) \
++	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
++
+ #define __get_user_nocheck(x, ptr, size)			\
+ ({								\
+ 	long __gu_err;						\
+-	unsigned long __gu_val;					\
++	__long_type(*(ptr)) __gu_val;				\
+ 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+ 	__chk_user_ptr(ptr);					\
+ 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
+@@ -267,7 +274,7 @@ do {								\
+ #define __get_user_check(x, ptr, size)					\
+ ({									\
+ 	long __gu_err = -EFAULT;					\
+-	unsigned long  __gu_val = 0;					\
++	__long_type(*(ptr)) __gu_val = 0;				\
+ 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
+ 	might_fault();							\
+ 	if (access_ok(VERIFY_READ, __gu_addr, (size))) {		\
+@@ -281,7 +288,7 @@ do {								\
+ #define __get_user_nosleep(x, ptr, size)			\
+ ({								\
+ 	long __gu_err;						\
+-	unsigned long __gu_val;					\
++	__long_type(*(ptr)) __gu_val;				\
+ 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+ 	__chk_user_ptr(ptr);					\
+ 	barrier_nospec();					\
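The __get_user() fix above hinges on __long_type(): __builtin_choose_expr() is resolved at compile time, so __gu_val becomes unsigned long long only when the target is wider than a long, and 64-bit reads on 32-bit kernels stop being truncated to 32 bits. A minimal standalone sketch (GCC/Clang; illustrative, not kernel code):

/* Prints sizeof(unsigned long) for an int, and sizeof(unsigned long
 * long) for a long long -- so on 32-bit targets the second is 8, not 4. */
#include <stdio.h>

#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

int main(void)
{
	int i = 0;
	long long ll = 0;

	printf("%zu\n", sizeof(__long_type(i)));	/* 4 on ILP32, 8 on LP64 */
	printf("%zu\n", sizeof(__long_type(ll)));	/* always 8 */
	return 0;
}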
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 285c6465324a..f817342aab8f 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1526,6 +1526,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ 	SET_SCRATCH0(r13);
+ 	GET_PACA(r13);
++	std	r1,PACA_EXRFI+EX_R12(r13)
++	ld	r1,PACAKSAVE(r13)
+ 	std	r9,PACA_EXRFI+EX_R9(r13)
+ 	std	r10,PACA_EXRFI+EX_R10(r13)
+ 	std	r11,PACA_EXRFI+EX_R11(r13)
+@@ -1560,12 +1562,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ 	ld	r9,PACA_EXRFI+EX_R9(r13)
+ 	ld	r10,PACA_EXRFI+EX_R10(r13)
+ 	ld	r11,PACA_EXRFI+EX_R11(r13)
++	ld	r1,PACA_EXRFI+EX_R12(r13)
+ 	GET_SCRATCH0(r13);
+ 	rfid
+ 
+ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ 	SET_SCRATCH0(r13);
+ 	GET_PACA(r13);
++	std	r1,PACA_EXRFI+EX_R12(r13)
++	ld	r1,PACAKSAVE(r13)
+ 	std	r9,PACA_EXRFI+EX_R9(r13)
+ 	std	r10,PACA_EXRFI+EX_R10(r13)
+ 	std	r11,PACA_EXRFI+EX_R11(r13)
+@@ -1600,6 +1605,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ 	ld	r9,PACA_EXRFI+EX_R9(r13)
+ 	ld	r10,PACA_EXRFI+EX_R10(r13)
+ 	ld	r11,PACA_EXRFI+EX_R11(r13)
++	ld	r1,PACA_EXRFI+EX_R12(r13)
+ 	GET_SCRATCH0(r13);
+ 	hrfid
+ 
+diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
+index 4794d6b4f4d2..b3142c7b9c31 100644
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -1156,6 +1156,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
+ 	if (smp_ops && smp_ops->bringup_done)
+ 		smp_ops->bringup_done();
+ 
++	/*
++	 * On a shared LPAR, associativity needs to be requested.
++	 * Hence, get numa topology before dumping cpu topology
++	 */
++	shared_proc_topology_init();
+ 	dump_numa_cpu_topology();
+ 
+ 	/*
+diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
+index 0c7e05d89244..35ac5422903a 100644
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -1078,7 +1078,6 @@ static int prrn_enabled;
+ static void reset_topology_timer(void);
+ static int topology_timer_secs = 1;
+ static int topology_inited;
+-static int topology_update_needed;
+ 
+ /*
+  * Change polling interval for associativity changes.
+@@ -1306,11 +1305,8 @@ int numa_update_cpu_topology(bool cpus_locked)
+ 	struct device *dev;
+ 	int weight, new_nid, i = 0;
+ 
+-	if (!prrn_enabled && !vphn_enabled) {
+-		if (!topology_inited)
+-			topology_update_needed = 1;
++	if (!prrn_enabled && !vphn_enabled && topology_inited)
+ 		return 0;
+-	}
+ 
+ 	weight = cpumask_weight(&cpu_associativity_changes_mask);
+ 	if (!weight)
+@@ -1423,7 +1419,6 @@ int numa_update_cpu_topology(bool cpus_locked)
+ 
+ out:
+ 	kfree(updates);
+-	topology_update_needed = 0;
+ 	return changed;
+ }
+ 
+@@ -1551,6 +1546,15 @@ int prrn_is_enabled(void)
+ 	return prrn_enabled;
+ }
+ 
++void __init shared_proc_topology_init(void)
++{
++	if (lppaca_shared_proc(get_lppaca())) {
++		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
++			    nr_cpumask_bits);
++		numa_update_cpu_topology(false);
++	}
++}
++
+ static int topology_read(struct seq_file *file, void *v)
+ {
+ 	if (vphn_enabled || prrn_enabled)
+@@ -1608,10 +1612,6 @@ static int topology_update_init(void)
+ 		return -ENOMEM;
+ 
+ 	topology_inited = 1;
+-	if (topology_update_needed)
+-		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
+-					nr_cpumask_bits);
+-
+ 	return 0;
+ }
+ device_initcall(topology_update_init);
+diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+index 58fa3d319f1c..dac36ba82fea 100644
+--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
++++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+@@ -9,8 +9,10 @@
+  * option) any later version.
+  */
+ 
++#include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/kernel.h>
++#include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ 
+@@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
+ }
+ 
+ early_initcall(t1042rdb_diu_init);
++
++MODULE_LICENSE("GPL");
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 2edc673be137..99d1152ae224 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
+ 		int len, error_log_length;
+ 
+ 		error_log_length = 8 + rtas_error_extended_log_length(h);
+-		len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
++		len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+ 		memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
+ 		memcpy(global_mce_data_buf, h, len);
+ 		errhdr = (struct rtas_error_log *)global_mce_data_buf;
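The one-character max_t() -> min_t() fix above is an out-of-bounds write fix: the copy length must be clamped down to the destination buffer, never rounded up to it. A sketch of the difference (buffer size and macro shapes mirror the patch, simplified):

#include <string.h>

#define RTAS_ERROR_LOG_MAX 2048
#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];

void copy_log(const void *h, int error_log_length)
{
	/* With max_t(), any error_log_length > RTAS_ERROR_LOG_MAX would
	 * have been used as-is and overflowed global_mce_data_buf. */
	int len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);

	memcpy(global_mce_data_buf, h, len);
}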
+diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
+index eb69a5186243..280e964e1aa8 100644
+--- a/arch/powerpc/sysdev/mpic_msgr.c
++++ b/arch/powerpc/sysdev/mpic_msgr.c
+@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
+ 
+ 	/* IO map the message register block. */
+ 	of_address_to_resource(np, 0, &rsrc);
+-	msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
++	msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ 	if (!msgr_block_addr) {
+ 		dev_err(&dev->dev, "Failed to iomap MPIC message registers");
+ 		return -EFAULT;
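The ioremap() fix above is an off-by-one: kernel resource ranges are inclusive of both endpoints, so the size is end - start + 1, which is exactly what resource_size() computes:

/* Simplified model of resource_size() from include/linux/ioport.h. */
struct resource { unsigned long start, end; };

static inline unsigned long resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

/* e.g. { .start = 0x1000, .end = 0x1fff } spans 0x1000 bytes;
 * the old rsrc.end - rsrc.start mapped one byte too few. */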
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index f6561b783b61..eed1c137f618 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -52,8 +52,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
+ # Make sure only to export the intended __vdso_xxx symbol offsets.
+ quiet_cmd_vdsold = VDSOLD  $@
+-      cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
+-                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
++      cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
++                           -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
+                    $(CROSS_COMPILE)objcopy \
+                            $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
+ 
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 9f5ea9d87069..9b0216d571ad 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
+ 	if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
+ 			       sizeof(nt_name) - 1))
+ 		return NULL;
+-	if (strcmp(nt_name, "VMCOREINFO") != 0)
++	if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
+ 		return NULL;
+ 	vmcoreinfo = kzalloc_panic(note.n_descsz);
+-	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
++	if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
++		kfree(vmcoreinfo);
+ 		return NULL;
++	}
+ 	*size = note.n_descsz;
+ 	return vmcoreinfo;
+ }
+@@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
+  */
+ static void *nt_vmcoreinfo(void *ptr)
+ {
++	const char *name = VMCOREINFO_NOTE_NAME;
+ 	unsigned long size;
+ 	void *vmcoreinfo;
+ 
+ 	vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
+-	if (!vmcoreinfo)
+-		vmcoreinfo = get_vmcoreinfo_old(&size);
++	if (vmcoreinfo)
++		return nt_init_name(ptr, 0, vmcoreinfo, size, name);
++
++	vmcoreinfo = get_vmcoreinfo_old(&size);
+ 	if (!vmcoreinfo)
+ 		return ptr;
+-	return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
++	ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
++	kfree(vmcoreinfo);
++	return ptr;
+ }
+ 
+ /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index e54dda8a0363..de340e41f3b2 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -122,8 +122,7 @@ archheaders:
+ 	$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ 	            kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
+ 		    obj=$(HOST_DIR)/include/generated/uapi/asm
+-	$(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
+-
++	$(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) archheaders
+ 
+ archprepare: include/generated/user_constants.h
+ 
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index 8c7b3e5a2d01..3a17107594c8 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -148,6 +148,7 @@ enum mce_notifier_prios {
+ 	MCE_PRIO_LOWEST		= 0,
+ };
+ 
++struct notifier_block;
+ extern void mce_register_decode_chain(struct notifier_block *nb);
+ extern void mce_unregister_decode_chain(struct notifier_block *nb);
+ 
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index bb035a4cbc8c..9eeb1359ec75 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_PGTABLE_3LEVEL_H
+ #define _ASM_X86_PGTABLE_3LEVEL_H
+ 
++#include <asm/atomic64_32.h>
++
+ /*
+  * Intel Physical Address Extension (PAE) Mode - three-level page
+  * tables on PPro+ CPUs.
+@@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
+ {
+ 	pte_t res;
+ 
+-	/* xchg acts as a barrier before the setting of the high bits */
+-	res.pte_low = xchg(&ptep->pte_low, 0);
+-	res.pte_high = ptep->pte_high;
+-	ptep->pte_high = 0;
++	res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
+ 
+ 	return res;
+ }
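The pgtable-3level.h change closes a small race: the old sequence xchg'd pte_low and only afterwards read and cleared pte_high, so another CPU could briefly observe a half-cleared PTE. A single 64-bit atomic exchange leaves no such window; the shape of the operation, as a userspace sketch:

#include <stdatomic.h>
#include <stdint.h>

/* One atomic read-and-clear of a 64-bit entry: no intermediate state in
 * which the low word is zero but the high word is still populated. */
uint64_t pte_get_and_clear(_Atomic uint64_t *ptep)
{
	return atomic_exchange(ptep, 0);
}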
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 74392d9d51e0..a10481656d82 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -1343,7 +1343,7 @@ device_initcall(init_tsc_clocksource);
+ 
+ void __init tsc_early_delay_calibrate(void)
+ {
+-	unsigned long lpj;
++	u64 lpj;
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_TSC))
+ 		return;
+@@ -1355,7 +1355,7 @@ void __init tsc_early_delay_calibrate(void)
+ 	if (!tsc_khz)
+ 		return;
+ 
+-	lpj = tsc_khz * 1000;
++	lpj = (u64)tsc_khz * 1000;
+ 	do_div(lpj, HZ);
+ 	loops_per_jiffy = lpj;
+ }
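The tsc.c type change matters on 32-bit kernels, where unsigned long is 32 bits: tsc_khz * 1000 wraps for TSC rates above roughly 4.29 GHz, and do_div() expects a u64 anyway. A small demonstration of the wrap (hypothetical clock rate):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tsc_khz = 4400000;		/* hypothetical 4.4 GHz TSC */
	uint32_t wrong = tsc_khz * 1000;	/* wraps to 105032704 */
	uint64_t right = (uint64_t)tsc_khz * 1000;

	printf("%u vs %llu\n", wrong, (unsigned long long)right);
	return 0;
}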
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index a44e568363a4..42f1ba92622a 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -221,6 +221,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
+ 						    PT64_EPT_EXECUTABLE_MASK;
+ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
+ 
++/*
++ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
++ * to guard against L1TF attacks.
++ */
++static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
++
++/*
++ * The number of high-order 1 bits to use in the mask above.
++ */
++static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
++
+ static void mmu_spte_set(u64 *sptep, u64 spte);
+ 
+ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
+@@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+ {
+ 	unsigned int gen = kvm_current_mmio_generation(vcpu);
+ 	u64 mask = generation_mmio_spte_mask(gen);
++	u64 gpa = gfn << PAGE_SHIFT;
+ 
+ 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
+-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
++	mask |= shadow_mmio_value | access;
++	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
++	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
++		<< shadow_nonpresent_or_rsvd_mask_len;
+ 
+ 	trace_mark_mmio_spte(sptep, gfn, access, gen);
+ 	mmu_spte_set(sptep, mask);
+@@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
+ 
+ static gfn_t get_mmio_spte_gfn(u64 spte)
+ {
+-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
+-	return (spte & ~mask) >> PAGE_SHIFT;
++	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
++		   shadow_nonpresent_or_rsvd_mask;
++	u64 gpa = spte & ~mask;
++
++	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
++	       & shadow_nonpresent_or_rsvd_mask;
++
++	return gpa >> PAGE_SHIFT;
+ }
+ 
+ static unsigned get_mmio_spte_access(u64 spte)
+@@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+ 
+-static void kvm_mmu_clear_all_pte_masks(void)
++static void kvm_mmu_reset_all_pte_masks(void)
+ {
+ 	shadow_user_mask = 0;
+ 	shadow_accessed_mask = 0;
+@@ -391,6 +412,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
+ 	shadow_mmio_mask = 0;
+ 	shadow_present_mask = 0;
+ 	shadow_acc_track_mask = 0;
++
++	/*
++	 * If the CPU has 46 or less physical address bits, then set an
++	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
++	 * assumed that the CPU is not vulnerable to L1TF.
++	 */
++	if (boot_cpu_data.x86_phys_bits <
++	    52 - shadow_nonpresent_or_rsvd_mask_len)
++		shadow_nonpresent_or_rsvd_mask =
++			rsvd_bits(boot_cpu_data.x86_phys_bits -
++				  shadow_nonpresent_or_rsvd_mask_len,
++				  boot_cpu_data.x86_phys_bits - 1);
+ }
+ 
+ static int is_cpuid_PSE36(void)
+@@ -5500,7 +5533,7 @@ int kvm_mmu_module_init(void)
+ {
+ 	int ret = -ENOMEM;
+ 
+-	kvm_mmu_clear_all_pte_masks();
++	kvm_mmu_reset_all_pte_masks();
+ 
+ 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
+ 					    sizeof(struct pte_list_desc),
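The MMIO-SPTE rework above is an L1TF hardening: high GPA bits are relocated into reserved-bit positions so that a not-present SPTE never forms an address the CPU could speculatively treat as cacheable host memory. How the guard mask itself is derived, as an illustrative sketch (names mirror the patch, not the kernel's exact code path):

#include <stdint.h>
#include <stdio.h>

static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int phys_bits = 46;	/* hypothetical CPU below the 52-bit limit */
	int len = 5;		/* shadow_nonpresent_or_rsvd_mask_len */
	/* Set the top 'len' bits just below the physical address width. */
	uint64_t mask = rsvd_bits(phys_bits - len, phys_bits - 1);

	printf("%#llx\n", (unsigned long long)mask);	/* 0x3e0000000000 */
	return 0;
}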
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index bedabcf33a3e..9869bfd0c601 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -939,17 +939,21 @@ struct vcpu_vmx {
+ 	/*
+ 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
+ 	 * non-nested (L1) guest, it always points to vmcs01. For a nested
+-	 * guest (L2), it points to a different VMCS.
++	 * guest (L2), it points to a different VMCS.  loaded_cpu_state points
++	 * to the VMCS whose state is loaded into the CPU registers that only
++	 * need to be switched when transitioning to/from the kernel; a NULL
++	 * value indicates that host state is loaded.
+ 	 */
+ 	struct loaded_vmcs    vmcs01;
+ 	struct loaded_vmcs   *loaded_vmcs;
++	struct loaded_vmcs   *loaded_cpu_state;
+ 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
+ 	struct msr_autoload {
+ 		struct vmx_msrs guest;
+ 		struct vmx_msrs host;
+ 	} msr_autoload;
++
+ 	struct {
+-		int           loaded;
+ 		u16           fs_sel, gs_sel, ldt_sel;
+ #ifdef CONFIG_X86_64
+ 		u16           ds_sel, es_sel;
+@@ -2750,10 +2754,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+ #endif
+ 	int i;
+ 
+-	if (vmx->host_state.loaded)
++	if (vmx->loaded_cpu_state)
+ 		return;
+ 
+-	vmx->host_state.loaded = 1;
++	vmx->loaded_cpu_state = vmx->loaded_vmcs;
++
+ 	/*
+ 	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
+ 	 * allow segment selectors with cpl > 0 or ti == 1.
+@@ -2815,11 +2820,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
+ 
+ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
+ {
+-	if (!vmx->host_state.loaded)
++	if (!vmx->loaded_cpu_state)
+ 		return;
+ 
++	WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
++
+ 	++vmx->vcpu.stat.host_state_reload;
+-	vmx->host_state.loaded = 0;
++	vmx->loaded_cpu_state = NULL;
++
+ #ifdef CONFIG_X86_64
+ 	if (is_long_mode(&vmx->vcpu))
+ 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+@@ -8115,7 +8123,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ 
+ 	/* CPL=0 must be checked manually. */
+ 	if (vmx_get_cpl(vcpu)) {
+-		kvm_queue_exception(vcpu, UD_VECTOR);
++		kvm_inject_gp(vcpu, 0);
+ 		return 1;
+ 	}
+ 
+@@ -8179,7 +8187,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
+ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+ {
+ 	if (vmx_get_cpl(vcpu)) {
+-		kvm_queue_exception(vcpu, UD_VECTOR);
++		kvm_inject_gp(vcpu, 0);
+ 		return 0;
+ 	}
+ 
+@@ -10517,8 +10525,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
+ 		return;
+ 
+ 	cpu = get_cpu();
+-	vmx->loaded_vmcs = vmcs;
+ 	vmx_vcpu_put(vcpu);
++	vmx->loaded_vmcs = vmcs;
+ 	vmx_vcpu_load(vcpu, cpu);
+ 	put_cpu();
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 24c84aa87049..94cd63081471 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6506,20 +6506,22 @@ static void kvm_set_mmio_spte_mask(void)
+ 	 * Set the reserved bits and the present bit of an paging-structure
+ 	 * entry to generate page fault with PFER.RSV = 1.
+ 	 */
+-	 /* Mask the reserved physical address bits. */
+-	mask = rsvd_bits(maxphyaddr, 51);
++
++	/*
++	 * Mask the uppermost physical address bit, which would be reserved as
++	 * long as the supported physical address width is less than 52.
++	 */
++	mask = 1ull << 51;
+ 
+ 	/* Set the present bit. */
+ 	mask |= 1ull;
+ 
+-#ifdef CONFIG_X86_64
+ 	/*
+ 	 * If reserved bit is not supported, clear the present bit to disable
+ 	 * mmio page fault.
+ 	 */
+-	if (maxphyaddr == 52)
++	if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
+ 		mask &= ~1ull;
+-#endif
+ 
+ 	kvm_mmu_set_mmio_spte_mask(mask, mask);
+ }
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 2c30cabfda90..071d82ec9abb 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
+ static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ 	trace_xen_mmu_set_pte_atomic(ptep, pte);
+-	set_64bit((u64 *)ptep, native_pte_val(pte));
++	__xen_set_pte(ptep, pte);
+ }
+ 
+ static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ 	trace_xen_mmu_pte_clear(mm, addr, ptep);
+-	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+-		native_pte_clear(mm, addr, ptep);
++	__xen_set_pte(ptep, native_make_pte(0));
+ }
+ 
+ static void xen_pmd_clear(pmd_t *pmdp)
+@@ -1571,7 +1570,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+ 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ 			       pte_val_ma(pte));
+ #endif
+-	native_set_pte(ptep, pte);
++	__xen_set_pte(ptep, pte);
+ }
+ 
+ /* Early in boot, while setting up the initial pagetable, assume
+diff --git a/block/bio.c b/block/bio.c
+index 047c5dca6d90..ff94640bc734 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -156,7 +156,7 @@ out:
+ 
+ unsigned int bvec_nr_vecs(unsigned short idx)
+ {
+-	return bvec_slabs[idx].nr_vecs;
++	return bvec_slabs[--idx].nr_vecs;
+ }
+ 
+ void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
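The bio.c fix is a 1-based/0-based index mismatch: a bvec pool index of 0 means "no external bvec slab", so slab table lookups need idx - 1. Simplified (table sizes mirror the kernel's bvec_slabs of that era):

struct biovec_slab { int nr_vecs; };

static struct biovec_slab bvec_slabs[] = {
	{ 1 }, { 4 }, { 16 }, { 64 }, { 128 }, { 256 },
};

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;	/* idx 1 -> slab 0, and so on */
}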
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 1646ea85dade..746a5eac4541 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2159,7 +2159,9 @@ static inline bool should_fail_request(struct hd_struct *part,
+ 
+ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+ {
+-	if (part->policy && op_is_write(bio_op(bio))) {
++	const int op = bio_op(bio);
++
++	if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+ 		char b[BDEVNAME_SIZE];
+ 
+ 		WARN_ONCE(1,
+diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
+index 3de0836163c2..d5f2c21d8531 100644
+--- a/block/blk-mq-tag.c
++++ b/block/blk-mq-tag.c
+@@ -23,6 +23,9 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
+ 
+ /*
+  * If a previously inactive queue goes active, bump the active user count.
++ * We need to do this before try to allocate driver tag, then even if fail
++ * to get tag when first time, the other shared-tag users could reserve
++ * budget for it.
+  */
+ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 654b0dc7e001..2f9e14361673 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -285,7 +285,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+ 		rq->tag = -1;
+ 		rq->internal_tag = tag;
+ 	} else {
+-		if (blk_mq_tag_busy(data->hctx)) {
++		if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
+ 			rq_flags = RQF_MQ_INFLIGHT;
+ 			atomic_inc(&data->hctx->nr_active);
+ 		}
+@@ -367,6 +367,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
+ 		if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+ 		    !(data->flags & BLK_MQ_REQ_RESERVED))
+ 			e->type->ops.mq.limit_depth(op, data);
++	} else {
++		blk_mq_tag_busy(data->hctx);
+ 	}
+ 
+ 	tag = blk_mq_get_tag(data);
+@@ -970,6 +972,7 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+ 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+ 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
+ 	};
++	bool shared;
+ 
+ 	might_sleep_if(wait);
+ 
+@@ -979,9 +982,10 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+ 	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
+ 		data.flags |= BLK_MQ_REQ_RESERVED;
+ 
++	shared = blk_mq_tag_busy(data.hctx);
+ 	rq->tag = blk_mq_get_tag(&data);
+ 	if (rq->tag >= 0) {
+-		if (blk_mq_tag_busy(data.hctx)) {
++		if (shared) {
+ 			rq->rq_flags |= RQF_MQ_INFLIGHT;
+ 			atomic_inc(&data.hctx->nr_active);
+ 		}
+diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
+index 82b6c27b3245..f6f180f3aa1c 100644
+--- a/block/cfq-iosched.c
++++ b/block/cfq-iosched.c
+@@ -4735,12 +4735,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
+ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
+ {									\
+ 	struct cfq_data *cfqd = e->elevator_data;			\
+-	unsigned int __data;						\
++	unsigned int __data, __min = (MIN), __max = (MAX);		\
++									\
+ 	cfq_var_store(&__data, (page));					\
+-	if (__data < (MIN))						\
+-		__data = (MIN);						\
+-	else if (__data > (MAX))					\
+-		__data = (MAX);						\
++	if (__data < __min)						\
++		__data = __min;						\
++	else if (__data > __max)					\
++		__data = __max;						\
+ 	if (__CONV)							\
+ 		*(__PTR) = (u64)__data * NSEC_PER_MSEC;			\
+ 	else								\
+@@ -4769,12 +4770,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
+ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
+ {									\
+ 	struct cfq_data *cfqd = e->elevator_data;			\
+-	unsigned int __data;						\
++	unsigned int __data, __min = (MIN), __max = (MAX);		\
++									\
+ 	cfq_var_store(&__data, (page));					\
+-	if (__data < (MIN))						\
+-		__data = (MIN);						\
+-	else if (__data > (MAX))					\
+-		__data = (MAX);						\
++	if (__data < __min)						\
++		__data = __min;						\
++	else if (__data > __max)					\
++		__data = __max;						\
+ 	*(__PTR) = (u64)__data * NSEC_PER_USEC;				\
+ 	return count;							\
+ }
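The cfq STORE_FUNCTION reshuffle keeps the clamp behaviour identical; routing (MIN)/(MAX) through local variables appears aimed at compilers that warn when an unsigned value is compared against a constant bound such as 0 ("comparison is always false"). The clamp itself, written out plainly:

unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	if (v < lo)
		v = lo;
	else if (v > hi)
		v = hi;
	return v;
}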
+diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
+index 3de794bcf8fa..69603ba52a3a 100644
+--- a/drivers/acpi/acpica/hwregs.c
++++ b/drivers/acpi/acpica/hwregs.c
+@@ -528,13 +528,18 @@ acpi_status acpi_hw_register_read(u32 register_id, u32 *return_value)
+ 
+ 		status =
+ 		    acpi_hw_read(&value64, &acpi_gbl_FADT.xpm2_control_block);
+-		value = (u32)value64;
++		if (ACPI_SUCCESS(status)) {
++			value = (u32)value64;
++		}
+ 		break;
+ 
+ 	case ACPI_REGISTER_PM_TIMER:	/* 32-bit access */
+ 
+ 		status = acpi_hw_read(&value64, &acpi_gbl_FADT.xpm_timer_block);
+-		value = (u32)value64;
++		if (ACPI_SUCCESS(status)) {
++			value = (u32)value64;
++		}
++
+ 		break;
+ 
+ 	case ACPI_REGISTER_SMI_COMMAND_BLOCK:	/* 8-bit access */
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 970dd87d347c..6799d00dd790 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1612,7 +1612,8 @@ static int acpi_add_single_object(struct acpi_device **child,
+ 	 * Note this must be done before the get power-/wakeup_dev-flags calls.
+ 	 */
+ 	if (type == ACPI_BUS_TYPE_DEVICE)
+-		acpi_bus_get_status(device);
++		if (acpi_bus_get_status(device) < 0)
++			acpi_set_device_status(device, 0);
+ 
+ 	acpi_bus_get_power_flags(device);
+ 	acpi_bus_get_wakeup_device_flags(device);
+@@ -1690,7 +1691,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
+ 		 * acpi_add_single_object updates this once we've an acpi_device
+ 		 * so that acpi_bus_get_status' quirk handling can be used.
+ 		 */
+-		*sta = 0;
++		*sta = ACPI_STA_DEFAULT;
+ 		break;
+ 	case ACPI_TYPE_PROCESSOR:
+ 		*type = ACPI_BUS_TYPE_PROCESSOR;
+diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
+index 2a8634a52856..5a628148f3f0 100644
+--- a/drivers/clk/rockchip/clk-rk3399.c
++++ b/drivers/clk/rockchip/clk-rk3399.c
+@@ -1523,6 +1523,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
+ 	"pclk_pmu_src",
+ 	"fclk_cm0s_src_pmu",
+ 	"clk_timer_src_pmu",
++	"pclk_rkpwm_pmu",
+ };
+ 
+ static void __init rk3399_clk_init(struct device_node *np)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 7dcbac8af9a7..b60aa7d43cb7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1579,9 +1579,9 @@ struct amdgpu_device {
+ 	DECLARE_HASHTABLE(mn_hash, 7);
+ 
+ 	/* tracking pinned memory */
+-	u64 vram_pin_size;
+-	u64 invisible_pin_size;
+-	u64 gart_pin_size;
++	atomic64_t vram_pin_size;
++	atomic64_t visible_pin_size;
++	atomic64_t gart_pin_size;
+ 
+ 	/* amdkfd interface */
+ 	struct kfd_dev          *kfd;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 9c85a90be293..5a196ec49be8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ 		return;
+ 	}
+ 
+-	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
++	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
+ 	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 91517b166a3b..063f9aa96946 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -494,13 +494,13 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 	case AMDGPU_INFO_VRAM_GTT: {
+ 		struct drm_amdgpu_info_vram_gtt vram_gtt;
+ 
+-		vram_gtt.vram_size = adev->gmc.real_vram_size;
+-		vram_gtt.vram_size -= adev->vram_pin_size;
+-		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
+-		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
++		vram_gtt.vram_size = adev->gmc.real_vram_size -
++			atomic64_read(&adev->vram_pin_size);
++		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
++			atomic64_read(&adev->visible_pin_size);
+ 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ 		vram_gtt.gtt_size *= PAGE_SIZE;
+-		vram_gtt.gtt_size -= adev->gart_pin_size;
++		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
+ 		return copy_to_user(out, &vram_gtt,
+ 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
+ 	}
+@@ -509,17 +509,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 
+ 		memset(&mem, 0, sizeof(mem));
+ 		mem.vram.total_heap_size = adev->gmc.real_vram_size;
+-		mem.vram.usable_heap_size =
+-			adev->gmc.real_vram_size - adev->vram_pin_size;
++		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
++			atomic64_read(&adev->vram_pin_size);
+ 		mem.vram.heap_usage =
+ 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+ 
+ 		mem.cpu_accessible_vram.total_heap_size =
+ 			adev->gmc.visible_vram_size;
+-		mem.cpu_accessible_vram.usable_heap_size =
+-			adev->gmc.visible_vram_size -
+-			(adev->vram_pin_size - adev->invisible_pin_size);
++		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
++			atomic64_read(&adev->visible_pin_size);
+ 		mem.cpu_accessible_vram.heap_usage =
+ 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
+ 		mem.cpu_accessible_vram.max_allocation =
+@@ -527,8 +526,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
+ 
+ 		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
+ 		mem.gtt.total_heap_size *= PAGE_SIZE;
+-		mem.gtt.usable_heap_size = mem.gtt.total_heap_size
+-			- adev->gart_pin_size;
++		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
++			atomic64_read(&adev->gart_pin_size);
+ 		mem.gtt.heap_usage =
+ 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
+ 		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 3526efa8960e..3873c3353020 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -50,11 +50,35 @@ static bool amdgpu_need_backup(struct amdgpu_device *adev)
+ 	return true;
+ }
+ 
++/**
++ * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
++ *
++ * @bo: &amdgpu_bo buffer object
++ *
++ * This function is called when a BO stops being pinned, and updates the
++ * &amdgpu_device pin_size values accordingly.
++ */
++static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
++{
++	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
++
++	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
++		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
++		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
++			     &adev->visible_pin_size);
++	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
++		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
++	}
++}
++
+ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ 	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ 
++	if (bo->pin_count > 0)
++		amdgpu_bo_subtract_pin_size(bo);
++
+ 	if (bo->kfd_bo)
+ 		amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ 
+@@ -761,10 +785,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
+ 
+ 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+-		adev->vram_pin_size += amdgpu_bo_size(bo);
+-		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
++		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
++		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
++			     &adev->visible_pin_size);
+ 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
+-		adev->gart_pin_size += amdgpu_bo_size(bo);
++		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ 	}
+ 
+ error:
+@@ -790,12 +815,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+ 	if (bo->pin_count)
+ 		return 0;
+ 
+-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+-		adev->vram_pin_size -= amdgpu_bo_size(bo);
+-		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+-		adev->gart_pin_size -= amdgpu_bo_size(bo);
+-	}
++	amdgpu_bo_subtract_pin_size(bo);
+ 
+ 	for (i = 0; i < bo->placement.num_placement; i++) {
+ 		bo->placements[i].lpfn = 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index a44c3d58fef4..2ec20348b983 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -1157,7 +1157,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
+ 	int r, size = sizeof(vddnb);
+ 
+ 	/* only APUs have vddnb */
+-	if  (adev->flags & AMD_IS_APU)
++	if  (!(adev->flags & AMD_IS_APU))
+ 		return -EINVAL;
+ 
+ 	/* Can't get voltage when the card is off */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index 9f1a5bd39ae8..5b39d1399630 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
+ 		msleep(1);
+ 	}
+ 
++	if (ucode) {
++		ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
++		ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
++	}
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index 86a0715d9431..1cafe8d83a4d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -53,9 +53,8 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ 						  int fd,
+ 						  enum drm_sched_priority priority)
+ {
+-	struct file *filp = fcheck(fd);
++	struct file *filp = fget(fd);
+ 	struct drm_file *file;
+-	struct pid *pid;
+ 	struct amdgpu_fpriv *fpriv;
+ 	struct amdgpu_ctx *ctx;
+ 	uint32_t id;
+@@ -63,20 +62,12 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ 	if (!filp)
+ 		return -EINVAL;
+ 
+-	pid = get_pid(((struct drm_file *)filp->private_data)->pid);
++	file = filp->private_data;
++	fpriv = file->driver_priv;
++	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
++		amdgpu_ctx_priority_override(ctx, priority);
+ 
+-	mutex_lock(&adev->ddev->filelist_mutex);
+-	list_for_each_entry(file, &adev->ddev->filelist, lhead) {
+-		if (file->pid != pid)
+-			continue;
+-
+-		fpriv = file->driver_priv;
+-		idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+-				amdgpu_ctx_priority_override(ctx, priority);
+-	}
+-	mutex_unlock(&adev->ddev->filelist_mutex);
+-
+-	put_pid(pid);
++	fput(filp);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+index e5da4654b630..8b3cc6687769 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+@@ -73,7 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
+ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
+ int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+ 
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
+ uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+index 08e38579af24..bdc472b6e641 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
+ 	AMDGPU_UCODE_ID_SMC,
+ 	AMDGPU_UCODE_ID_UVD,
+ 	AMDGPU_UCODE_ID_VCE,
++	AMDGPU_UCODE_ID_VCN,
+ 	AMDGPU_UCODE_ID_MAXIMUM,
+ };
+ 
+@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
+ 	void *kaddr;
+ 	/* ucode_size_bytes */
+ 	uint32_t ucode_size;
++	/* starting tmr mc address */
++	uint32_t tmr_mc_addr_lo;
++	uint32_t tmr_mc_addr_hi;
+ };
+ 
+ void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 1b4ad9b2a755..bee49991c1ff 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
+ 			version_major, version_minor, family_id);
+ 	}
+ 
+-	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+-		  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
++	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ 		  +  AMDGPU_VCN_SESSION_SIZE * 40;
++	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
++		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+ 	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
+ 				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+@@ -187,11 +188,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
+ 		unsigned offset;
+ 
+ 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+-		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+-		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+-			    le32_to_cpu(hdr->ucode_size_bytes));
+-		size -= le32_to_cpu(hdr->ucode_size_bytes);
+-		ptr += le32_to_cpu(hdr->ucode_size_bytes);
++		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
++			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
++			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
++				    le32_to_cpu(hdr->ucode_size_bytes));
++			size -= le32_to_cpu(hdr->ucode_size_bytes);
++			ptr += le32_to_cpu(hdr->ucode_size_bytes);
++		}
+ 		memset_io(ptr, 0, size);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+index b6333f92ba45..ef4784458800 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+@@ -97,33 +97,29 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ }
+ 
+ /**
+- * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
++ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+  *
+  * @bo: &amdgpu_bo buffer object (must be in VRAM)
+  *
+  * Returns:
+- * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
++ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+  */
+-u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
++u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+ {
+ 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ 	struct ttm_mem_reg *mem = &bo->tbo.mem;
+ 	struct drm_mm_node *nodes = mem->mm_node;
+ 	unsigned pages = mem->num_pages;
+-	u64 usage = 0;
++	u64 usage;
+ 
+ 	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+-		return 0;
++		return amdgpu_bo_size(bo);
+ 
+ 	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+-		return amdgpu_bo_size(bo);
++		return 0;
+ 
+-	while (nodes && pages) {
+-		usage += nodes->size << PAGE_SHIFT;
+-		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+-		pages -= nodes->size;
+-		++nodes;
+-	}
++	for (usage = 0; nodes && pages; pages -= nodes->size, nodes++)
++		usage += amdgpu_vram_mgr_vis_size(adev, nodes);
+ 
+ 	return usage;
+ }
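Inverting the amdgpu helper from "invisible" to "visible" size lets pinning account directly against the new visible_pin_size counter. The per-node loop sums only the slice of each VRAM node that lies below the visible limit; a simplified model (types and units reduced for illustration):

#include <stdint.h>

struct node { uint64_t start, size; };	/* in pages, simplified */

static uint64_t visible_pages(const struct node *n, uint64_t vis_end)
{
	if (n->start >= vis_end)
		return 0;
	return n->start + n->size <= vis_end ? n->size
					     : vis_end - n->start;
}

uint64_t bo_visible_pages(const struct node *nodes, unsigned int count,
			  uint64_t vis_end)
{
	uint64_t usage = 0;
	unsigned int i;

	for (i = 0; i < count; i++)
		usage += visible_pages(&nodes[i], vis_end);
	return usage;
}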
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index a69153435ea7..8f0ac805ecd2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3433,7 +3433,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+ 
+ 		/* wait for RLC_SAFE_MODE */
+ 		for (i = 0; i < adev->usec_timeout; i++) {
+-			if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
++			if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ 				break;
+ 			udelay(1);
+ 		}
+diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+index 0ff136d02d9b..02be34e72ed9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
+ 	case AMDGPU_UCODE_ID_VCE:
+ 		*type = GFX_FW_TYPE_VCE;
+ 		break;
++	case AMDGPU_UCODE_ID_VCN:
++		*type = GFX_FW_TYPE_VCN;
++		break;
+ 	case AMDGPU_UCODE_ID_MAXIMUM:
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index bfddf97dd13e..a16eebc05d12 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -1569,7 +1569,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
+ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ 	.type = AMDGPU_RING_TYPE_UVD,
+ 	.align_mask = 0xf,
+-	.nop = PACKET0(mmUVD_NO_OP, 0),
+ 	.support_64bit_ptrs = false,
+ 	.get_rptr = uvd_v6_0_ring_get_rptr,
+ 	.get_wptr = uvd_v6_0_ring_get_wptr,
+@@ -1587,7 +1586,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
+ 	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+ 	.test_ring = uvd_v6_0_ring_test_ring,
+ 	.test_ib = amdgpu_uvd_ring_test_ib,
+-	.insert_nop = amdgpu_ring_insert_nop,
++	.insert_nop = uvd_v6_0_ring_insert_nop,
+ 	.pad_ib = amdgpu_ring_generic_pad_ib,
+ 	.begin_use = amdgpu_uvd_ring_begin_use,
+ 	.end_use = amdgpu_uvd_ring_end_use,
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+index 29684c3ea4ef..700119168067 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+@@ -90,6 +90,16 @@ static int vcn_v1_0_sw_init(void *handle)
+ 	if (r)
+ 		return r;
+ 
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		const struct common_firmware_header *hdr;
++		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
++		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
++		adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
++		adev->firmware.fw_size +=
++			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
++		DRM_INFO("PSP loading VCN firmware\n");
++	}
++
+ 	r = amdgpu_vcn_resume(adev);
+ 	if (r)
+ 		return r;
+@@ -241,26 +251,38 @@ static int vcn_v1_0_resume(void *handle)
+ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
+ {
+ 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+-
+-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++	uint32_t offset;
++
++	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
++			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
++		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
++		offset = 0;
++	} else {
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ 			lower_32_bits(adev->vcn.gpu_addr));
+-	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
++		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ 			upper_32_bits(adev->vcn.gpu_addr));
+-	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+-				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++		offset = size;
++		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
++			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
++	}
++
+ 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
+ 
+ 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+-			lower_32_bits(adev->vcn.gpu_addr + size));
++		     lower_32_bits(adev->vcn.gpu_addr + offset));
+ 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+-			upper_32_bits(adev->vcn.gpu_addr + size));
++		     upper_32_bits(adev->vcn.gpu_addr + offset));
+ 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
+ 
+ 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+-			lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
++		     lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ 	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+-			upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
++		     upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
+ 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ 	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
+ 			AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 770c6b24be0b..e484d0a94bdc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1334,6 +1334,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+ 	struct backlight_properties props = { 0 };
+ 
+ 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
++	props.brightness = AMDGPU_MAX_BL_LEVEL;
+ 	props.type = BACKLIGHT_RAW;
+ 
+ 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+@@ -2123,13 +2124,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
+ static enum dc_aspect_ratio
+ get_aspect_ratio(const struct drm_display_mode *mode_in)
+ {
+-	int32_t width = mode_in->crtc_hdisplay * 9;
+-	int32_t height = mode_in->crtc_vdisplay * 16;
+-
+-	if ((width - height) < 10 && (width - height) > -10)
+-		return ASPECT_RATIO_16_9;
+-	else
+-		return ASPECT_RATIO_4_3;
++	/* 1-1 mapping, since both enums follow the HDMI spec. */
++	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+ }
+ 
+ static enum dc_color_space
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+index 52f2c01349e3..9bfb040352e9 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+@@ -98,10 +98,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+  */
+ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
+ {
+-	struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
+-	struct dc_stream_state *stream_state = crtc_state->stream;
++	struct dm_crtc_state *crtc_state;
++	struct dc_stream_state *stream_state;
+ 	uint32_t crcs[3];
+ 
++	if (crtc == NULL)
++		return;
++
++	crtc_state = to_dm_crtc_state(crtc->state);
++	stream_state = crtc_state->stream;
++
+ 	/* Early return if CRC capture is not enabled. */
+ 	if (!crtc_state->crc_enabled)
+ 		return;
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 651e1fd4622f..a558bfaa0c46 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
+ 	 * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ 	 * LVDS mode: usPixelClock = pixel clock
+ 	 */
++	if  (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
++		switch (cntl->color_depth) {
++		case COLOR_DEPTH_101010:
++			params.usSymClock =
++				cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
++			break;
++		case COLOR_DEPTH_121212:
++			params.usSymClock =
++				cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
++			break;
++		case COLOR_DEPTH_161616:
++			params.usSymClock =
++				cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
++			break;
++		default:
++			break;
++		}
++	}
+ 
+ 	if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ 		result = BP_RESULT_OK;
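The transmitter-control addition scales the HDMI TMDS symbol clock with colour depth, as the comment above the parameter block describes (x30/24 for 10 bpc, x36/24 for 12 bpc, x48/24 for 16 bpc). A worked example, assuming the usual 10 kHz clock units:

#include <stdint.h>
#include <stdio.h>

static uint16_t scale_sym_clock(uint16_t sym_clock, int bits_per_pixel)
{
	return (uint16_t)(((uint32_t)sym_clock * bits_per_pixel) / 24);
}

int main(void)
{
	uint16_t clk = 14850;	/* hypothetical 148.50 MHz pixel clock */

	printf("%u\n", scale_sym_clock(clk, 36));	/* 12 bpc -> 22275 */
	return 0;
}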
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 2fa521812d23..8a7890b03d97 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -728,6 +728,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+ 			break;
+ 		case EDID_NO_RESPONSE:
+ 			DC_LOG_ERROR("No EDID read.\n");
++
++			/*
++			 * Abort detection for non-DP connectors if we have
++			 * no EDID
++			 *
++			 * DP needs to report as connected if HDP is high
++			 * even if we have no EDID in order to go to
++			 * fail-safe mode
++			 */
++			if (!dc_is_dp_signal(link->connector_signal))
++				return false;
+ 		default:
+ 			break;
+ 		}
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 751f3ac9d921..754b4c2fc90a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -268,24 +268,30 @@ bool resource_construct(
+ 
+ 	return true;
+ }
++static int find_matching_clock_source(
++		const struct resource_pool *pool,
++		struct clock_source *clock_source)
++{
+ 
++	int i;
++
++	for (i = 0; i < pool->clk_src_count; i++) {
++		if (pool->clock_sources[i] == clock_source)
++			return i;
++	}
++	return -1;
++}
+ 
+ void resource_unreference_clock_source(
+ 		struct resource_context *res_ctx,
+ 		const struct resource_pool *pool,
+ 		struct clock_source *clock_source)
+ {
+-	int i;
+-
+-	for (i = 0; i < pool->clk_src_count; i++) {
+-		if (pool->clock_sources[i] != clock_source)
+-			continue;
++	int i = find_matching_clock_source(pool, clock_source);
+ 
++	if (i > -1)
+ 		res_ctx->clock_source_ref_count[i]--;
+ 
+-		break;
+-	}
+-
+ 	if (pool->dp_clock_source == clock_source)
+ 		res_ctx->dp_clock_source_ref_count--;
+ }
+@@ -295,19 +301,31 @@ void resource_reference_clock_source(
+ 		const struct resource_pool *pool,
+ 		struct clock_source *clock_source)
+ {
+-	int i;
+-	for (i = 0; i < pool->clk_src_count; i++) {
+-		if (pool->clock_sources[i] != clock_source)
+-			continue;
++	int i = find_matching_clock_source(pool, clock_source);
+ 
++	if (i > -1)
+ 		res_ctx->clock_source_ref_count[i]++;
+-		break;
+-	}
+ 
+ 	if (pool->dp_clock_source == clock_source)
+ 		res_ctx->dp_clock_source_ref_count++;
+ }
+ 
++int resource_get_clock_source_reference(
++		struct resource_context *res_ctx,
++		const struct resource_pool *pool,
++		struct clock_source *clock_source)
++{
++	int i = find_matching_clock_source(pool, clock_source);
++
++	if (i > -1)
++		return res_ctx->clock_source_ref_count[i];
++
++	if (pool->dp_clock_source == clock_source)
++		return res_ctx->dp_clock_source_ref_count;
++
++	return -1;
++}
++
+ bool resource_are_streams_timing_synchronizable(
+ 	struct dc_stream_state *stream1,
+ 	struct dc_stream_state *stream2)
+@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable(
+ 				!= stream2->timing.pix_clk_khz)
+ 		return false;
+ 
++	if (stream1->clamping.c_depth != stream2->clamping.c_depth)
++		return false;
++
+ 	if (stream1->phy_pix_clk != stream2->phy_pix_clk
+ 			&& (!dc_is_dp_signal(stream1->signal)
+ 			|| !dc_is_dp_signal(stream2->signal)))
+@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable(
+ 
+ 	return true;
+ }
++static bool is_dp_and_hdmi_sharable(
++		struct dc_stream_state *stream1,
++		struct dc_stream_state *stream2)
++{
++	if (stream1->ctx->dc->caps.disable_dp_clk_share)
++		return false;
++
++	if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
++	    stream2->clamping.c_depth != COLOR_DEPTH_888)
++	return false;
++
++	return true;
++
++}
+ 
+ static bool is_sharable_clk_src(
+ 	const struct pipe_ctx *pipe_with_clk_src,
+@@ -348,7 +383,10 @@ static bool is_sharable_clk_src(
+ 	if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ 		return false;
+ 
+-	if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
++	if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
++		(dc_is_dp_signal(pipe->stream->signal) &&
++		!is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
++				     pipe->stream)))
+ 		return false;
+ 
+ 	if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
+index 53c71296f3dd..efe155d50668 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc.h
++++ b/drivers/gpu/drm/amd/display/dc/dc.h
+@@ -77,6 +77,7 @@ struct dc_caps {
+ 	bool dual_link_dvi;
+ 	bool post_blend_color_processing;
+ 	bool force_dp_tps4_for_cp2520;
++	bool disable_dp_clk_share;
+ };
+ 
+ struct dc_dcc_surface_param {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+index dbe3b26b6d9e..f6ec1d3dfd0c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+@@ -919,7 +919,7 @@ void dce110_link_encoder_enable_tmds_output(
+ 	enum bp_result result;
+ 
+ 	/* Enable the PHY */
+-
++	cntl.connector_obj_id = enc110->base.connector;
+ 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ 	cntl.engine_id = enc->preferred_engine;
+ 	cntl.transmitter = enc110->base.transmitter;
+@@ -961,7 +961,7 @@ void dce110_link_encoder_enable_dp_output(
+ 	 * We need to set number of lanes manually.
+ 	 */
+ 	configure_encoder(enc110, link_settings);
+-
++	cntl.connector_obj_id = enc110->base.connector;
+ 	cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ 	cntl.engine_id = enc->preferred_engine;
+ 	cntl.transmitter = enc110->base.transmitter;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 344dd2e69e7c..aa2f03eb46fe 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -884,7 +884,7 @@ static bool construct(
+ 	dc->caps.i2c_speed_in_khz = 40;
+ 	dc->caps.max_cursor_size = 128;
+ 	dc->caps.dual_link_dvi = true;
+-
++	dc->caps.disable_dp_clk_share = true;
+ 	for (i = 0; i < pool->base.pipe_count; i++) {
+ 		pool->base.timing_generators[i] =
+ 			dce100_timing_generator_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+index e2994d337044..111c4921987f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+@@ -143,7 +143,7 @@ static void wait_for_fbc_state_changed(
+ 	struct dce110_compressor *cp110,
+ 	bool enabled)
+ {
+-	uint8_t counter = 0;
++	uint16_t counter = 0;
+ 	uint32_t addr = mmFBC_STATUS;
+ 	uint32_t value;
+ 
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index c29052b6da5a..7c0b1d7aa9b8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1939,7 +1939,9 @@ static void dce110_reset_hw_ctx_wrap(
+ 			pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ 					pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+ 
+-			if (old_clk)
++			if (old_clk && resource_get_clock_source_reference(&context->res_ctx,
++									   dc->res_pool,
++									   old_clk) == 0)
+ 				old_clk->funcs->cs_power_down(old_clk);
+ 
+ 			dc->hwss.disable_plane(dc, pipe_ctx_old);
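
The hunk above powers the old clock source down only when resource_get_clock_source_reference() reports no remaining users, so a source shared by several pipes survives until its last reference drops. A minimal standalone sketch of that refcount-gated teardown (the struct and names are illustrative, not the dc API):

	struct clk_src {
		int ref_count;				/* pipes still using this source */
		void (*power_down)(struct clk_src *cs);
	};

	/* Power down only once nothing references the source any more. */
	static void put_shared_clk(struct clk_src *cs)
	{
		if (cs && cs->ref_count == 0 && cs->power_down)
			cs->power_down(cs);
	}
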
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 48a068964722..6f4992bdc9ce 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -902,6 +902,7 @@ static bool dce80_construct(
+ 	}
+ 
+ 	dc->caps.max_planes =  pool->base.pipe_count;
++	dc->caps.disable_dp_clk_share = true;
+ 
+ 	if (!resource_construct(num_virtual_links, dc, &pool->base,
+ 			&res_create_funcs))
+@@ -1087,6 +1088,7 @@ static bool dce81_construct(
+ 	}
+ 
+ 	dc->caps.max_planes =  pool->base.pipe_count;
++	dc->caps.disable_dp_clk_share = true;
+ 
+ 	if (!resource_construct(num_virtual_links, dc, &pool->base,
+ 			&res_create_funcs))
+@@ -1268,6 +1270,7 @@ static bool dce83_construct(
+ 	}
+ 
+ 	dc->caps.max_planes =  pool->base.pipe_count;
++	dc->caps.disable_dp_clk_share = true;
+ 
+ 	if (!resource_construct(num_virtual_links, dc, &pool->base,
+ 			&res_create_funcs))
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+index 640a647f4611..abf42a7d0859 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
+@@ -102,6 +102,11 @@ void resource_reference_clock_source(
+ 		const struct resource_pool *pool,
+ 		struct clock_source *clock_source);
+ 
++int resource_get_clock_source_reference(
++		struct resource_context *res_ctx,
++		const struct resource_pool *pool,
++		struct clock_source *clock_source);
++
+ bool resource_are_streams_timing_synchronizable(
+ 		struct dc_stream_state *stream1,
+ 		struct dc_stream_state *stream2);
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+index c952845833d7..5e19f5977eb1 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
+ 	{   ixDIDT_SQ_CTRL1,                   DIDT_SQ_CTRL1__MAX_POWER_MASK,                      DIDT_SQ_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
+ 
+ 	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__UNUSED_0_MASK,                    DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL_OCP,                DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0xffff,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3853,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_0_MASK,                       DIDT_SQ_CTRL2__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK,       DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT,     0x005a,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_1_MASK,                       DIDT_SQ_CTRL2__UNUSED_1__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK,       DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT,     0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL2,                   DIDT_SQ_CTRL2__UNUSED_2_MASK,                       DIDT_SQ_CTRL2__UNUSED_2__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK,    DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT,  0x0001,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK,       DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK,   DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_STALL_CTRL,              DIDT_SQ_STALL_CTRL__UNUSED_0_MASK,                  DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK,       DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT,     0x0001,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT,     0x3853,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK,       DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT,     0x3153,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_TUNING_CTRL,             DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK,                 DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT,               0x0000,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK,                   DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT,                 0x0001,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK,                  DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__PHASE_OFFSET_MASK,                   DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT,                 0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK,                  DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT,                0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK,           DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT,         0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK,     DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT,   0x0010,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_SQ_CTRL0,                   DIDT_SQ_CTRL0__UNUSED_0_MASK,                       DIDT_SQ_CTRL0__UNUSED_0__SHIFT,                     0x0000,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT0_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT,                  0x000a,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT1_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT,                  0x0010,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT2_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT,                  0x0017,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT0_3,               DIDT_TD_WEIGHT0_3__WEIGHT3_MASK,                    DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT,                  0x002f,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT4_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT,                  0x0046,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT5_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT,                  0x005d,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT6_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_WEIGHT4_7,               DIDT_TD_WEIGHT4_7__WEIGHT7_MASK,                    DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MIN_POWER_MASK,                      DIDT_TD_CTRL1__MIN_POWER__SHIFT,                    0x0000,     GPU_CONFIGREG_DIDT_IND },
++	{   ixDIDT_TD_CTRL1,                   DIDT_TD_CTRL1__MAX_POWER_MASK,                      DIDT_TD_CTRL1__MAX_POWER__SHIFT,                    0xffff,     GPU_CONFIGREG_DIDT_IND },
++
++	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__UNUSED_0_MASK,                    DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT,                  0x0000,     GPU_CONFIGREG_DIDT_IND },
+ 	{   ixDIDT_TD_CTRL_OCP,                DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK,               DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT,             0x00ff,     GPU_CONFIGREG_DIDT_IND },
+ 
+ 	{   ixDIDT_TD_CTRL2,                   DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK,                DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT,              0x3fff,     GPU_CONFIGREG_DIDT_IND },
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+index 50690c72b2ea..617557bd8c24 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
+ 	return 0;
+ }
+ 
++/* convert from 8-bit vid to real voltage in mV*4 */
+ static uint32_t smu8_convert_8Bit_index_to_voltage(
+ 			struct pp_hwmgr *hwmgr, uint16_t voltage)
+ {
+@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
+ 	case AMDGPU_PP_SENSOR_VDDNB:
+ 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+ 			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+-		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
++		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
+ 		*((uint32_t *)value) = vddnb;
+ 		return 0;
+ 	case AMDGPU_PP_SENSOR_VDDGFX:
+ 		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+ 			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+-		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
++		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
+ 		*((uint32_t *)value) = vddgfx;
+ 		return 0;
+ 	case AMDGPU_PP_SENSOR_UVD_VCLK:
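
The new comment pins down the unit: smu8_convert_8Bit_index_to_voltage() reports quarter-millivolts (mV*4), which is why both sensor paths above now divide by 4 before handing the value to userspace. A worked example with an assumed raw value (4400 is hypothetical, chosen only to show the arithmetic):

	#include <stdio.h>

	int main(void)
	{
		unsigned int raw_mv_x4 = 4400;		/* helper output, in mV*4 */
		unsigned int vdd_mv = raw_mv_x4 / 4;	/* 1100 mV reported by the sensor */

		printf("%u mV\n", vdd_mv);
		return 0;
	}
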
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index c98e5de777cd..fcd2808874bf 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -490,7 +490,7 @@ static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
+ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
+ 		PPCLK_e clkID, uint32_t index, uint32_t *clock)
+ {
+-	int result;
++	int result = 0;
+ 
+ 	/*
+ 	 *SMU expects the Clock ID to be in the top 16 bits.
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index a5808382bdf0..c7b4481c90d7 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -116,6 +116,9 @@ static const struct edid_quirk {
+ 	/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ 	{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ 
++	/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
++	{ "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
++
+ 	/* Belinea 10 15 55 */
+ 	{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
+ 	{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+@@ -163,8 +166,9 @@ static const struct edid_quirk {
+ 	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
+ 	{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ 
+-	/* HTC Vive VR Headset */
++	/* HTC Vive and Vive Pro VR Headsets */
+ 	{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
++	{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
+ 
+ 	/* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ 	{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index 686f6552db48..3ef440b235e5 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
+ 
+ free_buffer:
+ 	etnaviv_cmdbuf_free(&gpu->buffer);
++	gpu->buffer.suballoc = NULL;
+ destroy_iommu:
+ 	etnaviv_iommu_destroy(gpu->mmu);
+ 	gpu->mmu = NULL;
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 9c449b8d8eab..015f9e93419d 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -919,7 +919,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+ 	spin_lock_init(&dev_priv->uncore.lock);
+ 
+ 	mutex_init(&dev_priv->sb_lock);
+-	mutex_init(&dev_priv->modeset_restore_lock);
+ 	mutex_init(&dev_priv->av_mutex);
+ 	mutex_init(&dev_priv->wm.wm_mutex);
+ 	mutex_init(&dev_priv->pps_mutex);
+@@ -1560,11 +1559,6 @@ static int i915_drm_suspend(struct drm_device *dev)
+ 	pci_power_t opregion_target_state;
+ 	int error;
+ 
+-	/* ignore lid events during suspend */
+-	mutex_lock(&dev_priv->modeset_restore_lock);
+-	dev_priv->modeset_restore = MODESET_SUSPENDED;
+-	mutex_unlock(&dev_priv->modeset_restore_lock);
+-
+ 	disable_rpm_wakeref_asserts(dev_priv);
+ 
+ 	/* We do a lot of poking in a lot of registers, make sure they work
+@@ -1764,10 +1758,6 @@ static int i915_drm_resume(struct drm_device *dev)
+ 
+ 	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+ 
+-	mutex_lock(&dev_priv->modeset_restore_lock);
+-	dev_priv->modeset_restore = MODESET_DONE;
+-	mutex_unlock(&dev_priv->modeset_restore_lock);
+-
+ 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ 
+ 	enable_rpm_wakeref_asserts(dev_priv);
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 71e1aa54f774..7c22fac3aa04 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1003,12 +1003,6 @@ struct i915_gem_mm {
+ #define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
+ #define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */
+ 
+-enum modeset_restore {
+-	MODESET_ON_LID_OPEN,
+-	MODESET_DONE,
+-	MODESET_SUSPENDED,
+-};
+-
+ #define DP_AUX_A 0x40
+ #define DP_AUX_B 0x10
+ #define DP_AUX_C 0x20
+@@ -1740,8 +1734,6 @@ struct drm_i915_private {
+ 
+ 	unsigned long quirks;
+ 
+-	enum modeset_restore modeset_restore;
+-	struct mutex modeset_restore_lock;
+ 	struct drm_atomic_state *modeset_restore_state;
+ 	struct drm_modeset_acquire_ctx reset_ctx;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 7720569f2024..6e048ee88e3f 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -8825,6 +8825,7 @@ enum skl_power_gate {
+ #define  TRANS_MSA_10_BPC		(2<<5)
+ #define  TRANS_MSA_12_BPC		(3<<5)
+ #define  TRANS_MSA_16_BPC		(4<<5)
++#define  TRANS_MSA_CEA_RANGE		(1<<3)
+ 
+ /* LCPLL Control */
+ #define LCPLL_CTL			_MMIO(0x130040)
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index fed26d6e4e27..e195c287c263 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -1659,6 +1659,10 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
+ 	WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ 
+ 	temp = TRANS_MSA_SYNC_CLK;
++
++	if (crtc_state->limited_color_range)
++		temp |= TRANS_MSA_CEA_RANGE;
++
+ 	switch (crtc_state->pipe_bpp) {
+ 	case 18:
+ 		temp |= TRANS_MSA_6_BPC;
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index 16faea30114a..8e465095fe06 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4293,18 +4293,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
+ 	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
+ }
+ 
+-/*
+- * If the display is now connected, check link status;
+- * there have been known issues of link loss triggering
+- * a long pulse.
+- *
+- * Some sinks (eg. ASUS PB287Q) seem to perform some
+- * weird HPD ping pong during modesets. So we can apparently
+- * end up with HPD going low during a modeset, and then
+- * going back up soon after. And once that happens we must
+- * retrain the link to get a picture. That's in case no
+- * userspace component reacted to intermittent HPD dip.
+- */
+ int intel_dp_retrain_link(struct intel_encoder *encoder,
+ 			  struct drm_modeset_acquire_ctx *ctx)
+ {
+@@ -4794,7 +4782,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
+ }
+ 
+ static int
+-intel_dp_long_pulse(struct intel_connector *connector)
++intel_dp_long_pulse(struct intel_connector *connector,
++		    struct drm_modeset_acquire_ctx *ctx)
+ {
+ 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ 	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+@@ -4853,6 +4842,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
+ 		 */
+ 		status = connector_status_disconnected;
+ 		goto out;
++	} else {
++		/*
++		 * If the display is now connected, check link status;
++		 * there have been known issues of link loss triggering
++		 * a long pulse.
++		 *
++		 * Some sinks (eg. ASUS PB287Q) seem to perform some
++		 * weird HPD ping pong during modesets. So we can apparently
++		 * end up with HPD going low during a modeset, and then
++		 * going back up soon after. And once that happens we must
++		 * retrain the link to get a picture. That's in case no
++		 * userspace component reacted to intermittent HPD dip.
++		 */
++		struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
++
++		intel_dp_retrain_link(encoder, ctx);
+ 	}
+ 
+ 	/*
+@@ -4914,7 +4919,7 @@ intel_dp_detect(struct drm_connector *connector,
+ 				return ret;
+ 		}
+ 
+-		status = intel_dp_long_pulse(intel_dp->attached_connector);
++		status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
+ 	}
+ 
+ 	intel_dp->detect_done = false;
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index d8cb53ef4351..c8640959a7fc 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -933,8 +933,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
+ 
+ 	ret = i2c_transfer(adapter, &msg, 1);
+ 	if (ret == 1)
+-		return 0;
+-	return ret >= 0 ? -EIO : ret;
++		ret = 0;
++	else if (ret >= 0)
++		ret = -EIO;
++
++	kfree(write_buf);
++	return ret;
+ }
+ 
+ static
+diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
+index b4941101f21a..cdf19553ffac 100644
+--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
++++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
+@@ -127,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
+ 		return platdev;
+ 	}
+ 
+-	pm_runtime_forbid(&platdev->dev);
+-	pm_runtime_set_active(&platdev->dev);
+-	pm_runtime_enable(&platdev->dev);
++	pm_runtime_no_callbacks(&platdev->dev);
+ 
+ 	return platdev;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
+index 8ae8f42f430a..6b6758419fb3 100644
+--- a/drivers/gpu/drm/i915/intel_lspcon.c
++++ b/drivers/gpu/drm/i915/intel_lspcon.c
+@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
+ 	DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
+ 		      lspcon_mode_name(mode));
+ 
+-	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
++	wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
+ 	if (current_mode != mode)
+ 		DRM_ERROR("LSPCON mode hasn't settled\n");
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 48f618dc9abb..63d7faa99946 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -44,8 +44,6 @@
+ /* Private structure for the integrated LVDS support */
+ struct intel_lvds_connector {
+ 	struct intel_connector base;
+-
+-	struct notifier_block lid_notifier;
+ };
+ 
+ struct intel_lvds_pps {
+@@ -454,26 +452,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
+ 	return true;
+ }
+ 
+-/*
+- * Detect the LVDS connection.
+- *
+- * Since LVDS doesn't have hotplug, we use the lid as a proxy.  Open means
+- * connected and closed means disconnected.  We also send hotplug events as
+- * needed, using lid status notification from the input layer.
+- */
+ static enum drm_connector_status
+ intel_lvds_detect(struct drm_connector *connector, bool force)
+ {
+-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
+-	enum drm_connector_status status;
+-
+-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+-		      connector->base.id, connector->name);
+-
+-	status = intel_panel_detect(dev_priv);
+-	if (status != connector_status_unknown)
+-		return status;
+-
+ 	return connector_status_connected;
+ }
+ 
+@@ -498,117 +479,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
+ 	return 1;
+ }
+ 
+-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
+-{
+-	DRM_INFO("Skipping forced modeset for %s\n", id->ident);
+-	return 1;
+-}
+-
+-/* The GPU hangs up on these systems if modeset is performed on LID open */
+-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
+-	{
+-		.callback = intel_no_modeset_on_lid_dmi_callback,
+-		.ident = "Toshiba Tecra A11",
+-		.matches = {
+-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+-			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
+-		},
+-	},
+-
+-	{ }	/* terminating entry */
+-};
+-
+-/*
+- * Lid events. Note the use of 'modeset':
+- *  - we set it to MODESET_ON_LID_OPEN on lid close,
+- *    and set it to MODESET_DONE on open
+- *  - we use it as an "only once" bit (i.e. we ignore
+- *    duplicate events where it was already properly set)
+- *  - the suspend/resume paths will set it to
+- *    MODESET_SUSPENDED and ignore the lid open event,
+- *    because they restore the mode ("lid open").
+- */
+-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+-			    void *unused)
+-{
+-	struct intel_lvds_connector *lvds_connector =
+-		container_of(nb, struct intel_lvds_connector, lid_notifier);
+-	struct drm_connector *connector = &lvds_connector->base.base;
+-	struct drm_device *dev = connector->dev;
+-	struct drm_i915_private *dev_priv = to_i915(dev);
+-
+-	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+-		return NOTIFY_OK;
+-
+-	mutex_lock(&dev_priv->modeset_restore_lock);
+-	if (dev_priv->modeset_restore == MODESET_SUSPENDED)
+-		goto exit;
+-	/*
+-	 * check and update the status of LVDS connector after receiving
+-	 * the LID notification event.
+-	 */
+-	connector->status = connector->funcs->detect(connector, false);
+-
+-	/* Don't force modeset on machines where it causes a GPU lockup */
+-	if (dmi_check_system(intel_no_modeset_on_lid))
+-		goto exit;
+-	if (!acpi_lid_open()) {
+-		/* do modeset on next lid open event */
+-		dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
+-		goto exit;
+-	}
+-
+-	if (dev_priv->modeset_restore == MODESET_DONE)
+-		goto exit;
+-
+-	/*
+-	 * Some old platforms' BIOSes love to wreak havoc while the lid is closed.
+-	 * We try to detect this here and undo any damage. The split for PCH
+-	 * platforms is rather conservative and a bit arbitrary except that on
+-	 * those platforms VGA disabling requires actual legacy VGA I/O access,
+-	 * and as part of the cleanup in the hw state restore we also redisable
+-	 * the vga plane.
+-	 */
+-	if (!HAS_PCH_SPLIT(dev_priv))
+-		intel_display_resume(dev);
+-
+-	dev_priv->modeset_restore = MODESET_DONE;
+-
+-exit:
+-	mutex_unlock(&dev_priv->modeset_restore_lock);
+-	return NOTIFY_OK;
+-}
+-
+-static int
+-intel_lvds_connector_register(struct drm_connector *connector)
+-{
+-	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+-	int ret;
+-
+-	ret = intel_connector_register(connector);
+-	if (ret)
+-		return ret;
+-
+-	lvds->lid_notifier.notifier_call = intel_lid_notify;
+-	if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
+-		DRM_DEBUG_KMS("lid notifier registration failed\n");
+-		lvds->lid_notifier.notifier_call = NULL;
+-	}
+-
+-	return 0;
+-}
+-
+-static void
+-intel_lvds_connector_unregister(struct drm_connector *connector)
+-{
+-	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
+-
+-	if (lvds->lid_notifier.notifier_call)
+-		acpi_lid_notifier_unregister(&lvds->lid_notifier);
+-
+-	intel_connector_unregister(connector);
+-}
+-
+ /**
+  * intel_lvds_destroy - unregister and free LVDS structures
+  * @connector: connector to free
+@@ -641,8 +511,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
+ 	.fill_modes = drm_helper_probe_single_connector_modes,
+ 	.atomic_get_property = intel_digital_connector_atomic_get_property,
+ 	.atomic_set_property = intel_digital_connector_atomic_set_property,
+-	.late_register = intel_lvds_connector_register,
+-	.early_unregister = intel_lvds_connector_unregister,
++	.late_register = intel_connector_register,
++	.early_unregister = intel_connector_unregister,
+ 	.destroy = intel_lvds_destroy,
+ 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+@@ -1108,8 +978,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
+ 	 * 2) check for VBT data
+ 	 * 3) check to see if LVDS is already on
+ 	 *    if none of the above, no panel
+-	 * 4) make sure lid is open
+-	 *    if closed, act like it's not there for now
+ 	 */
+ 
+ 	/*
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 2121345a61af..78ce3d232c4d 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -486,6 +486,31 @@ static void vop_line_flag_irq_disable(struct vop *vop)
+ 	spin_unlock_irqrestore(&vop->irq_lock, flags);
+ }
+ 
++static int vop_core_clks_enable(struct vop *vop)
++{
++	int ret;
++
++	ret = clk_enable(vop->hclk);
++	if (ret < 0)
++		return ret;
++
++	ret = clk_enable(vop->aclk);
++	if (ret < 0)
++		goto err_disable_hclk;
++
++	return 0;
++
++err_disable_hclk:
++	clk_disable(vop->hclk);
++	return ret;
++}
++
++static void vop_core_clks_disable(struct vop *vop)
++{
++	clk_disable(vop->aclk);
++	clk_disable(vop->hclk);
++}
++
+ static int vop_enable(struct drm_crtc *crtc)
+ {
+ 	struct vop *vop = to_vop(crtc);
+@@ -497,17 +522,13 @@ static int vop_enable(struct drm_crtc *crtc)
+ 		return ret;
+ 	}
+ 
+-	ret = clk_enable(vop->hclk);
++	ret = vop_core_clks_enable(vop);
+ 	if (WARN_ON(ret < 0))
+ 		goto err_put_pm_runtime;
+ 
+ 	ret = clk_enable(vop->dclk);
+ 	if (WARN_ON(ret < 0))
+-		goto err_disable_hclk;
+-
+-	ret = clk_enable(vop->aclk);
+-	if (WARN_ON(ret < 0))
+-		goto err_disable_dclk;
++		goto err_disable_core;
+ 
+ 	/*
+ 	 * Slave iommu shares power, irq and clock with vop.  It was associated
+@@ -519,7 +540,7 @@ static int vop_enable(struct drm_crtc *crtc)
+ 	if (ret) {
+ 		DRM_DEV_ERROR(vop->dev,
+ 			      "failed to attach dma mapping, %d\n", ret);
+-		goto err_disable_aclk;
++		goto err_disable_dclk;
+ 	}
+ 
+ 	spin_lock(&vop->reg_lock);
+@@ -552,18 +573,14 @@ static int vop_enable(struct drm_crtc *crtc)
+ 
+ 	spin_unlock(&vop->reg_lock);
+ 
+-	enable_irq(vop->irq);
+-
+ 	drm_crtc_vblank_on(crtc);
+ 
+ 	return 0;
+ 
+-err_disable_aclk:
+-	clk_disable(vop->aclk);
+ err_disable_dclk:
+ 	clk_disable(vop->dclk);
+-err_disable_hclk:
+-	clk_disable(vop->hclk);
++err_disable_core:
++	vop_core_clks_disable(vop);
+ err_put_pm_runtime:
+ 	pm_runtime_put_sync(vop->dev);
+ 	return ret;
+@@ -599,8 +616,6 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
+ 
+ 	vop_dsp_hold_valid_irq_disable(vop);
+ 
+-	disable_irq(vop->irq);
+-
+ 	vop->is_enabled = false;
+ 
+ 	/*
+@@ -609,8 +624,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
+ 	rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+ 
+ 	clk_disable(vop->dclk);
+-	clk_disable(vop->aclk);
+-	clk_disable(vop->hclk);
++	vop_core_clks_disable(vop);
+ 	pm_runtime_put(vop->dev);
+ 	mutex_unlock(&vop->vop_lock);
+ 
+@@ -1177,6 +1191,18 @@ static irqreturn_t vop_isr(int irq, void *data)
+ 	uint32_t active_irqs;
+ 	int ret = IRQ_NONE;
+ 
++	/*
++	 * The irq is shared with the iommu. If the runtime-pm state of the
++	 * vop-device is disabled, the irq has to be targeted at the iommu.
++	 */
++	if (!pm_runtime_get_if_in_use(vop->dev))
++		return IRQ_NONE;
++
++	if (vop_core_clks_enable(vop)) {
++		DRM_DEV_ERROR_RATELIMITED(vop->dev, "couldn't enable clocks\n");
++		goto out;
++	}
++
+ 	/*
+ 	 * interrupt register has interrupt status, enable and clear bits, we
+ 	 * must hold irq_lock to avoid a race with enable/disable_vblank().
+@@ -1192,7 +1218,7 @@ static irqreturn_t vop_isr(int irq, void *data)
+ 
+ 	/* This is expected for vop iommu irqs, since the irq is shared */
+ 	if (!active_irqs)
+-		return IRQ_NONE;
++		goto out_disable;
+ 
+ 	if (active_irqs & DSP_HOLD_VALID_INTR) {
+ 		complete(&vop->dsp_hold_completion);
+@@ -1218,6 +1244,10 @@ static irqreturn_t vop_isr(int irq, void *data)
+ 		DRM_DEV_ERROR(vop->dev, "Unknown VOP IRQs: %#02x\n",
+ 			      active_irqs);
+ 
++out_disable:
++	vop_core_clks_disable(vop);
++out:
++	pm_runtime_put(vop->dev);
+ 	return ret;
+ }
+ 
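
This hunk, like the rockchip-iommu hunks later in the patch, leans on the pm_runtime_get_if_in_use() convention: negative errno on failure, 0 when the device is not in active use (no reference taken), 1 when a usage reference was acquired. A minimal sketch of a shared-IRQ handler honoring all three cases (device wiring elided; the <= 0 bail-out matches the explicit negative-value checks the iommu hunks add):

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	static irqreturn_t shared_isr(int irq, void *data)
	{
		struct device *dev = data;

		/* <0: error, 0: suspended (the IRQ belongs to the line's
		 * other user), 1: usage reference taken. */
		if (pm_runtime_get_if_in_use(dev) <= 0)
			return IRQ_NONE;

		/* ... device registers are safe to touch here ... */

		pm_runtime_put(dev);
		return IRQ_HANDLED;
	}
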
+@@ -1596,9 +1626,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
+ 	if (ret)
+ 		goto err_disable_pm_runtime;
+ 
+-	/* IRQ is initially disabled; it gets enabled in power_on */
+-	disable_irq(vop->irq);
+-
+ 	return 0;
+ 
+ err_disable_pm_runtime:
+diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+index e67f4ea28c0e..051b8be3dc0f 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
++++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
+@@ -363,8 +363,10 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
+ 		of_property_read_u32(endpoint, "reg", &endpoint_id);
+ 		ret = drm_of_find_panel_or_bridge(dev->of_node, 1, endpoint_id,
+ 						  &lvds->panel, &lvds->bridge);
+-		if (!ret)
++		if (!ret) {
++			of_node_put(endpoint);
+ 			break;
++		}
+ 	}
+ 	if (!child_count) {
+ 		DRM_DEV_ERROR(dev, "lvds port does not have any children\n");
+diff --git a/drivers/hid/hid-redragon.c b/drivers/hid/hid-redragon.c
+index daf59578bf93..73c9d4c4fa34 100644
+--- a/drivers/hid/hid-redragon.c
++++ b/drivers/hid/hid-redragon.c
+@@ -44,29 +44,6 @@ static __u8 *redragon_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	return rdesc;
+ }
+ 
+-static int redragon_probe(struct hid_device *dev,
+-	const struct hid_device_id *id)
+-{
+-	int ret;
+-
+-	ret = hid_parse(dev);
+-	if (ret) {
+-		hid_err(dev, "parse failed\n");
+-		return ret;
+-	}
+-
+-	/* do not register unused input device */
+-	if (dev->maxapplication == 1)
+-		return 0;
+-
+-	ret = hid_hw_start(dev, HID_CONNECT_DEFAULT);
+-	if (ret) {
+-		hid_err(dev, "hw start failed\n");
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+ static const struct hid_device_id redragon_devices[] = {
+ 	{HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_REDRAGON_ASURA)},
+ 	{}
+@@ -77,8 +54,7 @@ MODULE_DEVICE_TABLE(hid, redragon_devices);
+ static struct hid_driver redragon_driver = {
+ 	.name = "redragon",
+ 	.id_table = redragon_devices,
+-	.report_fixup = redragon_report_fixup,
+-	.probe = redragon_probe
++	.report_fixup = redragon_report_fixup
+ };
+ 
+ module_hid_driver(redragon_driver);
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index b8f303dea305..32affd3fa8bd 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -453,8 +453,12 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+ 		else
+ 			dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+ 				data_len, client->addr, cmd, ret);
+-	} else {
++	/* 2 transfers must have completed successfully */
++	} else if (ret == 2) {
+ 		memcpy(data, buffer, data_len);
++		ret = 0;
++	} else {
++		ret = -EIO;
+ 	}
+ 
+ 	kfree(buffer);
+@@ -595,8 +599,6 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
+ 		if (action == ACPI_READ) {
+ 			status = acpi_gsb_i2c_read_bytes(client, command,
+ 					gsb->data, info->access_length);
+-			if (status > 0)
+-				status = 0;
+ 		} else {
+ 			status = acpi_gsb_i2c_write_bytes(client, command,
+ 					gsb->data, info->access_length);
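
The check works because i2c_transfer() returns the number of messages completed (or a negative errno), and this read is built from two messages: the register-address write followed by the data read. Only a return of 2 therefore means the whole transaction went through. A self-contained sketch of the convention (a generic two-message register read, not the ACPI GSB path itself):

	#include <linux/errno.h>
	#include <linux/i2c.h>

	/* Returns 0 on success, a negative errno otherwise. */
	static int reg_read(struct i2c_client *client, u8 reg, u8 *buf, u16 len)
	{
		struct i2c_msg msgs[] = {
			{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
			{ .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = buf },
		};
		int ret = i2c_transfer(client->adapter, msgs, 2);

		if (ret == 2)
			return 0;		/* both messages completed */
		return ret < 0 ? ret : -EIO;	/* partial transfer */
	}
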
+diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
+index fbe7198a715a..bedd5fba33b0 100644
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -198,7 +198,7 @@ int node_affinity_init(void)
+ 		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
+ 			node = pcibus_to_node(dev->bus);
+ 			if (node < 0)
+-				node = numa_node_id();
++				goto out;
+ 
+ 			hfi1_per_node_cntr[node]++;
+ 		}
+@@ -206,6 +206,18 @@ int node_affinity_init(void)
+ 	}
+ 
+ 	return 0;
++
++out:
++	/*
++	 * Invalid PCI NUMA node information found, note it, and populate
++	 * our database 1:1.
++	 */
++	pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
++	pr_err("HFI: System BIOS may need to be upgraded\n");
++	for (node = 0; node < node_affinity.num_possible_nodes; node++)
++		hfi1_per_node_cntr[node] = 1;
++
++	return 0;
+ }
+ 
+ static void node_affinity_destroy(struct hfi1_affinity_node *entry)
+@@ -622,8 +634,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+ 	int curr_cpu, possible, i, ret;
+ 	bool new_entry = false;
+ 
+-	if (node < 0)
+-		node = numa_node_id();
++	/*
++	 * If the BIOS does not have the NUMA node information set, select
++	 * NUMA 0 so we get consistent performance.
++	 */
++	if (node < 0) {
++		dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
++		node = 0;
++	}
+ 	dd->node = node;
+ 
+ 	local_mask = cpumask_of_node(dd->node);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
+index b9f2c871ff9a..e11c149da04d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
+@@ -37,7 +37,7 @@
+ 
+ static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+ {
+-	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
++	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
+ }
+ 
+ static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index baaf906f7c2e..97664570c5ac 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -115,7 +115,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
+ {
+ 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+ 
+-	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
++	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
++					   base) ? -ENOMEM : 0;
+ }
+ 
+ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
+diff --git a/drivers/input/input.c b/drivers/input/input.c
+index 6365c1958264..3304aaaffe87 100644
+--- a/drivers/input/input.c
++++ b/drivers/input/input.c
+@@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
+  */
+ void input_alloc_absinfo(struct input_dev *dev)
+ {
+-	if (!dev->absinfo)
+-		dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
+-					GFP_KERNEL);
++	if (dev->absinfo)
++		return;
+ 
+-	WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
++	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
++	if (!dev->absinfo) {
++		dev_err(dev->dev.parent ?: &dev->dev,
++			"%s: unable to allocate memory\n", __func__);
++		/*
++		 * We will handle this allocation failure in
++		 * input_register_device() when we refuse to register input
++		 * device with ABS bits but without absinfo.
++		 */
++	}
+ }
+ EXPORT_SYMBOL(input_alloc_absinfo);
+ 
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index af4a8e7fcd27..3b05117118c3 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -550,7 +550,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
+ 
+ pte_ready:
+ 	iopte = iopte_offset(iopgd, da);
+-	*pt_dma = virt_to_phys(iopte);
++	*pt_dma = iopgd_page_paddr(iopgd);
+ 	dev_vdbg(obj->dev,
+ 		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
+ 		 __func__, da, iopgd, *iopgd, iopte, *iopte);
+@@ -738,7 +738,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
+ 		}
+ 		bytes *= nent;
+ 		memset(iopte, 0, nent * sizeof(*iopte));
+-		pt_dma = virt_to_phys(iopte);
++		pt_dma = iopgd_page_paddr(iopgd);
+ 		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
+ 
+ 		/*
+diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
+index 054cd2c8e9c8..2b1724e8d307 100644
+--- a/drivers/iommu/rockchip-iommu.c
++++ b/drivers/iommu/rockchip-iommu.c
+@@ -521,10 +521,11 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+ 	u32 int_status;
+ 	dma_addr_t iova;
+ 	irqreturn_t ret = IRQ_NONE;
+-	int i;
++	int i, err;
+ 
+-	if (WARN_ON(!pm_runtime_get_if_in_use(iommu->dev)))
+-		return 0;
++	err = pm_runtime_get_if_in_use(iommu->dev);
++	if (WARN_ON_ONCE(err <= 0))
++		return ret;
+ 
+ 	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
+ 		goto out;
+@@ -620,11 +621,15 @@ static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ 	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ 	list_for_each(pos, &rk_domain->iommus) {
+ 		struct rk_iommu *iommu;
++		int ret;
+ 
+ 		iommu = list_entry(pos, struct rk_iommu, node);
+ 
+ 		/* Only zap TLBs of IOMMUs that are powered on. */
+-		if (pm_runtime_get_if_in_use(iommu->dev)) {
++		ret = pm_runtime_get_if_in_use(iommu->dev);
++		if (WARN_ON_ONCE(ret < 0))
++			continue;
++		if (ret) {
+ 			WARN_ON(clk_bulk_enable(iommu->num_clocks,
+ 						iommu->clocks));
+ 			rk_iommu_zap_lines(iommu, iova, size);
+@@ -891,6 +896,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
+ 	struct rk_iommu *iommu;
+ 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
+ 	unsigned long flags;
++	int ret;
+ 
+ 	/* Allow 'virtual devices' (eg drm) to detach from domain */
+ 	iommu = rk_iommu_from_dev(dev);
+@@ -909,7 +915,9 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
+ 	list_del_init(&iommu->node);
+ 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+ 
+-	if (pm_runtime_get_if_in_use(iommu->dev)) {
++	ret = pm_runtime_get_if_in_use(iommu->dev);
++	WARN_ON_ONCE(ret < 0);
++	if (ret > 0) {
+ 		rk_iommu_disable(iommu);
+ 		pm_runtime_put(iommu->dev);
+ 	}
+@@ -946,7 +954,8 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
+ 	list_add_tail(&iommu->node, &rk_domain->iommus);
+ 	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+ 
+-	if (!pm_runtime_get_if_in_use(iommu->dev))
++	ret = pm_runtime_get_if_in_use(iommu->dev);
++	if (!ret || WARN_ON_ONCE(ret < 0))
+ 		return 0;
+ 
+ 	ret = rk_iommu_enable(iommu);
+@@ -1152,17 +1161,6 @@ static int rk_iommu_probe(struct platform_device *pdev)
+ 	if (iommu->num_mmu == 0)
+ 		return PTR_ERR(iommu->bases[0]);
+ 
+-	i = 0;
+-	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+-		if (irq < 0)
+-			return irq;
+-
+-		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
+-				       IRQF_SHARED, dev_name(dev), iommu);
+-		if (err)
+-			return err;
+-	}
+-
+ 	iommu->reset_disabled = device_property_read_bool(dev,
+ 					"rockchip,disable-mmu-reset");
+ 
+@@ -1219,6 +1217,19 @@ static int rk_iommu_probe(struct platform_device *pdev)
+ 
+ 	pm_runtime_enable(dev);
+ 
++	i = 0;
++	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
++		if (irq < 0)
++			return irq;
++
++		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
++				       IRQF_SHARED, dev_name(dev), iommu);
++		if (err) {
++			pm_runtime_disable(dev);
++			goto err_remove_sysfs;
++		}
++	}
++
+ 	return 0;
+ err_remove_sysfs:
+ 	iommu_device_sysfs_remove(&iommu->iommu);
+diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
+index faf734ff4cf3..0f6e30e9009d 100644
+--- a/drivers/irqchip/irq-bcm7038-l1.c
++++ b/drivers/irqchip/irq-bcm7038-l1.c
+@@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_SMP
+ static void bcm7038_l1_cpu_offline(struct irq_data *d)
+ {
+ 	struct cpumask *mask = irq_data_get_affinity_mask(d);
+@@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
+ 	}
+ 	irq_set_affinity_locked(d, &new_affinity, false);
+ }
++#endif
+ 
+ static int __init bcm7038_l1_init_one(struct device_node *dn,
+ 				      unsigned int idx,
+@@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
+ 	.irq_mask		= bcm7038_l1_mask,
+ 	.irq_unmask		= bcm7038_l1_unmask,
+ 	.irq_set_affinity	= bcm7038_l1_set_affinity,
++#ifdef CONFIG_SMP
+ 	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
++#endif
+ };
+ 
+ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
+diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
+index 3a7e8905a97e..880e48947576 100644
+--- a/drivers/irqchip/irq-stm32-exti.c
++++ b/drivers/irqchip/irq-stm32-exti.c
+@@ -602,17 +602,24 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
+ 					sizeof(struct stm32_exti_chip_data),
+ 					GFP_KERNEL);
+ 	if (!host_data->chips_data)
+-		return NULL;
++		goto free_host_data;
+ 
+ 	host_data->base = of_iomap(node, 0);
+ 	if (!host_data->base) {
+ 		pr_err("%pOF: Unable to map registers\n", node);
+-		return NULL;
++		goto free_chips_data;
+ 	}
+ 
+ 	stm32_host_data = host_data;
+ 
+ 	return host_data;
++
++free_chips_data:
++	kfree(host_data->chips_data);
++free_host_data:
++	kfree(host_data);
++
++	return NULL;
+ }
+ 
+ static struct
+@@ -664,10 +671,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
+ 	struct irq_domain *domain;
+ 
+ 	host_data = stm32_exti_host_init(drv_data, node);
+-	if (!host_data) {
+-		ret = -ENOMEM;
+-		goto out_free_mem;
+-	}
++	if (!host_data)
++		return -ENOMEM;
+ 
+ 	domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
+ 				       &irq_exti_domain_ops, NULL);
+@@ -724,7 +729,6 @@ out_free_domain:
+ 	irq_domain_remove(domain);
+ out_unmap:
+ 	iounmap(host_data->base);
+-out_free_mem:
+ 	kfree(host_data->chips_data);
+ 	kfree(host_data);
+ 	return ret;
+@@ -751,10 +755,8 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
+ 	}
+ 
+ 	host_data = stm32_exti_host_init(drv_data, node);
+-	if (!host_data) {
+-		ret = -ENOMEM;
+-		goto out_free_mem;
+-	}
++	if (!host_data)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < drv_data->bank_nr; i++)
+ 		stm32_exti_chip_init(host_data, i, node);
+@@ -776,7 +778,6 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
+ 
+ out_unmap:
+ 	iounmap(host_data->base);
+-out_free_mem:
+ 	kfree(host_data->chips_data);
+ 	kfree(host_data);
+ 	return ret;
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 3c7547a3c371..d7b9cdafd1c3 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -487,6 +487,8 @@ static int run_complete_job(struct kcopyd_job *job)
+ 	if (atomic_dec_and_test(&kc->nr_jobs))
+ 		wake_up(&kc->destroyq);
+ 
++	cond_resched();
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
+index 2a87b0d2f21f..a530972c5a7e 100644
+--- a/drivers/mfd/sm501.c
++++ b/drivers/mfd/sm501.c
+@@ -715,6 +715,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
+ 	smdev->pdev.name = name;
+ 	smdev->pdev.id = sm->pdev_id;
+ 	smdev->pdev.dev.parent = sm->dev;
++	smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
+ 
+ 	if (res_count) {
+ 		smdev->pdev.resource = (struct resource *)(smdev+1);
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 94d7a865b135..7504f430c011 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -578,6 +578,16 @@ static int init_volumes(struct ubi_device *ubi,
+ 		vol->ubi = ubi;
+ 		reserved_pebs += vol->reserved_pebs;
+ 
++		/*
++		 * We use ubi->peb_count and not vol->reserved_pebs because
++		 * we want to keep the code simple. Otherwise we'd have to
++		 * resize/check the bitmap upon volume resize too.
++		 * Allocating a few bytes more does not hurt.
++		 */
++		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
++		if (err)
++			return err;
++
+ 		/*
+ 		 * In case of dynamic volume UBI knows nothing about how much
+ 		 * data is stored there. So assume the whole volume is used.
+@@ -620,16 +630,6 @@ static int init_volumes(struct ubi_device *ubi,
+ 			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ 		vol->used_bytes += av->last_data_size;
+ 		vol->last_eb_bytes = av->last_data_size;
+-
+-		/*
+-		 * We use ubi->peb_count and not vol->reserved_pebs because
+-		 * we want to keep the code simple. Otherwise we'd have to
+-		 * resize/check the bitmap upon volume resize too.
+-		 * Allocating a few bytes more does not hurt.
+-		 */
+-		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
+-		if (err)
+-			return err;
+ 	}
+ 
+ 	/* And add the layout volume */
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 4394c1162be4..4fdf3d33aa59 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5907,12 +5907,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+ 	return bp->hw_resc.max_cp_rings;
+ }
+ 
+-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
+ {
+-	bp->hw_resc.max_cp_rings = max;
++	return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+ }
+ 
+-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
++static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+ {
+ 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ 
+@@ -8492,7 +8492,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ 
+ 	*max_tx = hw_resc->max_tx_rings;
+ 	*max_rx = hw_resc->max_rx_rings;
+-	*max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
++	*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
++			hw_resc->max_irqs);
+ 	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ 	max_ring_grps = hw_resc->max_hw_ring_grps;
+ 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index 91575ef97c8c..ea1246a94b38 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1468,8 +1468,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
+ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
++unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
+ int bnxt_get_avail_msix(struct bnxt *bp, int num);
+ int bnxt_reserve_rings(struct bnxt *bp);
+ void bnxt_tx_disable(struct bnxt *bp);
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+index a64910892c25..2c77004a022b 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+ 
+ 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+ 
+-	vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
++	vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
+ 	vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ 		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
+@@ -544,7 +544,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
+ 	max_stat_ctxs = hw_resc->max_stat_ctxs;
+ 
+ 	/* Remaining rings are distributed equally amongst VFs for now */
+-	vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
++	vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
++		       bp->cp_nr_rings) / num_vfs;
+ 	vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ 		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
+@@ -638,7 +639,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+ 	 */
+ 	vfs_supported = *num_vfs;
+ 
+-	avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
++	avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
+ 	avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ 	avail_cp = min_t(int, avail_cp, avail_stat);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index 840f6e505f73..4209cfd73971 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ 		edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ 	}
+ 	bnxt_fill_msix_vecs(bp, ent);
+-	bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ 	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
+ 	return avail_msix;
+ }
+@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+ {
+ 	struct net_device *dev = edev->net;
+ 	struct bnxt *bp = netdev_priv(dev);
+-	int max_cp_rings, msix_requested;
+ 
+ 	ASSERT_RTNL();
+ 	if (ulp_id != BNXT_ROCE_ULP)
+@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+ 	if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ 		return 0;
+ 
+-	max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+-	msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+-	bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ 	edev->ulp_tbl[ulp_id].msix_requested = 0;
+ 	edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ 	if (netif_running(dev)) {
+@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
+ 	return 0;
+ }
+ 
+-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+-{
+-	ASSERT_RTNL();
+-	if (bnxt_ulp_registered(bp->edev, ulp_id)) {
+-		struct bnxt_en_dev *edev = bp->edev;
+-		unsigned int msix_req, max;
+-
+-		msix_req = edev->ulp_tbl[ulp_id].msix_requested;
+-		max = bnxt_get_max_func_cp_rings(bp);
+-		bnxt_set_max_func_cp_rings(bp, max - msix_req);
+-		max = bnxt_get_max_func_stat_ctxs(bp);
+-		bnxt_set_max_func_stat_ctxs(bp, max - 1);
+-	}
+-}
+-
+ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+ 			 struct bnxt_fw_msg *fw_msg)
+ {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+index df48ac71729f..d9bea37cd211 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+ 
+ int bnxt_get_ulp_msix_num(struct bnxt *bp);
+ int bnxt_get_ulp_msix_base(struct bnxt *bp);
+-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+ void bnxt_ulp_stop(struct bnxt *bp);
+ void bnxt_ulp_start(struct bnxt *bp);
+ void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index b773bc07edf7..14b49612aa86 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
+ #define UMAC_MAC1			0x010
+ #define UMAC_MAX_FRAME_LEN		0x014
+ 
++#define UMAC_MODE			0x44
++#define  MODE_LINK_STATUS		(1 << 5)
++
+ #define UMAC_EEE_CTRL			0x064
+ #define  EN_LPI_RX_PAUSE		(1 << 0)
+ #define  EN_LPI_TX_PFC			(1 << 1)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+index 5333274a283c..4241ae928d4a 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
+@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
+ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+ 					  struct fixed_phy_status *status)
+ {
+-	if (dev && dev->phydev && status)
+-		status->link = dev->phydev->link;
++	struct bcmgenet_priv *priv;
++	u32 reg;
++
++	if (dev && dev->phydev && status) {
++		priv = netdev_priv(dev);
++		reg = bcmgenet_umac_readl(priv, UMAC_MODE);
++		status->link = !!(reg & MODE_LINK_STATUS);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index a6c911bb5ce2..515d96e32143 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -481,11 +481,6 @@ static int macb_mii_probe(struct net_device *dev)
+ 
+ 	if (np) {
+ 		if (of_phy_is_fixed_link(np)) {
+-			if (of_phy_register_fixed_link(np) < 0) {
+-				dev_err(&bp->pdev->dev,
+-					"broken fixed-link specification\n");
+-				return -ENODEV;
+-			}
+ 			bp->phy_node = of_node_get(np);
+ 		} else {
+ 			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
+@@ -568,7 +563,7 @@ static int macb_mii_init(struct macb *bp)
+ {
+ 	struct macb_platform_data *pdata;
+ 	struct device_node *np;
+-	int err;
++	int err = -ENXIO;
+ 
+ 	/* Enable management port */
+ 	macb_writel(bp, NCR, MACB_BIT(MPE));
+@@ -591,12 +586,23 @@ static int macb_mii_init(struct macb *bp)
+ 	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
+ 
+ 	np = bp->pdev->dev.of_node;
+-	if (pdata)
+-		bp->mii_bus->phy_mask = pdata->phy_mask;
++	if (np && of_phy_is_fixed_link(np)) {
++		if (of_phy_register_fixed_link(np) < 0) {
++			dev_err(&bp->pdev->dev,
++				"broken fixed-link specification %pOF\n", np);
++			goto err_out_free_mdiobus;
++		}
++
++		err = mdiobus_register(bp->mii_bus);
++	} else {
++		if (pdata)
++			bp->mii_bus->phy_mask = pdata->phy_mask;
++
++		err = of_mdiobus_register(bp->mii_bus, np);
++	}
+ 
+-	err = of_mdiobus_register(bp->mii_bus, np);
+ 	if (err)
+-		goto err_out_free_mdiobus;
++		goto err_out_free_fixed_link;
+ 
+ 	err = macb_mii_probe(bp->dev);
+ 	if (err)
+@@ -606,6 +612,7 @@ static int macb_mii_init(struct macb *bp)
+ 
+ err_out_unregister_bus:
+ 	mdiobus_unregister(bp->mii_bus);
++err_out_free_fixed_link:
+ 	if (np && of_phy_is_fixed_link(np))
+ 		of_phy_deregister_fixed_link(np);
+ err_out_free_mdiobus:
+@@ -1957,14 +1964,17 @@ static void macb_reset_hw(struct macb *bp)
+ {
+ 	struct macb_queue *queue;
+ 	unsigned int q;
++	u32 ctrl = macb_readl(bp, NCR);
+ 
+ 	/* Disable RX and TX (XXX: Should we halt the transmission
+ 	 * more gracefully?)
+ 	 */
+-	macb_writel(bp, NCR, 0);
++	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
+ 
+ 	/* Clear the stats registers (XXX: Update stats first?) */
+-	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
++	ctrl |= MACB_BIT(CLRSTAT);
++
++	macb_writel(bp, NCR, ctrl);
+ 
+ 	/* Clear all status flags */
+ 	macb_writel(bp, TSR, -1);
+@@ -2152,7 +2162,7 @@ static void macb_init_hw(struct macb *bp)
+ 	}
+ 
+ 	/* Enable TX and RX */
+-	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
++	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ }
+ 
+ /* The hash address register is 64 bits long and takes up two
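
The macb_main.c hunks above replace blind writes to NCR with a read-modify-write so that bits unrelated to the reset (such as MPE, enabled earlier in macb_mii_init()) survive. A hedged stand-alone sketch of the pattern; the bit positions below are illustrative, not the real NCR layout:

#include <stdio.h>

/* Illustrative bit positions only; not the real NCR register layout. */
#define BIT_RE		(1u << 2)	/* receive enable */
#define BIT_TE		(1u << 3)	/* transmit enable */
#define BIT_MPE		(1u << 4)	/* management port enable */
#define BIT_CLRSTAT	(1u << 5)	/* clear statistics */

static unsigned int fake_ncr = BIT_RE | BIT_TE | BIT_MPE;

static void reset_hw(void)
{
	unsigned int ctrl = fake_ncr;		/* read                */

	ctrl &= ~(BIT_RE | BIT_TE);		/* modify: stop RX/TX  */
	ctrl |= BIT_CLRSTAT;			/* modify: clear stats */
	fake_ncr = ctrl;			/* write back          */
}

int main(void)
{
	reset_hw();
	/* a blind write of 0 would have lost MPE; the RMW keeps it */
	printf("MPE preserved: %s\n", (fake_ncr & BIT_MPE) ? "yes" : "no");
	return 0;
}
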
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index d318d35e598f..6fd7ea8074b0 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3911,7 +3911,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
+ #define HCLGE_FUNC_NUMBER_PER_DESC 6
+ 	int i, j;
+ 
+-	for (i = 0; i < HCLGE_DESC_NUMBER; i++)
++	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
+ 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
+ 			if (desc[i].data[j])
+ 				return false;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 9f7932e423b5..6315e8ad8467 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -208,6 +208,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
+ 	if (!phydev)
+ 		return 0;
+ 
++	phydev->supported &= ~SUPPORTED_FIBRE;
++
+ 	ret = phy_connect_direct(netdev, phydev,
+ 				 hclge_mac_adjust_link,
+ 				 PHY_INTERFACE_MODE_SGMII);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+index 86478a6b99c5..c8c315eb5128 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+@@ -139,14 +139,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ 		      struct mlx5_wq_ctrl *wq_ctrl)
+ {
+ 	u32 sq_strides_offset;
++	u32 rq_pg_remainder;
+ 	int err;
+ 
+ 	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
+ 		      MLX5_GET(qpc, qpc, log_rq_size),
+ 		      &wq->rq.fbc);
+ 
+-	sq_strides_offset =
+-		((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
++	rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
++	sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
+ 
+ 	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+ 			     MLX5_GET(qpc, qpc, log_sq_size),
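
The wq.c fix derives the SQ stride offset from the RQ's total byte size modulo PAGE_SIZE rather than from a single fragment size. A small arithmetic sketch with made-up RQ dimensions (the 64-byte MLX5_SEND_WQE_BB matches the driver headers; the RQ sizes are invented):

#include <stdio.h>

#define PAGE_SIZE		4096u
#define MLX5_SEND_WQE_BB	64u	/* send WQE basic block size */

int main(void)
{
	/* invented RQ: 96 strides of 128 bytes = 12288 bytes total */
	unsigned int rq_byte_size = 96u * 128u;

	unsigned int rq_pg_remainder   = rq_byte_size % PAGE_SIZE;
	unsigned int sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;

	/* 12288 % 4096 == 0 here; a 12352-byte RQ would leave 64 -> offset 1 */
	printf("remainder=%u offset=%u\n", rq_pg_remainder, sq_strides_offset);
	return 0;
}
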
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 4a519d8edec8..3500c79e29cd 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -433,6 +433,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
+ void
+ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
++				 struct net_device *dev);
+ 
+ /* spectrum_kvdl.c */
+ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+index 77b2adb29341..cb43d17097fa 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+@@ -6228,6 +6228,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
+ 	mlxsw_sp_vr_put(mlxsw_sp, vr);
+ }
+ 
++void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
++				 struct net_device *dev)
++{
++	struct mlxsw_sp_rif *rif;
++
++	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
++	if (!rif)
++		return;
++	mlxsw_sp_rif_destroy(rif);
++}
++
+ static void
+ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
+ 				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index eea5666a86b2..6cb43dda8232 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -160,6 +160,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
+ 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ }
+ 
++static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
++						    void *data)
++{
++	struct mlxsw_sp *mlxsw_sp = data;
++
++	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
++	return 0;
++}
++
++static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
++						struct net_device *dev)
++{
++	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
++	netdev_walk_all_upper_dev_rcu(dev,
++				      mlxsw_sp_bridge_device_upper_rif_destroy,
++				      mlxsw_sp);
++}
++
+ static struct mlxsw_sp_bridge_device *
+ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
+ 			      struct net_device *br_dev)
+@@ -198,6 +216,8 @@ static void
+ mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
+ 			       struct mlxsw_sp_bridge_device *bridge_device)
+ {
++	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
++					    bridge_device->dev);
+ 	list_del(&bridge_device->list);
+ 	if (bridge_device->vlan_enabled)
+ 		bridge->vlan_enabled_exists = false;
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+index d4c27f849f9b..c2a9e64bc57b 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+@@ -227,29 +227,16 @@ done:
+ 	spin_unlock_bh(&nn->reconfig_lock);
+ }
+ 
+-/**
+- * nfp_net_reconfig() - Reconfigure the firmware
+- * @nn:      NFP Net device to reconfigure
+- * @update:  The value for the update field in the BAR config
+- *
+- * Write the update word to the BAR and ping the reconfig queue.  The
+- * poll until the firmware has acknowledged the update by zeroing the
+- * update word.
+- *
+- * Return: Negative errno on error, 0 on success
+- */
+-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
++static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
+ {
+ 	bool cancelled_timer = false;
+ 	u32 pre_posted_requests;
+-	int ret;
+ 
+ 	spin_lock_bh(&nn->reconfig_lock);
+ 
+ 	nn->reconfig_sync_present = true;
+ 
+ 	if (nn->reconfig_timer_active) {
+-		del_timer(&nn->reconfig_timer);
+ 		nn->reconfig_timer_active = false;
+ 		cancelled_timer = true;
+ 	}
+@@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+ 
+ 	spin_unlock_bh(&nn->reconfig_lock);
+ 
+-	if (cancelled_timer)
++	if (cancelled_timer) {
++		del_timer_sync(&nn->reconfig_timer);
+ 		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
++	}
+ 
+ 	/* Run the posted reconfigs which were issued before we started */
+ 	if (pre_posted_requests) {
+ 		nfp_net_reconfig_start(nn, pre_posted_requests);
+ 		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+ 	}
++}
++
++static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
++{
++	nfp_net_reconfig_sync_enter(nn);
++
++	spin_lock_bh(&nn->reconfig_lock);
++	nn->reconfig_sync_present = false;
++	spin_unlock_bh(&nn->reconfig_lock);
++}
++
++/**
++ * nfp_net_reconfig() - Reconfigure the firmware
++ * @nn:      NFP Net device to reconfigure
++ * @update:  The value for the update field in the BAR config
++ *
++ * Write the update word to the BAR and ping the reconfig queue.  Then
++ * poll until the firmware has acknowledged the update by zeroing the
++ * update word.
++ *
++ * Return: Negative errno on error, 0 on success
++ */
++int nfp_net_reconfig(struct nfp_net *nn, u32 update)
++{
++	int ret;
++
++	nfp_net_reconfig_sync_enter(nn);
+ 
+ 	nfp_net_reconfig_start(nn, update);
+ 	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+@@ -3609,6 +3625,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+  */
+ void nfp_net_free(struct nfp_net *nn)
+ {
++	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+ 	if (nn->dp.netdev)
+ 		free_netdev(nn->dp.netdev);
+ 	else
+@@ -3893,4 +3910,5 @@ void nfp_net_clean(struct nfp_net *nn)
+ 		return;
+ 
+ 	unregister_netdev(nn->dp.netdev);
++	nfp_net_reconfig_wait_posted(nn);
+ }
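
The nfp_net_common.c rework factors the "cancel the timer, flush posted reconfigs" steps into nfp_net_reconfig_sync_enter() so that nfp_net_reconfig_wait_posted() can drain everything before teardown, which is what the new WARN_ON in nfp_net_free() checks. A toy model of that drain step, with the driver's locking and firmware wait elided:

#include <stdio.h>

/* Toy state; the driver additionally holds reconfig_lock around this. */
struct nn_state {
	unsigned int reconfig_posted;	/* bitmask of posted update words */
	int timer_active;
};

static void reconfig_sync_enter(struct nn_state *nn)
{
	if (nn->timer_active)
		nn->timer_active = 0;	/* del_timer_sync() + wait in the driver */

	if (nn->reconfig_posted) {
		/* run the posted reconfigs issued before we started */
		printf("flushing posted mask 0x%x\n", nn->reconfig_posted);
		nn->reconfig_posted = 0;
	}
}

int main(void)
{
	struct nn_state nn = { .reconfig_posted = 0x5, .timer_active = 1 };

	/* both the sync reconfig path and the teardown drain start here */
	reconfig_sync_enter(&nn);
	printf("pending after drain: 0x%x\n", nn.reconfig_posted);
	return 0;
}
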
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 353f1c129af1..059ba9429e51 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ 	return status;
+ }
+ 
+-static netdev_features_t qlge_fix_features(struct net_device *ndev,
+-	netdev_features_t features)
+-{
+-	int err;
+-
+-	/* Update the behavior of vlan accel in the adapter */
+-	err = qlge_update_hw_vlan_features(ndev, features);
+-	if (err)
+-		return err;
+-
+-	return features;
+-}
+-
+ static int qlge_set_features(struct net_device *ndev,
+ 	netdev_features_t features)
+ {
+ 	netdev_features_t changed = ndev->features ^ features;
++	int err;
++
++	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
++		/* Update the behavior of vlan accel in the adapter */
++		err = qlge_update_hw_vlan_features(ndev, features);
++		if (err)
++			return err;
+ 
+-	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ 		qlge_vlan_mode(ndev, features);
++	}
+ 
+ 	return 0;
+ }
+@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
+ 	.ndo_set_mac_address	= qlge_set_mac_address,
+ 	.ndo_validate_addr	= eth_validate_addr,
+ 	.ndo_tx_timeout		= qlge_tx_timeout,
+-	.ndo_fix_features	= qlge_fix_features,
+ 	.ndo_set_features	= qlge_set_features,
+ 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
+ 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 9ceb34bac3a9..e5eb361b973c 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -303,6 +303,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
++	{ PCI_DEVICE(PCI_VENDOR_ID_NCUBE,	0x8168), 0, 0, RTL_CFG_1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
+ 	{ PCI_VENDOR_ID_DLINK,			0x4300,
+ 		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
+@@ -5038,7 +5039,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
+ 	rtl_hw_reset(tp);
+ }
+ 
+-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
++static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
+ {
+ 	/* Set DMA burst size and Interframe Gap Time */
+ 	RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
+@@ -5149,12 +5150,14 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
+ 
+ 	rtl_set_rx_max_size(tp);
+ 	rtl_set_rx_tx_desc_registers(tp);
+-	rtl_set_rx_tx_config_registers(tp);
++	rtl_set_tx_config_registers(tp);
+ 	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ 
+ 	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
+ 	RTL_R8(tp, IntrMask);
+ 	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
++	rtl_init_rxcfg(tp);
++
+ 	rtl_set_rx_mode(tp->dev);
+ 	/* no early-rx interrupts */
+ 	RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index 76649adf8fb0..c0a855b7ab3b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -112,7 +112,6 @@ struct stmmac_priv {
+ 	u32 tx_count_frames;
+ 	u32 tx_coal_frames;
+ 	u32 tx_coal_timer;
+-	bool tx_timer_armed;
+ 
+ 	int tx_coalesce;
+ 	int hwts_tx_en;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index ef6a8d39db2f..c579d98b9666 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3126,16 +3126,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	 * element in case of no SG.
+ 	 */
+ 	priv->tx_count_frames += nfrags + 1;
+-	if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
+-	    !priv->tx_timer_armed) {
++	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ 		mod_timer(&priv->txtimer,
+ 			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
+-		priv->tx_timer_armed = true;
+ 	} else {
+ 		priv->tx_count_frames = 0;
+ 		stmmac_set_tx_ic(priv, desc);
+ 		priv->xstats.tx_set_ic_bit++;
+-		priv->tx_timer_armed = false;
+ 	}
+ 
+ 	skb_tx_timestamp(skb);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index dd1d6e115145..6d74cde68163 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -29,6 +29,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/inetdevice.h>
+ #include <linux/etherdevice.h>
++#include <linux/pci.h>
+ #include <linux/skbuff.h>
+ #include <linux/if_vlan.h>
+ #include <linux/in.h>
+@@ -1939,12 +1940,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
+ {
+ 	struct net_device *ndev;
+ 	struct net_device_context *net_device_ctx;
++	struct device *pdev = vf_netdev->dev.parent;
+ 	struct netvsc_device *netvsc_dev;
+ 	int ret;
+ 
+ 	if (vf_netdev->addr_len != ETH_ALEN)
+ 		return NOTIFY_DONE;
+ 
++	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
++		return NOTIFY_DONE;
++
+ 	/*
+ 	 * We will use the MAC address to locate the synthetic interface to
+ 	 * associate with the VF interface. If we don't find a matching
+@@ -2101,6 +2106,16 @@ static int netvsc_probe(struct hv_device *dev,
+ 
+ 	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ 
++	/* We must get rtnl lock before scheduling nvdev->subchan_work,
++	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
++	 * for all subchannels to show up, but that may not happen because
++	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
++	 * -> ... -> device_add() -> ... -> __device_attach() can't get
++	 * the device lock, so none of the subchannels can be processed --
++	 * netvsc_subchan_work() then hangs forever.
++	 */
++	rtnl_lock();
++
+ 	if (nvdev->num_chn > 1)
+ 		schedule_work(&nvdev->subchan_work);
+ 
+@@ -2119,7 +2134,6 @@ static int netvsc_probe(struct hv_device *dev,
+ 	else
+ 		net->max_mtu = ETH_DATA_LEN;
+ 
+-	rtnl_lock();
+ 	ret = register_netdevice(net);
+ 	if (ret != 0) {
+ 		pr_err("Unable to register netdev.\n");
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 2a58607a6aea..1b07bb5e110d 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -5214,8 +5214,8 @@ static int rtl8152_probe(struct usb_interface *intf,
+ 		netdev->hw_features &= ~NETIF_F_RXCSUM;
+ 	}
+ 
+-	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
+-	    udev->serial && !strcmp(udev->serial, "000001000000")) {
++	if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
++	    (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
+ 		dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
+ 		set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
+ 	}
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+index b6122aad639e..7569f9af8d47 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+@@ -6926,15 +6926,15 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
+ 	cfg->d11inf.io_type = (u8)io_type;
+ 	brcmu_d11_attach(&cfg->d11inf);
+ 
+-	err = brcmf_setup_wiphy(wiphy, ifp);
+-	if (err < 0)
+-		goto priv_out;
+-
+ 	/* regulatory notifier below needs access to cfg so
+ 	 * assign it now.
+ 	 */
+ 	drvr->config = cfg;
+ 
++	err = brcmf_setup_wiphy(wiphy, ifp);
++	if (err < 0)
++		goto priv_out;
++
+ 	brcmf_dbg(INFO, "Registering custom regulatory\n");
+ 	wiphy->reg_notifier = brcmf_cfg80211_reg_notifier;
+ 	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
+index 23e270839e6a..f00df2384985 100644
+--- a/drivers/pci/controller/pci-mvebu.c
++++ b/drivers/pci/controller/pci-mvebu.c
+@@ -1219,7 +1219,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
+ 		pcie->realio.start = PCIBIOS_MIN_IO;
+ 		pcie->realio.end = min_t(resource_size_t,
+ 					 IO_SPACE_LIMIT,
+-					 resource_size(&pcie->io));
++					 resource_size(&pcie->io) - 1);
+ 	} else
+ 		pcie->realio = pcie->io;
+ 
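
The pci-mvebu.c change is an off-by-one fix: kernel resource ranges are inclusive, so an end address is start + size - 1, not start + size. A tiny sketch using a local re-implementation of resource_size():

#include <stdio.h>

struct resource {
	unsigned long start;
	unsigned long end;	/* inclusive, as in the kernel */
};

/* mirrors the kernel's resource_size(): end - start + 1 */
static unsigned long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct resource io = { .start = 0, .end = 0xffff };	/* 64 KiB window */

	/* inclusive end for a window of the same size starting at 0 */
	unsigned long end = resource_size(&io) - 1;

	printf("size=%lu end=0x%lx\n", resource_size(&io), end);
	return 0;
}
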
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index b2857865c0aa..a1a243ee36bb 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1725,7 +1725,7 @@ int pci_setup_device(struct pci_dev *dev)
+ static void pci_configure_mps(struct pci_dev *dev)
+ {
+ 	struct pci_dev *bridge = pci_upstream_bridge(dev);
+-	int mps, p_mps, rc;
++	int mps, mpss, p_mps, rc;
+ 
+ 	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ 		return;
+@@ -1753,6 +1753,14 @@ static void pci_configure_mps(struct pci_dev *dev)
+ 	if (pcie_bus_config != PCIE_BUS_DEFAULT)
+ 		return;
+ 
++	mpss = 128 << dev->pcie_mpss;
++	if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
++		pcie_set_mps(bridge, mpss);
++		pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
++			 mpss, p_mps, 128 << bridge->pcie_mpss);
++		p_mps = pcie_get_mps(bridge);
++	}
++
+ 	rc = pcie_set_mps(dev, p_mps);
+ 	if (rc) {
+ 		pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+@@ -1761,7 +1769,7 @@ static void pci_configure_mps(struct pci_dev *dev)
+ 	}
+ 
+ 	pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
+-		 p_mps, mps, 128 << dev->pcie_mpss);
++		 p_mps, mps, mpss);
+ }
+ 
+ static struct hpp_type0 pci_default_type0 = {
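
In the probe.c hunk, MPS values are powers of two encoded as 128 << field, and the new code lowers a root port's MPS when a (e.g. hot-added) device cannot support the current setting. A sketch of the arithmetic with hypothetical values:

#include <stdio.h>

int main(void)
{
	/* pcie_mpss is a 3-bit field; supported payload = 128 << field */
	unsigned int dev_mpss_field   = 1;	/* device caps out at 256 bytes */
	unsigned int bridge_mps_bytes = 512;	/* root port currently at 512   */

	unsigned int mpss = 128u << dev_mpss_field;

	if (mpss < bridge_mps_bytes) {
		/* the fix: lower the root port rather than leave a mismatch */
		bridge_mps_bytes = mpss;
	}

	printf("bridge MPS now %u bytes\n", bridge_mps_bytes);
	return 0;
}
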
+diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
+index a52779f33ad4..afd0b533c40a 100644
+--- a/drivers/pinctrl/pinctrl-axp209.c
++++ b/drivers/pinctrl/pinctrl-axp209.c
+@@ -316,7 +316,7 @@ static const struct pinctrl_ops axp20x_pctrl_ops = {
+ 	.get_group_pins		= axp20x_group_pins,
+ };
+ 
+-static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
++static int axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ 					  unsigned int mask_len,
+ 					  struct axp20x_pinctrl_function *func,
+ 					  const struct pinctrl_pin_desc *pins)
+@@ -331,18 +331,22 @@ static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ 		func->groups = devm_kcalloc(dev,
+ 					    ngroups, sizeof(const char *),
+ 					    GFP_KERNEL);
++		if (!func->groups)
++			return -ENOMEM;
+ 		group = func->groups;
+ 		for_each_set_bit(bit, &mask_cpy, mask_len) {
+ 			*group = pins[bit].name;
+ 			group++;
+ 		}
+ 	}
++
++	return 0;
+ }
+ 
+-static void axp20x_build_funcs_groups(struct platform_device *pdev)
++static int axp20x_build_funcs_groups(struct platform_device *pdev)
+ {
+ 	struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+-	int i, pin, npins = pctl->desc->npins;
++	int i, ret, pin, npins = pctl->desc->npins;
+ 
+ 	pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ 	pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+@@ -366,13 +370,19 @@ static void axp20x_build_funcs_groups(struct platform_device *pdev)
+ 			pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ 	}
+ 
+-	axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
++	ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ 				      npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ 				      pctl->desc->pins);
++	if (ret)
++		return ret;
+ 
+-	axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
++	ret = axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ 				      npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ 				      pctl->desc->pins);
++	if (ret)
++		return ret;
++
++	return 0;
+ }
+ 
+ static const struct of_device_id axp20x_pctl_match[] = {
+@@ -424,7 +434,11 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, pctl);
+ 
+-	axp20x_build_funcs_groups(pdev);
++	ret = axp20x_build_funcs_groups(pdev);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to build groups\n");
++		return ret;
++	}
+ 
+ 	pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ 	if (!pctrl_desc)
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index 136ff2b4cce5..db2af09067db 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -496,6 +496,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
+ 	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ 	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
+ 	{ KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
++	{ KE_KEY, 0xFA, { KEY_PROG2 } },           /* Lid flip action */
+ 	{ KE_END, 0},
+ };
+ 
+diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
+index b5b890127479..b7dfe06261f1 100644
+--- a/drivers/platform/x86/intel_punit_ipc.c
++++ b/drivers/platform/x86/intel_punit_ipc.c
+@@ -17,6 +17,7 @@
+ #include <linux/bitops.h>
+ #include <linux/device.h>
+ #include <linux/interrupt.h>
++#include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <asm/intel_punit_ipc.h>
+ 
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index 822860b4801a..c1ed641b3e26 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -458,7 +458,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
+ 				   struct meson_pwm_channel *channels)
+ {
+ 	struct device *dev = meson->chip.dev;
+-	struct device_node *np = dev->of_node;
+ 	struct clk_init_data init;
+ 	unsigned int i;
+ 	char name[255];
+@@ -467,7 +466,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
+ 	for (i = 0; i < meson->chip.npwm; i++) {
+ 		struct meson_pwm_channel *channel = &channels[i];
+ 
+-		snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
++		snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
+ 
+ 		init.name = name;
+ 		init.ops = &clk_mux_ops;
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index bbf95b78ef5d..43e3398c9268 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -1780,6 +1780,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
+ 	struct dasd_eckd_private *private = device->private;
+ 	int i;
+ 
++	if (!private)
++		return;
++
+ 	dasd_alias_disconnect_device_from_lcu(device);
+ 	private->ned = NULL;
+ 	private->sneq = NULL;
+@@ -2035,8 +2038,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
+ 
+ static int dasd_eckd_online_to_ready(struct dasd_device *device)
+ {
+-	cancel_work_sync(&device->reload_device);
+-	cancel_work_sync(&device->kick_validate);
++	if (cancel_work_sync(&device->reload_device))
++		dasd_put_device(device);
++	if (cancel_work_sync(&device->kick_validate))
++		dasd_put_device(device);
++
+ 	return 0;
+ };
+ 
+diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
+index 80e5b283fd81..1391e5f35918 100644
+--- a/drivers/scsi/aic94xx/aic94xx_init.c
++++ b/drivers/scsi/aic94xx/aic94xx_init.c
+@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
+ 
+ 	aic94xx_transport_template =
+ 		sas_domain_attach_transport(&aic94xx_transport_functions);
+-	if (!aic94xx_transport_template)
++	if (!aic94xx_transport_template) {
++		err = -ENOMEM;
+ 		goto out_destroy_caches;
++	}
+ 
+ 	err = pci_register_driver(&aic94xx_pci_driver);
+ 	if (err)
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index e40a2c0a9543..d3da39a9f567 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -5446,11 +5446,11 @@ static int ni_E_init(struct comedi_device *dev,
+ 	/* Digital I/O (PFI) subdevice */
+ 	s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
+ 	s->type		= COMEDI_SUBD_DIO;
+-	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ 	s->maxdata	= 1;
+ 	if (devpriv->is_m_series) {
+ 		s->n_chan	= 16;
+ 		s->insn_bits	= ni_pfi_insn_bits;
++		s->subdev_flags	= SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
+ 
+ 		ni_writew(dev, s->state, NI_M_PFI_DO_REG);
+ 		for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
+@@ -5459,6 +5459,7 @@ static int ni_E_init(struct comedi_device *dev,
+ 		}
+ 	} else {
+ 		s->n_chan	= 10;
++		s->subdev_flags	= SDF_INTERNAL;
+ 	}
+ 	s->insn_config	= ni_pfi_insn_config;
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index ed3114556fda..560ed8711706 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -951,7 +951,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
+ 	list_for_each_entry_safe(node, n, &d->pending_list, node) {
+ 		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
+ 		if (msg->iova <= vq_msg->iova &&
+-		    msg->iova + msg->size - 1 > vq_msg->iova &&
++		    msg->iova + msg->size - 1 >= vq_msg->iova &&
+ 		    vq_msg->type == VHOST_IOTLB_MISS) {
+ 			vhost_poll_queue(&node->vq->poll);
+ 			list_del(&node->node);
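
The vhost.c change turns `>` into `>=` so the last byte of an IOTLB update range, [iova, iova + size - 1], still matches a pending miss at that exact address. A self-contained check showing the boundary case the old comparison rejected:

#include <stdio.h>

/* Inclusive check: does [iova, iova + size - 1] cover addr? */
static int range_covers(unsigned long long iova, unsigned long long size,
			unsigned long long addr)
{
	return iova <= addr && iova + size - 1 >= addr;
}

int main(void)
{
	/* a 1-byte update at 0x1000 must satisfy a miss at 0x1000;
	 * the old '>' comparison rejected exactly this case */
	printf("%d\n", range_covers(0x1000, 1, 0x1000));	/* 1 */
	printf("%d\n", range_covers(0x1000, 1, 0x1001));	/* 0 */
	return 0;
}
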
+diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
+index 2780886e8ba3..de062fb201bc 100644
+--- a/drivers/virtio/virtio_pci_legacy.c
++++ b/drivers/virtio/virtio_pci_legacy.c
+@@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ 	struct virtqueue *vq;
+ 	u16 num;
+ 	int err;
++	u64 q_pfn;
+ 
+ 	/* Select the queue we're interested in */
+ 	iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+@@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ 	if (!vq)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
++	if (q_pfn >> 32) {
++		dev_err(&vp_dev->pci_dev->dev,
++			"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
++			0x1ULL << (32 + PAGE_SHIFT - 30));
++		err = -E2BIG;
++		goto out_del_vq;
++	}
++
+ 	/* activate the queue */
+-	iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
+-		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
++	iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ 
+ 	vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
+ 
+@@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+ 
+ out_deactivate:
+ 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
++out_del_vq:
+ 	vring_del_virtqueue(vq);
+ 	return ERR_PTR(err);
+ }
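
The virtio_pci_legacy.c hunk guards against ring addresses whose page frame number does not fit the 32-bit legacy QUEUE_PFN register; previously iowrite32() would silently truncate them. A sketch of the check with an invented descriptor address:

#include <stdio.h>

#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12	/* legacy PFN is page-granular */

int main(void)
{
	/* invented ring address just past what 32 PFN bits can describe */
	unsigned long long desc_addr = 1ULL << 44;

	unsigned long long q_pfn = desc_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;

	if (q_pfn >> 32) {
		/* iowrite32() would truncate this PFN; refuse instead */
		printf("ring at 0x%llx unusable via legacy PFN register\n",
		       desc_addr);
		return 1;
	}

	printf("q_pfn=0x%llx fits in 32 bits\n", q_pfn);
	return 0;
}
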
+diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
+index b437fccd4e62..294f35ce9e46 100644
+--- a/drivers/xen/xen-balloon.c
++++ b/drivers/xen/xen-balloon.c
+@@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
+ 			static_max = new_target;
+ 		else
+ 			static_max >>= PAGE_SHIFT - 10;
+-		target_diff = xen_pv_domain() ? 0
++		target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
+ 				: static_max - balloon_stats.target_pages;
+ 	}
+ 
+diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
+index a3fdb4fe967d..daf45472bef9 100644
+--- a/fs/btrfs/check-integrity.c
++++ b/fs/btrfs/check-integrity.c
+@@ -1539,7 +1539,12 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
+ 	}
+ 
+ 	device = multi->stripes[0].dev;
+-	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev->bd_dev);
++	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
++	    !device->bdev || !device->name)
++		block_ctx_out->dev = NULL;
++	else
++		block_ctx_out->dev = btrfsic_dev_state_lookup(
++							device->bdev->bd_dev);
+ 	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
+ 	block_ctx_out->start = bytenr;
+ 	block_ctx_out->len = len;
+diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
+index e2ba0419297a..d20b244623f2 100644
+--- a/fs/btrfs/dev-replace.c
++++ b/fs/btrfs/dev-replace.c
+@@ -676,6 +676,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
+ 
+ 	btrfs_rm_dev_replace_unblocked(fs_info);
+ 
++	/*
++	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
++	 * update the on-disk dev stats value during the commit transaction
++	 */
++	atomic_inc(&tgt_device->dev_stats_ccnt);
++
+ 	/*
+ 	 * this is again a consistent state where no dev_replace procedure
+ 	 * is running, the target device is part of the filesystem, the
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8aab7a6c1e58..53cac20650d8 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -10687,7 +10687,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 		/* Don't want to race with allocators so take the groups_sem */
+ 		down_write(&space_info->groups_sem);
+ 		spin_lock(&block_group->lock);
+-		if (block_group->reserved ||
++		if (block_group->reserved || block_group->pinned ||
+ 		    btrfs_block_group_used(&block_group->item) ||
+ 		    block_group->ro ||
+ 		    list_is_singular(&block_group->list)) {
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 879b76fa881a..be94c65bb4d2 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1321,18 +1321,19 @@ static void __del_reloc_root(struct btrfs_root *root)
+ 	struct mapping_node *node = NULL;
+ 	struct reloc_control *rc = fs_info->reloc_ctl;
+ 
+-	spin_lock(&rc->reloc_root_tree.lock);
+-	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+-			      root->node->start);
+-	if (rb_node) {
+-		node = rb_entry(rb_node, struct mapping_node, rb_node);
+-		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
++	if (rc) {
++		spin_lock(&rc->reloc_root_tree.lock);
++		rb_node = tree_search(&rc->reloc_root_tree.rb_root,
++				      root->node->start);
++		if (rb_node) {
++			node = rb_entry(rb_node, struct mapping_node, rb_node);
++			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
++		}
++		spin_unlock(&rc->reloc_root_tree.lock);
++		if (!node)
++			return;
++		BUG_ON((struct btrfs_root *)node->data != root);
+ 	}
+-	spin_unlock(&rc->reloc_root_tree.lock);
+-
+-	if (!node)
+-		return;
+-	BUG_ON((struct btrfs_root *)node->data != root);
+ 
+ 	spin_lock(&fs_info->trans_lock);
+ 	list_del_init(&root->root_list);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index bddfc28b27c0..9b25f29d0e73 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -892,6 +892,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
+ 	char *device_name, *opts, *orig, *p;
+ 	int error = 0;
+ 
++	lockdep_assert_held(&uuid_mutex);
++
+ 	if (!options)
+ 		return 0;
+ 
+@@ -1526,12 +1528,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 	if (!(flags & SB_RDONLY))
+ 		mode |= FMODE_WRITE;
+ 
+-	error = btrfs_parse_early_options(data, mode, fs_type,
+-					  &fs_devices);
+-	if (error) {
+-		return ERR_PTR(error);
+-	}
+-
+ 	security_init_mnt_opts(&new_sec_opts);
+ 	if (data) {
+ 		error = parse_security_options(data, &new_sec_opts);
+@@ -1539,10 +1535,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 			return ERR_PTR(error);
+ 	}
+ 
+-	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
+-	if (error)
+-		goto error_sec_opts;
+-
+ 	/*
+ 	 * Setup a dummy root and fs_info for test/set super.  This is because
+ 	 * we don't actually fill this stuff out until open_ctree, but we need
+@@ -1555,8 +1547,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		goto error_sec_opts;
+ 	}
+ 
+-	fs_info->fs_devices = fs_devices;
+-
+ 	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ 	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL);
+ 	security_init_mnt_opts(&fs_info->security_opts);
+@@ -1565,7 +1555,23 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ 		goto error_fs_info;
+ 	}
+ 
++	mutex_lock(&uuid_mutex);
++	error = btrfs_parse_early_options(data, mode, fs_type, &fs_devices);
++	if (error) {
++		mutex_unlock(&uuid_mutex);
++		goto error_fs_info;
++	}
++
++	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
++	if (error) {
++		mutex_unlock(&uuid_mutex);
++		goto error_fs_info;
++	}
++
++	fs_info->fs_devices = fs_devices;
++
+ 	error = btrfs_open_devices(fs_devices, mode, fs_type);
++	mutex_unlock(&uuid_mutex);
+ 	if (error)
+ 		goto error_fs_info;
+ 
+@@ -2234,15 +2240,21 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
+ 
+ 	switch (cmd) {
+ 	case BTRFS_IOC_SCAN_DEV:
++		mutex_lock(&uuid_mutex);
+ 		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ 					    &btrfs_root_fs_type, &fs_devices);
++		mutex_unlock(&uuid_mutex);
+ 		break;
+ 	case BTRFS_IOC_DEVICES_READY:
++		mutex_lock(&uuid_mutex);
+ 		ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ 					    &btrfs_root_fs_type, &fs_devices);
+-		if (ret)
++		if (ret) {
++			mutex_unlock(&uuid_mutex);
+ 			break;
++		}
+ 		ret = !(fs_devices->num_devices == fs_devices->total_devices);
++		mutex_unlock(&uuid_mutex);
+ 		break;
+ 	case BTRFS_IOC_GET_SUPPORTED_FEATURES:
+ 		ret = btrfs_ioctl_get_supported_features((void __user*)arg);
+@@ -2368,7 +2380,7 @@ static __cold void btrfs_interface_exit(void)
+ 
+ static void __init btrfs_print_mod_info(void)
+ {
+-	pr_info("Btrfs loaded, crc32c=%s"
++	static const char options[] = ""
+ #ifdef CONFIG_BTRFS_DEBUG
+ 			", debug=on"
+ #endif
+@@ -2381,8 +2393,8 @@ static void __init btrfs_print_mod_info(void)
+ #ifdef CONFIG_BTRFS_FS_REF_VERIFY
+ 			", ref-verify=on"
+ #endif
+-			"\n",
+-			crc32c_impl());
++			;
++	pr_info("Btrfs loaded, crc32c=%s%s\n", crc32c_impl(), options);
+ }
+ 
+ static int __init init_btrfs_fs(void)
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 8d40e7dd8c30..d014af352ce0 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -396,9 +396,22 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
+ 	 * skip this check for relocation trees.
+ 	 */
+ 	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
++		u64 owner = btrfs_header_owner(leaf);
+ 		struct btrfs_root *check_root;
+ 
+-		key.objectid = btrfs_header_owner(leaf);
++		/* These trees must never be empty */
++		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
++		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
++		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
++		    owner == BTRFS_DEV_TREE_OBJECTID ||
++		    owner == BTRFS_FS_TREE_OBJECTID ||
++		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
++			generic_err(fs_info, leaf, 0,
++			"invalid root, root %llu must never be empty",
++				    owner);
++			return -EUCLEAN;
++		}
++		key.objectid = owner;
+ 		key.type = BTRFS_ROOT_ITEM_KEY;
+ 		key.offset = (u64)-1;
+ 
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1da162928d1a..5304b8d6ceb8 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -634,44 +634,48 @@ static void pending_bios_fn(struct btrfs_work *work)
+  *		devices.
+  */
+ static void btrfs_free_stale_devices(const char *path,
+-				     struct btrfs_device *skip_dev)
++				     struct btrfs_device *skip_device)
+ {
+-	struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
+-	struct btrfs_device *dev, *tmp_dev;
++	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
++	struct btrfs_device *device, *tmp_device;
+ 
+-	list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, fs_list) {
+-
+-		if (fs_devs->opened)
++	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
++		mutex_lock(&fs_devices->device_list_mutex);
++		if (fs_devices->opened) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			continue;
++		}
+ 
+-		list_for_each_entry_safe(dev, tmp_dev,
+-					 &fs_devs->devices, dev_list) {
++		list_for_each_entry_safe(device, tmp_device,
++					 &fs_devices->devices, dev_list) {
+ 			int not_found = 0;
+ 
+-			if (skip_dev && skip_dev == dev)
++			if (skip_device && skip_device == device)
+ 				continue;
+-			if (path && !dev->name)
++			if (path && !device->name)
+ 				continue;
+ 
+ 			rcu_read_lock();
+ 			if (path)
+-				not_found = strcmp(rcu_str_deref(dev->name),
++				not_found = strcmp(rcu_str_deref(device->name),
+ 						   path);
+ 			rcu_read_unlock();
+ 			if (not_found)
+ 				continue;
+ 
+ 			/* delete the stale device */
+-			if (fs_devs->num_devices == 1) {
+-				btrfs_sysfs_remove_fsid(fs_devs);
+-				list_del(&fs_devs->fs_list);
+-				free_fs_devices(fs_devs);
++			fs_devices->num_devices--;
++			list_del(&device->dev_list);
++			btrfs_free_device(device);
++
++			if (fs_devices->num_devices == 0)
+ 				break;
+-			} else {
+-				fs_devs->num_devices--;
+-				list_del(&dev->dev_list);
+-				btrfs_free_device(dev);
+-			}
++		}
++		mutex_unlock(&fs_devices->device_list_mutex);
++		if (fs_devices->num_devices == 0) {
++			btrfs_sysfs_remove_fsid(fs_devices);
++			list_del(&fs_devices->fs_list);
++			free_fs_devices(fs_devices);
+ 		}
+ 	}
+ }
+@@ -750,7 +754,8 @@ error_brelse:
+  * error pointer when failed
+  */
+ static noinline struct btrfs_device *device_list_add(const char *path,
+-			   struct btrfs_super_block *disk_super)
++			   struct btrfs_super_block *disk_super,
++			   bool *new_device_added)
+ {
+ 	struct btrfs_device *device;
+ 	struct btrfs_fs_devices *fs_devices;
+@@ -764,21 +769,26 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		if (IS_ERR(fs_devices))
+ 			return ERR_CAST(fs_devices);
+ 
++		mutex_lock(&fs_devices->device_list_mutex);
+ 		list_add(&fs_devices->fs_list, &fs_uuids);
+ 
+ 		device = NULL;
+ 	} else {
++		mutex_lock(&fs_devices->device_list_mutex);
+ 		device = find_device(fs_devices, devid,
+ 				disk_super->dev_item.uuid);
+ 	}
+ 
+ 	if (!device) {
+-		if (fs_devices->opened)
++		if (fs_devices->opened) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-EBUSY);
++		}
+ 
+ 		device = btrfs_alloc_device(NULL, &devid,
+ 					    disk_super->dev_item.uuid);
+ 		if (IS_ERR(device)) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			/* we can safely leave the fs_devices entry around */
+ 			return device;
+ 		}
+@@ -786,17 +796,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 		name = rcu_string_strdup(path, GFP_NOFS);
+ 		if (!name) {
+ 			btrfs_free_device(device);
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-ENOMEM);
+ 		}
+ 		rcu_assign_pointer(device->name, name);
+ 
+-		mutex_lock(&fs_devices->device_list_mutex);
+ 		list_add_rcu(&device->dev_list, &fs_devices->devices);
+ 		fs_devices->num_devices++;
+-		mutex_unlock(&fs_devices->device_list_mutex);
+ 
+ 		device->fs_devices = fs_devices;
+-		btrfs_free_stale_devices(path, device);
++		*new_device_added = true;
+ 
+ 		if (disk_super->label[0])
+ 			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
+@@ -840,12 +849,15 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 			 * with larger generation number or the last-in if
+ 			 * generation are equal.
+ 			 */
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-EEXIST);
+ 		}
+ 
+ 		name = rcu_string_strdup(path, GFP_NOFS);
+-		if (!name)
++		if (!name) {
++			mutex_unlock(&fs_devices->device_list_mutex);
+ 			return ERR_PTR(-ENOMEM);
++		}
+ 		rcu_string_free(device->name);
+ 		rcu_assign_pointer(device->name, name);
+ 		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
+@@ -865,6 +877,7 @@ static noinline struct btrfs_device *device_list_add(const char *path,
+ 
+ 	fs_devices->total_devices = btrfs_super_num_devices(disk_super);
+ 
++	mutex_unlock(&fs_devices->device_list_mutex);
+ 	return device;
+ }
+ 
+@@ -1146,7 +1159,8 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ {
+ 	int ret;
+ 
+-	mutex_lock(&uuid_mutex);
++	lockdep_assert_held(&uuid_mutex);
++
+ 	mutex_lock(&fs_devices->device_list_mutex);
+ 	if (fs_devices->opened) {
+ 		fs_devices->opened++;
+@@ -1156,7 +1170,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+ 		ret = open_fs_devices(fs_devices, flags, holder);
+ 	}
+ 	mutex_unlock(&fs_devices->device_list_mutex);
+-	mutex_unlock(&uuid_mutex);
+ 
+ 	return ret;
+ }
+@@ -1221,12 +1234,15 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+ 			  struct btrfs_fs_devices **fs_devices_ret)
+ {
+ 	struct btrfs_super_block *disk_super;
++	bool new_device_added = false;
+ 	struct btrfs_device *device;
+ 	struct block_device *bdev;
+ 	struct page *page;
+ 	int ret = 0;
+ 	u64 bytenr;
+ 
++	lockdep_assert_held(&uuid_mutex);
++
+ 	/*
+ 	 * we would like to check all the supers, but that would make
+ 	 * a btrfs mount succeed after a mkfs from a different FS.
+@@ -1245,13 +1261,14 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
+ 		goto error_bdev_put;
+ 	}
+ 
+-	mutex_lock(&uuid_mutex);
+-	device = device_list_add(path, disk_super);
+-	if (IS_ERR(device))
++	device = device_list_add(path, disk_super, &new_device_added);
++	if (IS_ERR(device)) {
+ 		ret = PTR_ERR(device);
+-	else
++	} else {
+ 		*fs_devices_ret = device->fs_devices;
+-	mutex_unlock(&uuid_mutex);
++		if (new_device_added)
++			btrfs_free_stale_devices(path, device);
++	}
+ 
+ 	btrfs_release_disk_super(page);
+ 
+@@ -2029,6 +2046,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
+ 
+ 	cur_devices->num_devices--;
+ 	cur_devices->total_devices--;
++	/* Update total_devices of the parent fs_devices if it's seed */
++	if (cur_devices != fs_devices)
++		fs_devices->total_devices--;
+ 
+ 	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ 		cur_devices->missing_devices--;
+@@ -6563,10 +6583,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
+ 	write_lock(&map_tree->map_tree.lock);
+ 	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
+ 	write_unlock(&map_tree->map_tree.lock);
+-	BUG_ON(ret); /* Tree corruption */
++	if (ret < 0) {
++		btrfs_err(fs_info,
++			  "failed to add chunk map, start=%llu len=%llu: %d",
++			  em->start, em->len, ret);
++	}
+ 	free_extent_map(em);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static void fill_device_from_item(struct extent_buffer *leaf,
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 991bfb271908..b20297988fe0 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -383,6 +383,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 		atomic_set(&totBufAllocCount, 0);
+ 		atomic_set(&totSmBufAllocCount, 0);
+ #endif /* CONFIG_CIFS_STATS2 */
++		spin_lock(&GlobalMid_Lock);
++		GlobalMaxActiveXid = 0;
++		GlobalCurrentXid = 0;
++		spin_unlock(&GlobalMid_Lock);
+ 		spin_lock(&cifs_tcp_ses_lock);
+ 		list_for_each(tmp1, &cifs_tcp_ses_list) {
+ 			server = list_entry(tmp1, struct TCP_Server_Info,
+@@ -395,6 +399,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
+ 							  struct cifs_tcon,
+ 							  tcon_list);
+ 					atomic_set(&tcon->num_smbs_sent, 0);
++					spin_lock(&tcon->stat_lock);
++					tcon->bytes_read = 0;
++					tcon->bytes_written = 0;
++					spin_unlock(&tcon->stat_lock);
+ 					if (server->ops->clear_stats)
+ 						server->ops->clear_stats(tcon);
+ 				}
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 5df2c0698cda..9d02563b2147 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3031,11 +3031,15 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
+ 	}
+ 
+ #ifdef CONFIG_CIFS_SMB311
+-	if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
+-		if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
++	if (volume_info->linux_ext) {
++		if (ses->server->posix_ext_supported) {
+ 			tcon->posix_extensions = true;
+ 			printk_once(KERN_WARNING
+ 				"SMB3.11 POSIX Extensions are experimental\n");
++		} else {
++			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
++			rc = -EOPNOTSUPP;
++			goto out_fail;
+ 		}
+ 	}
+ #endif /* 311 */
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 3ff7cec2da81..239215dcc00b 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -240,6 +240,13 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
+ 		if (clc_len == len + 1)
+ 			return 0;
+ 
++		/*
++		 * Some Windows servers (win2016) will also pad the final
++		 * PDU in a compound to 8 bytes.
++		 */
++		if (((clc_len + 7) & ~7) == len)
++			return 0;
++
+ 		/*
+ 		 * MacOS server pads after SMB2.1 write response with 3 bytes
+ 		 * of junk. Other servers match RFC1001 len to actual
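
The smb2misc.c addition accepts replies whose final compounded PDU was padded to an 8-byte boundary; `(clc_len + 7) & ~7` is the usual round-up-to-multiple-of-8 idiom. A small demonstration with invented lengths:

#include <stdio.h>

/* round v up to the next multiple of 8, as in the new padding check */
static unsigned int align8(unsigned int v)
{
	return (v + 7) & ~7u;
}

int main(void)
{
	unsigned int clc_len = 73;	/* length computed from the PDU    */
	unsigned int len     = 80;	/* length the server actually sent */

	/* a win2016-style server pads the final compounded PDU to 8 bytes */
	printf("padded match: %s\n", align8(clc_len) == len ? "yes" : "no");
	return 0;
}
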
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index ffce77e00a58..44e511a35559 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -360,7 +360,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ 		       total_len);
+ 
+ 	if (tcon != NULL) {
+-#ifdef CONFIG_CIFS_STATS2
++#ifdef CONFIG_CIFS_STATS
+ 		uint16_t com_code = le16_to_cpu(smb2_command);
+ 		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
+ #endif
+@@ -1928,7 +1928,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ {
+ 	struct smb_rqst rqst;
+ 	struct smb2_create_req *req;
+-	struct smb2_create_rsp *rsp;
++	struct smb2_create_rsp *rsp = NULL;
+ 	struct TCP_Server_Info *server;
+ 	struct cifs_ses *ses = tcon->ses;
+ 	struct kvec iov[3]; /* make sure at least one for each open context */
+@@ -1943,27 +1943,31 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	char *pc_buf = NULL;
+ 	int flags = 0;
+ 	unsigned int total_len;
+-	__le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+-
+-	if (!path)
+-		return -ENOMEM;
++	__le16 *utf16_path = NULL;
+ 
+ 	cifs_dbg(FYI, "mkdir\n");
+ 
++	/* resource #1: path allocation */
++	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
++	if (!utf16_path)
++		return -ENOMEM;
++
+ 	if (ses && (ses->server))
+ 		server = ses->server;
+-	else
+-		return -EIO;
++	else {
++		rc = -EIO;
++		goto err_free_path;
++	}
+ 
++	/* resource #2: request */
+ 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+-
+ 	if (rc)
+-		return rc;
++		goto err_free_path;
++
+ 
+ 	if (smb3_encryption_required(tcon))
+ 		flags |= CIFS_TRANSFORM_REQ;
+ 
+-
+ 	req->ImpersonationLevel = IL_IMPERSONATION;
+ 	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+ 	/* File attributes ignored on open (used in create though) */
+@@ -1992,50 +1996,44 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 		req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+ 						 &name_len,
+-						 tcon->treeName, path);
+-		if (rc) {
+-			cifs_small_buf_release(req);
+-			return rc;
+-		}
++						 tcon->treeName, utf16_path);
++		if (rc)
++			goto err_free_req;
++
+ 		req->NameLength = cpu_to_le16(name_len * 2);
+ 		uni_path_len = copy_size;
+-		path = copy_path;
++		/* free before overwriting resource */
++		kfree(utf16_path);
++		utf16_path = copy_path;
+ 	} else {
+-		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
++		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
+ 		/* MUST set path len (NameLength) to 0 opening root of share */
+ 		req->NameLength = cpu_to_le16(uni_path_len - 2);
+ 		if (uni_path_len % 8 != 0) {
+ 			copy_size = roundup(uni_path_len, 8);
+ 			copy_path = kzalloc(copy_size, GFP_KERNEL);
+ 			if (!copy_path) {
+-				cifs_small_buf_release(req);
+-				return -ENOMEM;
++				rc = -ENOMEM;
++				goto err_free_req;
+ 			}
+-			memcpy((char *)copy_path, (const char *)path,
++			memcpy((char *)copy_path, (const char *)utf16_path,
+ 			       uni_path_len);
+ 			uni_path_len = copy_size;
+-			path = copy_path;
++			/* free before overwriting resource */
++			kfree(utf16_path);
++			utf16_path = copy_path;
+ 		}
+ 	}
+ 
+ 	iov[1].iov_len = uni_path_len;
+-	iov[1].iov_base = path;
++	iov[1].iov_base = utf16_path;
+ 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+ 
+ 	if (tcon->posix_extensions) {
+-		if (n_iov > 2) {
+-			struct create_context *ccontext =
+-			    (struct create_context *)iov[n_iov-1].iov_base;
+-			ccontext->Next =
+-				cpu_to_le32(iov[n_iov-1].iov_len);
+-		}
+-
++		/* resource #3: posix buf */
+ 		rc = add_posix_context(iov, &n_iov, mode);
+-		if (rc) {
+-			cifs_small_buf_release(req);
+-			kfree(copy_path);
+-			return rc;
+-		}
++		if (rc)
++			goto err_free_req;
+ 		pc_buf = iov[n_iov-1].iov_base;
+ 	}
+ 
+@@ -2044,32 +2042,33 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ 	rqst.rq_iov = iov;
+ 	rqst.rq_nvec = n_iov;
+ 
+-	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+-			    &rsp_iov);
+-
+-	cifs_small_buf_release(req);
+-	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+-
+-	if (rc != 0) {
++	/* resource #4: response buffer */
++	rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
++	if (rc) {
+ 		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+ 		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+-				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+-		goto smb311_mkdir_exit;
+-	} else
+-		trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+-				     ses->Suid, CREATE_NOT_FILE,
+-				     FILE_WRITE_ATTRIBUTES);
++					   CREATE_NOT_FILE,
++					   FILE_WRITE_ATTRIBUTES, rc);
++		goto err_free_rsp_buf;
++	}
++
++	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
++	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
++				    ses->Suid, CREATE_NOT_FILE,
++				    FILE_WRITE_ATTRIBUTES);
+ 
+ 	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+ 
+ 	/* Eventually save off posix specific response info and timestamps */
+ 
+-smb311_mkdir_exit:
+-	kfree(copy_path);
+-	kfree(pc_buf);
++err_free_rsp_buf:
+ 	free_rsp_buf(resp_buftype, rsp);
++	kfree(pc_buf);
++err_free_req:
++	cifs_small_buf_release(req);
++err_free_path:
++	kfree(utf16_path);
+ 	return rc;
+-
+ }
+ #endif /* SMB311 */
+ 
+diff --git a/fs/dcache.c b/fs/dcache.c
+index ceb7b491d1b9..d19a0dc46c04 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -292,7 +292,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
+ 		spin_unlock(&dentry->d_lock);
+ 		name->name = p->name;
+ 	} else {
+-		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
++		memcpy(name->inline_name, dentry->d_iname,
++		       dentry->d_name.len + 1);
+ 		spin_unlock(&dentry->d_lock);
+ 		name->name = name->inline_name;
+ 	}
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 8f931d699287..b61954d40c25 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2149,8 +2149,12 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)
+ 
+ 	if (to > i_size) {
+ 		down_write(&F2FS_I(inode)->i_mmap_sem);
++		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
+ 		truncate_pagecache(inode, i_size);
+ 		f2fs_truncate_blocks(inode, i_size, true);
++
++		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 		up_write(&F2FS_I(inode)->i_mmap_sem);
+ 	}
+ }
+@@ -2490,6 +2494,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
+ 	if (!PageUptodate(page))
+ 		SetPageUptodate(page);
+ 
++	/* don't remain PG_checked flag which was set during GC */
++	if (is_cold_data(page))
++		clear_cold_data(page);
++
+ 	if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
+ 		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+ 			f2fs_register_inmem_page(inode, page);
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 6880c6f78d58..3ffa341cf586 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -782,22 +782,26 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	}
+ 
+ 	if (attr->ia_valid & ATTR_SIZE) {
+-		if (attr->ia_size <= i_size_read(inode)) {
+-			down_write(&F2FS_I(inode)->i_mmap_sem);
+-			truncate_setsize(inode, attr->ia_size);
++		bool to_smaller = (attr->ia_size <= i_size_read(inode));
++
++		down_write(&F2FS_I(inode)->i_mmap_sem);
++		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++		truncate_setsize(inode, attr->ia_size);
++
++		if (to_smaller)
+ 			err = f2fs_truncate(inode);
+-			up_write(&F2FS_I(inode)->i_mmap_sem);
+-			if (err)
+-				return err;
+-		} else {
+-			/*
+-			 * do not trim all blocks after i_size if target size is
+-			 * larger than i_size.
+-			 */
+-			down_write(&F2FS_I(inode)->i_mmap_sem);
+-			truncate_setsize(inode, attr->ia_size);
+-			up_write(&F2FS_I(inode)->i_mmap_sem);
++		/*
++		 * do not trim all blocks after i_size if target size is
++		 * larger than i_size.
++		 */
++		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++		up_write(&F2FS_I(inode)->i_mmap_sem);
+ 
++		if (err)
++			return err;
++
++		if (!to_smaller) {
+ 			/* should convert inline inode here */
+ 			if (!f2fs_may_inline_data(inode)) {
+ 				err = f2fs_convert_inline_inode(inode);
+@@ -944,13 +948,18 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
+ 
+ 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
++
+ 			down_write(&F2FS_I(inode)->i_mmap_sem);
++			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
+ 			truncate_inode_pages_range(mapping, blk_start,
+ 					blk_end - 1);
+ 
+ 			f2fs_lock_op(sbi);
+ 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
+ 			f2fs_unlock_op(sbi);
++
++			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 			up_write(&F2FS_I(inode)->i_mmap_sem);
+ 		}
+ 	}
+@@ -1295,8 +1304,6 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 	if (ret)
+ 		goto out_sem;
+ 
+-	truncate_pagecache_range(inode, offset, offset + len - 1);
+-
+ 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
+ 
+@@ -1326,12 +1333,19 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 			unsigned int end_offset;
+ 			pgoff_t end;
+ 
++			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
++
++			truncate_pagecache_range(inode,
++				(loff_t)index << PAGE_SHIFT,
++				((loff_t)pg_end << PAGE_SHIFT) - 1);
++
+ 			f2fs_lock_op(sbi);
+ 
+ 			set_new_dnode(&dn, inode, NULL, NULL, 0);
+ 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
+ 			if (ret) {
+ 				f2fs_unlock_op(sbi);
++				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 				goto out;
+ 			}
+ 
+@@ -1340,7 +1354,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
+ 
+ 			ret = f2fs_do_zero_range(&dn, index, end);
+ 			f2fs_put_dnode(&dn);
++
+ 			f2fs_unlock_op(sbi);
++			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ 
+ 			f2fs_balance_fs(sbi, dn.node_changed);
+ 
+diff --git a/fs/fat/cache.c b/fs/fat/cache.c
+index e9bed49df6b7..78d501c1fb65 100644
+--- a/fs/fat/cache.c
++++ b/fs/fat/cache.c
+@@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
+ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ {
+ 	struct super_block *sb = inode->i_sb;
+-	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
++	struct msdos_sb_info *sbi = MSDOS_SB(sb);
++	const int limit = sb->s_maxbytes >> sbi->cluster_bits;
+ 	struct fat_entry fatent;
+ 	struct fat_cache_id cid;
+ 	int nr;
+@@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 
+ 	*fclus = 0;
+ 	*dclus = MSDOS_I(inode)->i_start;
++	if (!fat_valid_entry(sbi, *dclus)) {
++		fat_fs_error_ratelimit(sb,
++			"%s: invalid start cluster (i_pos %lld, start %08x)",
++			__func__, MSDOS_I(inode)->i_pos, *dclus);
++		return -EIO;
++	}
+ 	if (cluster == 0)
+ 		return 0;
+ 
+@@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 		/* prevent the infinite loop of cluster chain */
+ 		if (*fclus > limit) {
+ 			fat_fs_error_ratelimit(sb,
+-					"%s: detected the cluster chain loop"
+-					" (i_pos %lld)", __func__,
+-					MSDOS_I(inode)->i_pos);
++				"%s: detected the cluster chain loop (i_pos %lld)",
++				__func__, MSDOS_I(inode)->i_pos);
+ 			nr = -EIO;
+ 			goto out;
+ 		}
+@@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
+ 			goto out;
+ 		else if (nr == FAT_ENT_FREE) {
+ 			fat_fs_error_ratelimit(sb,
+-				       "%s: invalid cluster chain (i_pos %lld)",
+-				       __func__,
+-				       MSDOS_I(inode)->i_pos);
++				"%s: invalid cluster chain (i_pos %lld)",
++				__func__, MSDOS_I(inode)->i_pos);
+ 			nr = -EIO;
+ 			goto out;
+ 		} else if (nr == FAT_ENT_EOF) {
+diff --git a/fs/fat/fat.h b/fs/fat/fat.h
+index 8fc1093da47d..a0a00f3734bc 100644
+--- a/fs/fat/fat.h
++++ b/fs/fat/fat.h
+@@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
+ 	fatent->fat_inode = NULL;
+ }
+ 
++static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
++{
++	return FAT_START_ENT <= entry && entry < sbi->max_cluster;
++}
++
+ extern void fat_ent_access_init(struct super_block *sb);
+ extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
+ 			int entry);
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index bac10de678cc..3aef8630a4b9 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
+ {
+ 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ 	int bytes = entry + (entry >> 1);
+-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
++	WARN_ON(!fat_valid_entry(sbi, entry));
+ 	*offset = bytes & (sb->s_blocksize - 1);
+ 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+ }
+@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
+ {
+ 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ 	int bytes = (entry << sbi->fatent_shift);
+-	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
++	WARN_ON(!fat_valid_entry(sbi, entry));
+ 	*offset = bytes & (sb->s_blocksize - 1);
+ 	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
+ }
+@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
+ 	int err, offset;
+ 	sector_t blocknr;
+ 
+-	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
++	if (!fat_valid_entry(sbi, entry)) {
+ 		fatent_brelse(fatent);
+ 		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
+ 		return -EIO;
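
The fs/fat hunks above replace three open-coded range checks with the new fat_valid_entry() helper and add the previously missing validation of the inode's start cluster in fat_get_cluster(). A minimal user-space sketch of the same bounds check, with a stand-in struct instead of the real superblock info (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    #define FAT_START_ENT 2   /* first usable data-cluster number */

    /* Stand-in for struct msdos_sb_info: only the field the check reads. */
    struct msdos_sb_info { int max_cluster; };

    static bool fat_valid_entry(const struct msdos_sb_info *sbi, int entry)
    {
            /* Valid cluster numbers lie in [FAT_START_ENT, max_cluster). */
            return FAT_START_ENT <= entry && entry < sbi->max_cluster;
    }

    int main(void)
    {
            struct msdos_sb_info sbi = { .max_cluster = 0x1000 };

            printf("%d\n", fat_valid_entry(&sbi, 0));       /* 0: reserved entry */
            printf("%d\n", fat_valid_entry(&sbi, 2));       /* 1: first data cluster */
            printf("%d\n", fat_valid_entry(&sbi, 0x1000));  /* 0: beyond max_cluster */
            return 0;
    }
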
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index ad04a5741016..9a8772465a90 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
+ 	if (!fd->bnode) {
+ 		if (!tree->root)
+ 			hfs_btree_inc_height(tree);
+-		fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
+-		if (IS_ERR(fd->bnode))
+-			return PTR_ERR(fd->bnode);
++		node = hfs_bnode_find(tree, tree->leaf_head);
++		if (IS_ERR(node))
++			return PTR_ERR(node);
++		fd->bnode = node;
+ 		fd->record = -1;
+ 	}
+ 	new_node = NULL;
+diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
+index b5254378f011..cd017d7dbdfa 100644
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -78,13 +78,13 @@ again:
+ 				cpu_to_be32(HFSP_HARDLINK_TYPE) &&
+ 				entry.file.user_info.fdCreator ==
+ 				cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
++				HFSPLUS_SB(sb)->hidden_dir &&
+ 				(entry.file.create_date ==
+ 					HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
+ 						create_date ||
+ 				entry.file.create_date ==
+ 					HFSPLUS_I(d_inode(sb->s_root))->
+-						create_date) &&
+-				HFSPLUS_SB(sb)->hidden_dir) {
++						create_date)) {
+ 			struct qstr str;
+ 			char name[32];
+ 
+diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
+index a6c0f54c48c3..80abba550bfa 100644
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto out_put_root;
+ 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
+ 		hfs_find_exit(&fd);
+-		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
++		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
++			err = -EINVAL;
+ 			goto out_put_root;
++		}
+ 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
+ 		if (IS_ERR(inode)) {
+ 			err = PTR_ERR(inode);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 464db0c0f5c8..ff98e2a3f3cc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7734,7 +7734,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
+ 	}
+ out:
+ 	clp->cl_sp4_flags = flags;
+-	return 0;
++	return ret;
+ }
+ 
+ struct nfs41_exchange_id_data {
+diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
+index e64ecb9f2720..66c373230e60 100644
+--- a/fs/proc/kcore.c
++++ b/fs/proc/kcore.c
+@@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
+ 		phdr->p_flags	= PF_R|PF_W|PF_X;
+ 		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
+ 		phdr->p_vaddr	= (size_t)m->addr;
+-		if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
++		if (m->type == KCORE_RAM)
+ 			phdr->p_paddr	= __pa(m->addr);
++		else if (m->type == KCORE_TEXT)
++			phdr->p_paddr	= __pa_symbol(m->addr);
+ 		else
+ 			phdr->p_paddr	= (elf_addr_t)-1;
+ 		phdr->p_filesz	= phdr->p_memsz	= m->size;
+diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
+index cfb6674331fd..0651646dd04d 100644
+--- a/fs/proc/vmcore.c
++++ b/fs/proc/vmcore.c
+@@ -225,6 +225,7 @@ out_unlock:
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MMU
+ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
+ 			       u64 start, size_t size)
+ {
+@@ -259,6 +260,7 @@ out_unlock:
+ 	mutex_unlock(&vmcoredd_mutex);
+ 	return ret;
+ }
++#endif /* CONFIG_MMU */
+ #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
+ 
+ /* Read from the ELF header and then the crash dump. On error, negative value is
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index ae4811fecc1f..6d670bd9ab6b 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -271,7 +271,7 @@ struct reiserfs_journal_list {
+ 
+ 	struct mutex j_commit_mutex;
+ 	unsigned int j_trans_id;
+-	time_t j_timestamp;
++	time64_t j_timestamp; /* write-only but useful for crash dump analysis */
+ 	struct reiserfs_list_bitmap *j_list_bitmap;
+ 	struct buffer_head *j_commit_bh;	/* commit buffer head */
+ 	struct reiserfs_journal_cnode *j_realblock;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 29502238e510..bf85e152af05 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -3082,4 +3082,6 @@
+ 
+ #define PCI_VENDOR_ID_OCZ		0x1b85
+ 
++#define PCI_VENDOR_ID_NCUBE		0x10ff
++
+ #endif /* _LINUX_PCI_IDS_H */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index cd3ecda9386a..106e01c721e6 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -2023,6 +2023,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
+ void tcp_get_available_ulp(char *buf, size_t len);
+ void tcp_cleanup_ulp(struct sock *sk);
+ 
++#define MODULE_ALIAS_TCP_ULP(name)				\
++	__MODULE_INFO(alias, alias_userspace, name);		\
++	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
++
+ /* Call BPF_SOCK_OPS program that returns an int. If the return value
+  * is < 0, then the BPF op failed (for example if the loaded BPF
+  * program does not support the chosen operation or there is no BPF
+diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
+index 7b8c9e19bad1..910cc4334b21 100644
+--- a/include/uapi/linux/keyctl.h
++++ b/include/uapi/linux/keyctl.h
+@@ -65,7 +65,7 @@
+ 
+ /* keyctl structures */
+ struct keyctl_dh_params {
+-	__s32 private;
++	__s32 dh_private;
+ 	__s32 prime;
+ 	__s32 base;
+ };
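
The uapi rename above keeps the binary layout identical and only changes the member name; the likely motivation, inferred here rather than stated in the hunk, is that "private" is a reserved keyword in C++, which broke user space including this header from C++ code. A small sketch of the fixed layout from the user-space side (the key serials are made up):

    #include <stdint.h>

    /* Local copy of the fixed layout; real code includes <linux/keyctl.h>. */
    struct keyctl_dh_params {
            int32_t dh_private;   /* was ".private", a C++ reserved keyword */
            int32_t prime;
            int32_t base;
    };

    int main(void)
    {
            struct keyctl_dh_params params = {
                    .dh_private = 123,   /* hypothetical key serials */
                    .prime      = 456,
                    .base       = 789,
            };
            return params.dh_private == 123 ? 0 : 1;
    }
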
+diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
+index 76efe9a183f5..fc5b103512e7 100644
+--- a/kernel/bpf/inode.c
++++ b/kernel/bpf/inode.c
+@@ -196,19 +196,21 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ {
+ 	struct bpf_map *map = seq_file_to_map(m);
+ 	void *key = map_iter(m)->key;
++	void *prev_key;
+ 
+ 	if (map_iter(m)->done)
+ 		return NULL;
+ 
+ 	if (unlikely(v == SEQ_START_TOKEN))
+-		goto done;
++		prev_key = NULL;
++	else
++		prev_key = key;
+ 
+-	if (map->ops->map_get_next_key(map, key, key)) {
++	if (map->ops->map_get_next_key(map, prev_key, key)) {
+ 		map_iter(m)->done = true;
+ 		return NULL;
+ 	}
+ 
+-done:
+ 	++(*pos);
+ 	return key;
+ }
+diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
+index c4d75c52b4fc..58899601fccf 100644
+--- a/kernel/bpf/sockmap.c
++++ b/kernel/bpf/sockmap.c
+@@ -58,6 +58,7 @@ struct bpf_stab {
+ 	struct bpf_map map;
+ 	struct sock **sock_map;
+ 	struct bpf_sock_progs progs;
++	raw_spinlock_t lock;
+ };
+ 
+ struct bucket {
+@@ -89,9 +90,9 @@ enum smap_psock_state {
+ 
+ struct smap_psock_map_entry {
+ 	struct list_head list;
++	struct bpf_map *map;
+ 	struct sock **entry;
+ 	struct htab_elem __rcu *hash_link;
+-	struct bpf_htab __rcu *htab;
+ };
+ 
+ struct smap_psock {
+@@ -343,13 +344,18 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
+ 	e = psock_map_pop(sk, psock);
+ 	while (e) {
+ 		if (e->entry) {
+-			osk = cmpxchg(e->entry, sk, NULL);
++			struct bpf_stab *stab = container_of(e->map, struct bpf_stab, map);
++
++			raw_spin_lock_bh(&stab->lock);
++			osk = *e->entry;
+ 			if (osk == sk) {
++				*e->entry = NULL;
+ 				smap_release_sock(psock, sk);
+ 			}
++			raw_spin_unlock_bh(&stab->lock);
+ 		} else {
+ 			struct htab_elem *link = rcu_dereference(e->hash_link);
+-			struct bpf_htab *htab = rcu_dereference(e->htab);
++			struct bpf_htab *htab = container_of(e->map, struct bpf_htab, map);
+ 			struct hlist_head *head;
+ 			struct htab_elem *l;
+ 			struct bucket *b;
+@@ -370,6 +376,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
+ 			}
+ 			raw_spin_unlock_bh(&b->lock);
+ 		}
++		kfree(e);
+ 		e = psock_map_pop(sk, psock);
+ 	}
+ 	rcu_read_unlock();
+@@ -1644,6 +1651,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	bpf_map_init_from_attr(&stab->map, attr);
++	raw_spin_lock_init(&stab->lock);
+ 
+ 	/* make sure page count doesn't overflow */
+ 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+@@ -1678,8 +1686,10 @@ static void smap_list_map_remove(struct smap_psock *psock,
+ 
+ 	spin_lock_bh(&psock->maps_lock);
+ 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+-		if (e->entry == entry)
++		if (e->entry == entry) {
+ 			list_del(&e->list);
++			kfree(e);
++		}
+ 	}
+ 	spin_unlock_bh(&psock->maps_lock);
+ }
+@@ -1693,8 +1703,10 @@ static void smap_list_hash_remove(struct smap_psock *psock,
+ 	list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ 		struct htab_elem *c = rcu_dereference(e->hash_link);
+ 
+-		if (c == hash_link)
++		if (c == hash_link) {
+ 			list_del(&e->list);
++			kfree(e);
++		}
+ 	}
+ 	spin_unlock_bh(&psock->maps_lock);
+ }
+@@ -1714,14 +1726,15 @@ static void sock_map_free(struct bpf_map *map)
+ 	 * and a grace period expire to ensure psock is really safe to remove.
+ 	 */
+ 	rcu_read_lock();
++	raw_spin_lock_bh(&stab->lock);
+ 	for (i = 0; i < stab->map.max_entries; i++) {
+ 		struct smap_psock *psock;
+ 		struct sock *sock;
+ 
+-		sock = xchg(&stab->sock_map[i], NULL);
++		sock = stab->sock_map[i];
+ 		if (!sock)
+ 			continue;
+-
++		stab->sock_map[i] = NULL;
+ 		psock = smap_psock_sk(sock);
+ 		/* This check handles a racing sock event that can get the
+ 		 * sk_callback_lock before this case but after xchg happens
+@@ -1733,6 +1746,7 @@ static void sock_map_free(struct bpf_map *map)
+ 			smap_release_sock(psock, sock);
+ 		}
+ 	}
++	raw_spin_unlock_bh(&stab->lock);
+ 	rcu_read_unlock();
+ 
+ 	sock_map_remove_complete(stab);
+@@ -1776,19 +1790,23 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
+ 	if (k >= map->max_entries)
+ 		return -EINVAL;
+ 
+-	sock = xchg(&stab->sock_map[k], NULL);
++	raw_spin_lock_bh(&stab->lock);
++	sock = stab->sock_map[k];
++	stab->sock_map[k] = NULL;
++	raw_spin_unlock_bh(&stab->lock);
+ 	if (!sock)
+ 		return -EINVAL;
+ 
+ 	psock = smap_psock_sk(sock);
+ 	if (!psock)
+-		goto out;
+-
+-	if (psock->bpf_parse)
++		return 0;
++	if (psock->bpf_parse) {
++		write_lock_bh(&sock->sk_callback_lock);
+ 		smap_stop_sock(psock, sock);
++		write_unlock_bh(&sock->sk_callback_lock);
++	}
+ 	smap_list_map_remove(psock, &stab->sock_map[k]);
+ 	smap_release_sock(psock, sock);
+-out:
+ 	return 0;
+ }
+ 
+@@ -1824,11 +1842,9 @@ out:
+ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 				      struct bpf_sock_progs *progs,
+ 				      struct sock *sock,
+-				      struct sock **map_link,
+ 				      void *key)
+ {
+ 	struct bpf_prog *verdict, *parse, *tx_msg;
+-	struct smap_psock_map_entry *e = NULL;
+ 	struct smap_psock *psock;
+ 	bool new = false;
+ 	int err = 0;
+@@ -1901,14 +1917,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 		new = true;
+ 	}
+ 
+-	if (map_link) {
+-		e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
+-		if (!e) {
+-			err = -ENOMEM;
+-			goto out_free;
+-		}
+-	}
+-
+ 	/* 3. At this point we have a reference to a valid psock that is
+ 	 * running. Attach any BPF programs needed.
+ 	 */
+@@ -1930,17 +1938,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
+ 		write_unlock_bh(&sock->sk_callback_lock);
+ 	}
+ 
+-	/* 4. Place psock in sockmap for use and stop any programs on
+-	 * the old sock assuming its not the same sock we are replacing
+-	 * it with. Because we can only have a single set of programs if
+-	 * old_sock has a strp we can stop it.
+-	 */
+-	if (map_link) {
+-		e->entry = map_link;
+-		spin_lock_bh(&psock->maps_lock);
+-		list_add_tail(&e->list, &psock->maps);
+-		spin_unlock_bh(&psock->maps_lock);
+-	}
+ 	return err;
+ out_free:
+ 	smap_release_sock(psock, sock);
+@@ -1951,7 +1948,6 @@ out_progs:
+ 	}
+ 	if (tx_msg)
+ 		bpf_prog_put(tx_msg);
+-	kfree(e);
+ 	return err;
+ }
+ 
+@@ -1961,36 +1957,57 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ {
+ 	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ 	struct bpf_sock_progs *progs = &stab->progs;
+-	struct sock *osock, *sock;
++	struct sock *osock, *sock = skops->sk;
++	struct smap_psock_map_entry *e;
++	struct smap_psock *psock;
+ 	u32 i = *(u32 *)key;
+ 	int err;
+ 
+ 	if (unlikely(flags > BPF_EXIST))
+ 		return -EINVAL;
+-
+ 	if (unlikely(i >= stab->map.max_entries))
+ 		return -E2BIG;
+ 
+-	sock = READ_ONCE(stab->sock_map[i]);
+-	if (flags == BPF_EXIST && !sock)
+-		return -ENOENT;
+-	else if (flags == BPF_NOEXIST && sock)
+-		return -EEXIST;
++	e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
++	if (!e)
++		return -ENOMEM;
+ 
+-	sock = skops->sk;
+-	err = __sock_map_ctx_update_elem(map, progs, sock, &stab->sock_map[i],
+-					 key);
++	err = __sock_map_ctx_update_elem(map, progs, sock, key);
+ 	if (err)
+ 		goto out;
+ 
+-	osock = xchg(&stab->sock_map[i], sock);
+-	if (osock) {
+-		struct smap_psock *opsock = smap_psock_sk(osock);
++	/* psock guaranteed to be present. */
++	psock = smap_psock_sk(sock);
++	raw_spin_lock_bh(&stab->lock);
++	osock = stab->sock_map[i];
++	if (osock && flags == BPF_NOEXIST) {
++		err = -EEXIST;
++		goto out_unlock;
++	}
++	if (!osock && flags == BPF_EXIST) {
++		err = -ENOENT;
++		goto out_unlock;
++	}
+ 
+-		smap_list_map_remove(opsock, &stab->sock_map[i]);
+-		smap_release_sock(opsock, osock);
++	e->entry = &stab->sock_map[i];
++	e->map = map;
++	spin_lock_bh(&psock->maps_lock);
++	list_add_tail(&e->list, &psock->maps);
++	spin_unlock_bh(&psock->maps_lock);
++
++	stab->sock_map[i] = sock;
++	if (osock) {
++		psock = smap_psock_sk(osock);
++		smap_list_map_remove(psock, &stab->sock_map[i]);
++		smap_release_sock(psock, osock);
+ 	}
++	raw_spin_unlock_bh(&stab->lock);
++	return 0;
++out_unlock:
++	smap_release_sock(psock, sock);
++	raw_spin_unlock_bh(&stab->lock);
+ out:
++	kfree(e);
+ 	return err;
+ }
+ 
+@@ -2353,7 +2370,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ 	b = __select_bucket(htab, hash);
+ 	head = &b->head;
+ 
+-	err = __sock_map_ctx_update_elem(map, progs, sock, NULL, key);
++	err = __sock_map_ctx_update_elem(map, progs, sock, key);
+ 	if (err)
+ 		goto err;
+ 
+@@ -2379,8 +2396,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
+ 	}
+ 
+ 	rcu_assign_pointer(e->hash_link, l_new);
+-	rcu_assign_pointer(e->htab,
+-			   container_of(map, struct bpf_htab, map));
++	e->map = map;
+ 	spin_lock_bh(&psock->maps_lock);
+ 	list_add_tail(&e->list, &psock->maps);
+ 	spin_unlock_bh(&psock->maps_lock);
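
The sockmap hunks above replace the lock-free xchg()/cmpxchg() slot updates with a per-map raw spinlock, move the map-entry bookkeeping (e->map, list insertion) out of the shared update helper, and kfree() entries when they are unlinked, closing races between update, delete, teardown, and socket close. A toy user-space model of the locked slot-delete pattern, with a pthread spinlock standing in for the kernel's raw_spinlock_t (illustrative only):

    #include <pthread.h>
    #include <stdio.h>

    /* Instead of xchg() on a slot, take a per-map lock around the
     * read-test-clear sequence so a concurrent close and a delete
     * cannot both "win" the same reference. */
    struct toy_map {
            pthread_spinlock_t lock;
            void *slots[16];
    };

    static void *toy_delete(struct toy_map *m, unsigned int i)
    {
            void *old;

            pthread_spin_lock(&m->lock);
            old = m->slots[i];
            m->slots[i] = NULL;   /* slot mutations happen only under the lock */
            pthread_spin_unlock(&m->lock);
            return old;           /* caller drops the reference, if any */
    }

    int main(void)
    {
            static struct toy_map m;
            int x = 42;

            pthread_spin_init(&m.lock, PTHREAD_PROCESS_PRIVATE);
            m.slots[3] = &x;
            printf("%p %p\n", toy_delete(&m, 3), toy_delete(&m, 3));
            pthread_spin_destroy(&m.lock);
            return 0;
    }
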
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 1b27babc4c78..8ed48ca2cc43 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -549,8 +549,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ 			goto out;
+ 	}
+ 	/* a new mm has just been created */
+-	arch_dup_mmap(oldmm, mm);
+-	retval = 0;
++	retval = arch_dup_mmap(oldmm, mm);
+ out:
+ 	up_write(&mm->mmap_sem);
+ 	flush_tlb_mm(oldmm);
+@@ -1417,7 +1416,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
+ 		return -ENOMEM;
+ 
+ 	atomic_set(&sig->count, 1);
++	spin_lock_irq(&current->sighand->siglock);
+ 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
++	spin_unlock_irq(&current->sighand->siglock);
+ 	return 0;
+ }
+ 
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 5f78c6e41796..0280deac392e 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
+ 	if (WARN_ON(!wq_online))
+ 		return;
+ 
++	lock_map_acquire(&wq->lockdep_map);
++	lock_map_release(&wq->lockdep_map);
++
+ 	mutex_lock(&wq->mutex);
+ 
+ 	/*
+@@ -2843,7 +2846,8 @@ reflush:
+ }
+ EXPORT_SYMBOL_GPL(drain_workqueue);
+ 
+-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
++static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
++			     bool from_cancel)
+ {
+ 	struct worker *worker = NULL;
+ 	struct worker_pool *pool;
+@@ -2885,7 +2889,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+ 	 * workqueues the deadlock happens when the rescuer stalls, blocking
+ 	 * forward progress.
+ 	 */
+-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
++	if (!from_cancel &&
++	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
+ 		lock_map_acquire(&pwq->wq->lockdep_map);
+ 		lock_map_release(&pwq->wq->lockdep_map);
+ 	}
+@@ -2896,6 +2901,27 @@ already_gone:
+ 	return false;
+ }
+ 
++static bool __flush_work(struct work_struct *work, bool from_cancel)
++{
++	struct wq_barrier barr;
++
++	if (WARN_ON(!wq_online))
++		return false;
++
++	if (!from_cancel) {
++		lock_map_acquire(&work->lockdep_map);
++		lock_map_release(&work->lockdep_map);
++	}
++
++	if (start_flush_work(work, &barr, from_cancel)) {
++		wait_for_completion(&barr.done);
++		destroy_work_on_stack(&barr.work);
++		return true;
++	} else {
++		return false;
++	}
++}
++
+ /**
+  * flush_work - wait for a work to finish executing the last queueing instance
+  * @work: the work to flush
+@@ -2909,18 +2935,7 @@ already_gone:
+  */
+ bool flush_work(struct work_struct *work)
+ {
+-	struct wq_barrier barr;
+-
+-	if (WARN_ON(!wq_online))
+-		return false;
+-
+-	if (start_flush_work(work, &barr)) {
+-		wait_for_completion(&barr.done);
+-		destroy_work_on_stack(&barr.work);
+-		return true;
+-	} else {
+-		return false;
+-	}
++	return __flush_work(work, false);
+ }
+ EXPORT_SYMBOL_GPL(flush_work);
+ 
+@@ -2986,7 +3001,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
+ 	 * isn't executing.
+ 	 */
+ 	if (wq_online)
+-		flush_work(work);
++		__flush_work(work, true);
+ 
+ 	clear_work_data(work);
+ 
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 994be4805cec..24c1df0d7466 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
+ 
+ 	limit++;
+ 	if (is_on_stack)
+-		pr_warn("object is on stack, but not annotated\n");
++		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
++			 task_stack_page(current));
+ 	else
+-		pr_warn("object is not on stack, but annotated\n");
++		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
++			 task_stack_page(current));
++
+ 	WARN_ON(1);
+ }
+ 
+diff --git a/mm/Kconfig b/mm/Kconfig
+index ce95491abd6a..94af022b7f3d 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -635,7 +635,7 @@ config DEFERRED_STRUCT_PAGE_INIT
+ 	bool "Defer initialisation of struct pages to kthreads"
+ 	default n
+ 	depends on NO_BOOTMEM
+-	depends on !FLATMEM
++	depends on SPARSEMEM
+ 	depends on !NEED_PER_CPU_KM
+ 	help
+ 	  Ordinarily all struct pages are initialised during early boot in a
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index afa41491d324..2d8376e3c640 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -72,8 +72,12 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+ 		goto out;
+ 	}
+ 
+-	/* Careful about overflows. Len == 0 means "as much as possible" */
+-	endbyte = offset + len;
++	/*
++	 * Careful about overflows. Len == 0 means "as much as possible".  Use
++	 * unsigned math because signed overflows are undefined and UBSan
++	 * complains.
++	 */
++	endbyte = (u64)offset + (u64)len;
+ 	if (!len || endbyte < len)
+ 		endbyte = -1;
+ 	else
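
The mm/fadvise.c hunk above computes the end byte in unsigned arithmetic, so a huge offset plus len no longer triggers undefined signed overflow; the wrapped value, stored back into a signed loff_t, goes negative and is caught by the existing "endbyte < len" test. A user-space sketch of the same computation (the u64-to-s64 conversion is implementation-defined in ISO C but two's-complement on gcc/clang, as the kernel assumes):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t end_byte(int64_t offset, int64_t len)
    {
            /* Add in u64: wraparound is well-defined, unlike signed overflow. */
            int64_t endbyte = (int64_t)((uint64_t)offset + (uint64_t)len);

            if (!len || endbyte < len)
                    endbyte = -1;           /* "as much as possible" */
            else
                    endbyte--;              /* inclusive last byte */
            return endbyte;
    }

    int main(void)
    {
            printf("%" PRId64 "\n", end_byte(0, 4096));          /* 4095 */
            printf("%" PRId64 "\n", end_byte(INT64_MAX, 4096));  /* -1: wrapped */
            return 0;
    }
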
+diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
+index ef456395645a..7fb60dd4be79 100644
+--- a/net/9p/trans_fd.c
++++ b/net/9p/trans_fd.c
+@@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
+ static void p9_conn_cancel(struct p9_conn *m, int err)
+ {
+ 	struct p9_req_t *req, *rtmp;
+-	unsigned long flags;
+ 	LIST_HEAD(cancel_list);
+ 
+ 	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
+ 
+-	spin_lock_irqsave(&m->client->lock, flags);
++	spin_lock(&m->client->lock);
+ 
+ 	if (m->err) {
+-		spin_unlock_irqrestore(&m->client->lock, flags);
++		spin_unlock(&m->client->lock);
+ 		return;
+ 	}
+ 
+@@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
+ 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+ 		list_move(&req->req_list, &cancel_list);
+ 	}
+-	spin_unlock_irqrestore(&m->client->lock, flags);
+ 
+ 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
+ 		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
+@@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
+ 			req->t_err = err;
+ 		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
+ 	}
++	spin_unlock(&m->client->lock);
+ }
+ 
+ static __poll_t
+@@ -375,8 +374,9 @@ static void p9_read_work(struct work_struct *work)
+ 		if (m->req->status != REQ_STATUS_ERROR)
+ 			status = REQ_STATUS_RCVD;
+ 		list_del(&m->req->req_list);
+-		spin_unlock(&m->client->lock);
++		/* update req->status while holding client->lock */
+ 		p9_client_cb(m->client, m->req, status);
++		spin_unlock(&m->client->lock);
+ 		m->rc.sdata = NULL;
+ 		m->rc.offset = 0;
+ 		m->rc.capacity = 0;
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 4c2da2513c8b..2dc1c293092b 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
+ 	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
+ 	if (IS_ERR(chan->vq)) {
+ 		err = PTR_ERR(chan->vq);
+-		goto out_free_vq;
++		goto out_free_chan;
+ 	}
+ 	chan->vq->vdev->priv = chan;
+ 	spin_lock_init(&chan->lock);
+@@ -624,6 +624,7 @@ out_free_tag:
+ 	kfree(tag);
+ out_free_vq:
+ 	vdev->config->del_vqs(vdev);
++out_free_chan:
+ 	kfree(chan);
+ fail:
+ 	return err;
+diff --git a/net/core/xdp.c b/net/core/xdp.c
+index 6771f1855b96..2657056130a4 100644
+--- a/net/core/xdp.c
++++ b/net/core/xdp.c
+@@ -95,23 +95,15 @@ static void __xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+ {
+ 	struct xdp_mem_allocator *xa;
+ 	int id = xdp_rxq->mem.id;
+-	int err;
+ 
+ 	if (id == 0)
+ 		return;
+ 
+ 	mutex_lock(&mem_id_lock);
+ 
+-	xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
+-	if (!xa) {
+-		mutex_unlock(&mem_id_lock);
+-		return;
+-	}
+-
+-	err = rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params);
+-	WARN_ON(err);
+-
+-	call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
++	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
++	if (xa && !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
++		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
+ 
+ 	mutex_unlock(&mem_id_lock);
+ }
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index 2d8efeecf619..055f4bbba86b 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1511,11 +1511,14 @@ nla_put_failure:
+ 
+ static void erspan_setup(struct net_device *dev)
+ {
++	struct ip_tunnel *t = netdev_priv(dev);
++
+ 	ether_setup(dev);
+ 	dev->netdev_ops = &erspan_netdev_ops;
+ 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ 	ip_tunnel_setup(dev, erspan_net_id);
++	t->erspan_ver = 1;
+ }
+ 
+ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 3b2711e33e4c..488b201851d7 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2516,6 +2516,12 @@ static int __net_init tcp_sk_init(struct net *net)
+ 		if (res)
+ 			goto fail;
+ 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
++
++		/* Enforce IP_DF and IPID==0 for RST and ACK packets
++		 * sent in SYN-RECV and TIME-WAIT state.
++		 */
++		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
++
+ 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+ 	}
+ 
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 1dda1341a223..b690132f5da2 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -184,8 +184,9 @@ kill:
+ 				inet_twsk_deschedule_put(tw);
+ 				return TCP_TW_SUCCESS;
+ 			}
++		} else {
++			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ 		}
+-		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ 
+ 		if (tmp_opt.saw_tstamp) {
+ 			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
+diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
+index 622caa4039e0..a5995bb2eaca 100644
+--- a/net/ipv4/tcp_ulp.c
++++ b/net/ipv4/tcp_ulp.c
+@@ -51,7 +51,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
+ #ifdef CONFIG_MODULES
+ 	if (!ulp && capable(CAP_NET_ADMIN)) {
+ 		rcu_read_unlock();
+-		request_module("%s", name);
++		request_module("tcp-ulp-%s", name);
+ 		rcu_read_lock();
+ 		ulp = tcp_ulp_find(name);
+ 	}
+@@ -129,6 +129,8 @@ void tcp_cleanup_ulp(struct sock *sk)
+ 	if (icsk->icsk_ulp_ops->release)
+ 		icsk->icsk_ulp_ops->release(sk);
+ 	module_put(icsk->icsk_ulp_ops->owner);
++
++	icsk->icsk_ulp_ops = NULL;
+ }
+ 
+ /* Change upper layer protocol for socket */
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index d212738e9d10..5516f55e214b 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
+ 		}
+ 	}
+ 
++	lwtstate_put(f6i->fib6_nh.nh_lwtstate);
++
+ 	if (f6i->fib6_nh.nh_dev)
+ 		dev_put(f6i->fib6_nh.nh_dev);
+ 
+@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
+ 					fib6_clean_expires(iter);
+ 				else
+ 					fib6_set_expires(iter, rt->expires);
+-				fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
++
++				if (rt->fib6_pmtu)
++					fib6_metric_set(iter, RTAX_MTU,
++							rt->fib6_pmtu);
+ 				return -EEXIST;
+ 			}
+ 			/* If we have the same destination and the same metric,
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index cd2cfb04e5d8..7ec997fcbc43 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1776,6 +1776,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
+ 	if (data[IFLA_GRE_COLLECT_METADATA])
+ 		parms->collect_md = true;
+ 
++	parms->erspan_ver = 1;
+ 	if (data[IFLA_GRE_ERSPAN_VER])
+ 		parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+ 
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index c72ae3a4fe09..c31a7c4a9249 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	}
+ 
+ 	mtu = dst_mtu(dst);
+-	if (!skb->ignore_df && skb->len > mtu) {
++	if (skb->len > mtu) {
+ 		skb_dst_update_pmtu(skb, mtu);
+ 
+ 		if (skb->protocol == htons(ETH_P_IPV6)) {
+@@ -1102,7 +1102,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
+ 	}
+ 
+ 	t = rtnl_dereference(ip6n->tnls_wc[0]);
+-	unregister_netdevice_queue(t->dev, list);
++	if (t)
++		unregister_netdevice_queue(t->dev, list);
+ }
+ 
+ static int __net_init vti6_init_net(struct net *net)
+@@ -1114,6 +1115,8 @@ static int __net_init vti6_init_net(struct net *net)
+ 	ip6n->tnls[0] = ip6n->tnls_wc;
+ 	ip6n->tnls[1] = ip6n->tnls_r_l;
+ 
++	if (!net_has_fallback_tunnels(net))
++		return 0;
+ 	err = -ENOMEM;
+ 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+ 					NET_NAME_UNKNOWN, vti6_dev_setup);
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index 0fe61ede77c6..c3c6b09acdc4 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr)
+ 	return addr_type & IPV6_ADDR_UNICAST;
+ }
+ 
++static bool rpfilter_addr_linklocal(const struct in6_addr *addr)
++{
++	int addr_type = ipv6_addr_type(addr);
++	return addr_type & IPV6_ADDR_LINKLOCAL;
++}
++
+ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ 				     const struct net_device *dev, u8 flags)
+ {
+@@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ 	}
+ 
+ 	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+-	if ((flags & XT_RPFILTER_LOOSE) == 0)
++
++	if (rpfilter_addr_linklocal(&iph->saddr)) {
++		lookup_flags |= RT6_LOOKUP_F_IFACE;
++		fl6.flowi6_oif = dev->ifindex;
++	} else if ((flags & XT_RPFILTER_LOOSE) == 0)
+ 		fl6.flowi6_oif = dev->ifindex;
+ 
+ 	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 7208c16302f6..18e00ce1719a 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -956,7 +956,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
+ 	rt->dst.error = 0;
+ 	rt->dst.output = ip6_output;
+ 
+-	if (ort->fib6_type == RTN_LOCAL) {
++	if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
+ 		rt->dst.input = ip6_input;
+ 	} else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
+ 		rt->dst.input = ip6_mc_input;
+@@ -996,7 +996,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
+ 	rt->rt6i_src = ort->fib6_src;
+ #endif
+ 	rt->rt6i_prefsrc = ort->fib6_prefsrc;
+-	rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
+ }
+ 
+ static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 0679dd101e72..7ca926a03b81 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1972,13 +1972,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
+ 		/* the destination server is not available */
+ 
+-		if (sysctl_expire_nodest_conn(ipvs)) {
++		__u32 flags = cp->flags;
++
++		/* when the timer is already started, silently drop the packet. */
++		if (timer_pending(&cp->timer))
++			__ip_vs_conn_put(cp);
++		else
++			ip_vs_conn_put(cp);
++
++		if (sysctl_expire_nodest_conn(ipvs) &&
++		    !(flags & IP_VS_CONN_F_ONE_PACKET)) {
+ 			/* try to expire the connection immediately */
+ 			ip_vs_conn_expire_now(cp);
+ 		}
+-		/* don't restart its timer, and silently
+-		   drop the packet. */
+-		__ip_vs_conn_put(cp);
++
+ 		return NF_DROP;
+ 	}
+ 
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 20a2e37c76d1..e952eedf44b4 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -821,6 +821,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[])
+ #endif
+ }
+ 
++static int ctnetlink_start(struct netlink_callback *cb)
++{
++	const struct nlattr * const *cda = cb->data;
++	struct ctnetlink_filter *filter = NULL;
++
++	if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
++		filter = ctnetlink_alloc_filter(cda);
++		if (IS_ERR(filter))
++			return PTR_ERR(filter);
++	}
++
++	cb->data = filter;
++	return 0;
++}
++
+ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
+ {
+ 	struct ctnetlink_filter *filter = data;
+@@ -1240,19 +1255,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
+ 
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ 		struct netlink_dump_control c = {
++			.start = ctnetlink_start,
+ 			.dump = ctnetlink_dump_table,
+ 			.done = ctnetlink_done,
++			.data = (void *)cda,
+ 		};
+ 
+-		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+-			struct ctnetlink_filter *filter;
+-
+-			filter = ctnetlink_alloc_filter(cda);
+-			if (IS_ERR(filter))
+-				return PTR_ERR(filter);
+-
+-			c.data = filter;
+-		}
+ 		return netlink_dump_start(ctnl, skb, nlh, &c);
+ 	}
+ 
+diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
+index a0e5adf0b3b6..8fa8bf7c48e6 100644
+--- a/net/netfilter/nfnetlink_acct.c
++++ b/net/netfilter/nfnetlink_acct.c
+@@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
+ 	[NFACCT_FILTER_VALUE]	= { .type = NLA_U32 },
+ };
+ 
+-static struct nfacct_filter *
+-nfacct_filter_alloc(const struct nlattr * const attr)
++static int nfnl_acct_start(struct netlink_callback *cb)
+ {
+-	struct nfacct_filter *filter;
++	const struct nlattr *const attr = cb->data;
+ 	struct nlattr *tb[NFACCT_FILTER_MAX + 1];
++	struct nfacct_filter *filter;
+ 	int err;
+ 
++	if (!attr)
++		return 0;
++
+ 	err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
+ 			       NULL);
+ 	if (err < 0)
+-		return ERR_PTR(err);
++		return err;
+ 
+ 	if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
+-		return ERR_PTR(-EINVAL);
++		return -EINVAL;
+ 
+ 	filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
+ 	if (!filter)
+-		return ERR_PTR(-ENOMEM);
++		return -ENOMEM;
+ 
+ 	filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
+ 	filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
++	cb->data = filter;
+ 
+-	return filter;
++	return 0;
+ }
+ 
+ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
+@@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ 		struct netlink_dump_control c = {
+ 			.dump = nfnl_acct_dump,
++			.start = nfnl_acct_start,
+ 			.done = nfnl_acct_done,
++			.data = (void *)tb[NFACCT_FILTER],
+ 		};
+ 
+-		if (tb[NFACCT_FILTER]) {
+-			struct nfacct_filter *filter;
+-
+-			filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
+-			if (IS_ERR(filter))
+-				return PTR_ERR(filter);
+-
+-			c.data = filter;
+-		}
+ 		return netlink_dump_start(nfnl, skb, nlh, &c);
+ 	}
+ 
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index d0d8397c9588..aecadd471e1d 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -1178,12 +1178,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
+ 	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
+ 		return NULL;
+ 
+-	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
+-	 * work reasonably well if sz is too large and bail out rather
+-	 * than shoot all processes down before realizing there is nothing
+-	 * more to reclaim.
+-	 */
+-	info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
++	info = kvmalloc(sz, GFP_KERNEL_ACCOUNT);
+ 	if (!info)
+ 		return NULL;
+ 
+diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
+index d152e48ea371..8596eed6d9a8 100644
+--- a/net/rds/ib_frmr.c
++++ b/net/rds/ib_frmr.c
+@@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
+ 			 pool->fmr_attr.max_pages);
+ 	if (IS_ERR(frmr->mr)) {
+ 		pr_warn("RDS/IB: %s failed to allocate MR", __func__);
++		err = PTR_ERR(frmr->mr);
+ 		goto out_no_cigar;
+ 	}
+ 
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 20d7d36b2fc9..005cb21348c9 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -265,10 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
+ #endif
+ 
+ /* called when adding new meta information
+- * under ife->tcf_lock for existing action
+ */
+-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+-				void *val, int len, bool exists)
++static int load_metaops_and_vet(u32 metaid, void *val, int len)
+ {
+ 	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ 	int ret = 0;
+@@ -276,13 +274,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+ 	if (!ops) {
+ 		ret = -ENOENT;
+ #ifdef CONFIG_MODULES
+-		if (exists)
+-			spin_unlock_bh(&ife->tcf_lock);
+ 		rtnl_unlock();
+ 		request_module("ife-meta-%s", ife_meta_id2name(metaid));
+ 		rtnl_lock();
+-		if (exists)
+-			spin_lock_bh(&ife->tcf_lock);
+ 		ops = find_ife_oplist(metaid);
+ #endif
+ 	}
+@@ -299,24 +293,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
+ }
+ 
+ /* called when adding new meta information
+- * under ife->tcf_lock for existing action
+ */
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+-			int len, bool atomic)
++static int __add_metainfo(const struct tcf_meta_ops *ops,
++			  struct tcf_ife_info *ife, u32 metaid, void *metaval,
++			  int len, bool atomic, bool exists)
+ {
+ 	struct tcf_meta_info *mi = NULL;
+-	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ 	int ret = 0;
+ 
+-	if (!ops)
+-		return -ENOENT;
+-
+ 	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
+-	if (!mi) {
+-		/*put back what find_ife_oplist took */
+-		module_put(ops->owner);
++	if (!mi)
+ 		return -ENOMEM;
+-	}
+ 
+ 	mi->metaid = metaid;
+ 	mi->ops = ops;
+@@ -324,17 +311,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+ 		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
+ 		if (ret != 0) {
+ 			kfree(mi);
+-			module_put(ops->owner);
+ 			return ret;
+ 		}
+ 	}
+ 
++	if (exists)
++		spin_lock_bh(&ife->tcf_lock);
+ 	list_add_tail(&mi->metalist, &ife->metalist);
++	if (exists)
++		spin_unlock_bh(&ife->tcf_lock);
+ 
+ 	return ret;
+ }
+ 
+-static int use_all_metadata(struct tcf_ife_info *ife)
++static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
++				    struct tcf_ife_info *ife, u32 metaid,
++				    bool exists)
++{
++	int ret;
++
++	if (!try_module_get(ops->owner))
++		return -ENOENT;
++	ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++	if (ret)
++		module_put(ops->owner);
++	return ret;
++}
++
++static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
++			int len, bool exists)
++{
++	const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
++	int ret;
++
++	if (!ops)
++		return -ENOENT;
++	ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++	if (ret)
++		/* put back what find_ife_oplist took */
++		module_put(ops->owner);
++	return ret;
++}
++
++static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ {
+ 	struct tcf_meta_ops *o;
+ 	int rc = 0;
+@@ -342,7 +361,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
+ 
+ 	read_lock(&ife_mod_lock);
+ 	list_for_each_entry(o, &ifeoplist, list) {
+-		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
++		rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
+ 		if (rc == 0)
+ 			installed += 1;
+ 	}
+@@ -393,7 +412,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ 	struct tcf_meta_info *e, *n;
+ 
+ 	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
+-		module_put(e->ops->owner);
+ 		list_del(&e->metalist);
+ 		if (e->metaval) {
+ 			if (e->ops->release)
+@@ -401,6 +419,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ 			else
+ 				kfree(e->metaval);
+ 		}
++		module_put(e->ops->owner);
+ 		kfree(e);
+ 	}
+ }
+@@ -419,7 +438,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
+ 		kfree_rcu(p, rcu);
+ }
+ 
+-/* under ife->tcf_lock for existing action */
+ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ 			     bool exists)
+ {
+@@ -433,7 +451,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ 			val = nla_data(tb[i]);
+ 			len = nla_len(tb[i]);
+ 
+-			rc = load_metaops_and_vet(ife, i, val, len, exists);
++			rc = load_metaops_and_vet(i, val, len);
+ 			if (rc != 0)
+ 				return rc;
+ 
+@@ -531,8 +549,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ 		p->eth_type = ife_type;
+ 	}
+ 
+-	if (exists)
+-		spin_lock_bh(&ife->tcf_lock);
+ 
+ 	if (ret == ACT_P_CREATED)
+ 		INIT_LIST_HEAD(&ife->metalist);
+@@ -544,9 +560,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ metadata_parse_err:
+ 			if (ret == ACT_P_CREATED)
+ 				tcf_idr_release(*a, bind);
+-
+-			if (exists)
+-				spin_unlock_bh(&ife->tcf_lock);
+ 			kfree(p);
+ 			return err;
+ 		}
+@@ -561,18 +574,17 @@ metadata_parse_err:
+ 		 * as we can. You better have at least one else we are
+ 		 * going to bail out
+ 		 */
+-		err = use_all_metadata(ife);
++		err = use_all_metadata(ife, exists);
+ 		if (err) {
+ 			if (ret == ACT_P_CREATED)
+ 				tcf_idr_release(*a, bind);
+-
+-			if (exists)
+-				spin_unlock_bh(&ife->tcf_lock);
+ 			kfree(p);
+ 			return err;
+ 		}
+ 	}
+ 
++	if (exists)
++		spin_lock_bh(&ife->tcf_lock);
+ 	ife->tcf_action = parm->action;
+ 	if (exists)
+ 		spin_unlock_bh(&ife->tcf_lock);
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 8a925c72db5f..bad475c87688 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
+ {
+ 	struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
+ 
++	if (!keys_start)
++		goto nla_failure;
+ 	for (; n > 0; n--) {
+ 		struct nlattr *key_start;
+ 
+ 		key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
++		if (!key_start)
++			goto nla_failure;
+ 
+ 		if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
+-		    nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
+-			nlmsg_trim(skb, keys_start);
+-			return -EINVAL;
+-		}
++		    nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
++			goto nla_failure;
+ 
+ 		nla_nest_end(skb, key_start);
+ 
+@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
+ 	nla_nest_end(skb, keys_start);
+ 
+ 	return 0;
++nla_failure:
++	nla_nest_cancel(skb, keys_start);
++	return -EINVAL;
+ }
+ 
+ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+@@ -395,7 +400,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
+ 	opt->bindcnt = p->tcf_bindcnt - bind;
+ 
+ 	if (p->tcfp_keys_ex) {
+-		tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
++		if (tcf_pedit_key_ex_dump(skb,
++					  p->tcfp_keys_ex,
++					  p->tcfp_nkeys))
++			goto nla_put_failure;
+ 
+ 		if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
+ 			goto nla_put_failure;
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index fb861f90fde6..260749956ef3 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -912,6 +912,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 	struct nlattr *opt = tca[TCA_OPTIONS];
+ 	struct nlattr *tb[TCA_U32_MAX + 1];
+ 	u32 htid, flags = 0;
++	size_t sel_size;
+ 	int err;
+ #ifdef CONFIG_CLS_U32_PERF
+ 	size_t size;
+@@ -1074,8 +1075,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 	}
+ 
+ 	s = nla_data(tb[TCA_U32_SEL]);
++	sel_size = struct_size(s, keys, s->nkeys);
++	if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
++		err = -EINVAL;
++		goto erridr;
++	}
+ 
+-	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
++	n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
+ 	if (n == NULL) {
+ 		err = -ENOBUFS;
+ 		goto erridr;
+@@ -1090,7 +1096,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
+ 	}
+ #endif
+ 
+-	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
++	memcpy(&n->sel, s, sel_size);
+ 	RCU_INIT_POINTER(n->ht_up, ht);
+ 	n->handle = handle;
+ 	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
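
The cls_u32 hunk above checks that the TCA_U32_SEL attribute is actually large enough for the selector header plus all s->nkeys keys before allocating and copying; the kernel's struct_size() also saturates instead of wrapping if the multiplication would overflow. A user-space sketch of the same check with stand-in types (illustrative only):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct key_like { uint32_t mask, val; int off, offmask; };

    /* Flexible-array layout like struct tc_u32_sel. */
    struct sel_like {
            unsigned char nkeys;
            struct key_like keys[];
    };

    /* Minimal stand-in for struct_size(): header plus n elements,
     * saturating to SIZE_MAX if the math would wrap. */
    static size_t struct_size_like(size_t hdr, size_t elem, size_t n)
    {
            if (n && elem > (SIZE_MAX - hdr) / n)
                    return SIZE_MAX;
            return hdr + n * elem;
    }

    int main(void)
    {
            size_t attr_len = 64;        /* pretend nla_len(tb[TCA_U32_SEL]) */
            unsigned char nkeys = 200;   /* attacker-chosen key count */
            size_t need = struct_size_like(sizeof(struct sel_like),
                                           sizeof(struct key_like), nkeys);

            /* The patched code rejects the config rather than copying
             * past the end of the attribute. */
            printf("%s\n", attr_len < need ? "reject: -EINVAL" : "ok");
            return 0;
    }
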
+diff --git a/net/sctp/proc.c b/net/sctp/proc.c
+index ef5c9a82d4e8..a644292f9faf 100644
+--- a/net/sctp/proc.c
++++ b/net/sctp/proc.c
+@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
+ struct sctp_ht_iter {
+ 	struct seq_net_private p;
+ 	struct rhashtable_iter hti;
+-	int start_fail;
+ };
+ 
+ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
+ 
+ 	sctp_transport_walk_start(&iter->hti);
+ 
+-	iter->start_fail = 0;
+ 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
+ }
+ 
+@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
+ {
+ 	struct sctp_ht_iter *iter = seq->private;
+ 
+-	if (iter->start_fail)
+-		return;
+ 	sctp_transport_walk_stop(&iter->hti);
+ }
+ 
+@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
+ 	}
+ 
+ 	transport = (struct sctp_transport *)v;
+-	if (!sctp_transport_hold(transport))
+-		return 0;
+ 	assoc = transport->asoc;
+ 	epb = &assoc->base;
+ 	sk = epb->sk;
+@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
+ 	}
+ 
+ 	transport = (struct sctp_transport *)v;
+-	if (!sctp_transport_hold(transport))
+-		return 0;
+ 	assoc = transport->asoc;
+ 
+ 	list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index ce620e878538..50ee07cd20c4 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4881,9 +4881,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
+ 			break;
+ 		}
+ 
++		if (!sctp_transport_hold(t))
++			continue;
++
+ 		if (net_eq(sock_net(t->asoc->base.sk), net) &&
+ 		    t->asoc->peer.primary_path == t)
+ 			break;
++
++		sctp_transport_put(t);
+ 	}
+ 
+ 	return t;
+@@ -4893,13 +4898,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
+ 					      struct rhashtable_iter *iter,
+ 					      int pos)
+ {
+-	void *obj = SEQ_START_TOKEN;
++	struct sctp_transport *t;
+ 
+-	while (pos && (obj = sctp_transport_get_next(net, iter)) &&
+-	       !IS_ERR(obj))
+-		pos--;
++	if (!pos)
++		return SEQ_START_TOKEN;
+ 
+-	return obj;
++	while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
++		if (!--pos)
++			break;
++		sctp_transport_put(t);
++	}
++
++	return t;
+ }
+ 
+ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
+@@ -4958,8 +4968,6 @@ again:
+ 
+ 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
+ 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
+-		if (!sctp_transport_hold(tsp))
+-			continue;
+ 		ret = cb(tsp, p);
+ 		if (ret)
+ 			break;
+diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+index 8654494b4d0a..834eb2b9e41b 100644
+--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
++++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
+@@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ 	struct scatterlist              sg[1];
+ 	int err = -1;
+ 	u8 *checksumdata;
+-	u8 rc4salt[4];
++	u8 *rc4salt;
+ 	struct crypto_ahash *md5;
+ 	struct crypto_ahash *hmac_md5;
+ 	struct ahash_request *req;
+@@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
+ 		return GSS_S_FAILURE;
+ 	}
+ 
++	rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
++	if (!rc4salt)
++		return GSS_S_FAILURE;
++
+ 	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
+ 		dprintk("%s: invalid usage value %u\n", __func__, usage);
+-		return GSS_S_FAILURE;
++		goto out_free_rc4salt;
+ 	}
+ 
+ 	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
+ 	if (!checksumdata)
+-		return GSS_S_FAILURE;
++		goto out_free_rc4salt;
+ 
+ 	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+ 	if (IS_ERR(md5))
+@@ -258,6 +262,8 @@ out_free_md5:
+ 	crypto_free_ahash(md5);
+ out_free_cksum:
+ 	kfree(checksumdata);
++out_free_rc4salt:
++	kfree(rc4salt);
+ 	return err ? GSS_S_FAILURE : 0;
+ }
+ 
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index bebe88cae07b..ff968c7afef6 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 
+ struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
+ {
+-	u64 value = (u64)node << 32 | port;
+ 	struct tipc_dest *dst;
+ 
+ 	list_for_each_entry(dst, l, list) {
+-		if (dst->value != value)
+-			continue;
+-		return dst;
++		if (dst->node == node && dst->port == port)
++			return dst;
+ 	}
+ 	return NULL;
+ }
+ 
+ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
+ {
+-	u64 value = (u64)node << 32 | port;
+ 	struct tipc_dest *dst;
+ 
+ 	if (tipc_dest_find(l, node, port))
+@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
+ 	dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
+ 	if (unlikely(!dst))
+ 		return false;
+-	dst->value = value;
++	dst->node = node;
++	dst->port = port;
+ 	list_add(&dst->list, l);
+ 	return true;
+ }
+diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
+index 0febba41da86..892bd750b85f 100644
+--- a/net/tipc/name_table.h
++++ b/net/tipc/name_table.h
+@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
+ 
+ struct tipc_dest {
+ 	struct list_head list;
+-	union {
+-		struct {
+-			u32 port;
+-			u32 node;
+-		};
+-		u64 value;
+-	};
++	u32 port;
++	u32 node;
+ };
+ 
+ struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
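
The tipc_dest change above drops the union that overlaid the {port, node} pair with a single u64, most likely because the packed "(u64)node << 32 | port" comparison only matched the struct view on one byte order; that rationale is inferred, not stated in the hunk. A small user-space demonstration of the hazard (C11/gcc anonymous struct):

    #include <stdint.h>
    #include <stdio.h>

    /* The removed layout: whether "node" lands in the high or low half
     * of "value" depends on host endianness. */
    union old_dest {
            struct { uint32_t port, node; };
            uint64_t value;
    };

    int main(void)
    {
            union old_dest d = { .port = 0x1111, .node = 0x2222 };
            uint64_t key = (uint64_t)0x2222 << 32 | 0x1111;

            printf("packed key matches struct view: %s\n",
                   d.value == key ? "yes (little-endian)" : "no (big-endian)");
            return 0;
    }
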
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 930852c54d7a..0a5fa347135e 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2675,6 +2675,8 @@ void tipc_sk_reinit(struct net *net)
+ 
+ 		rhashtable_walk_stop(&iter);
+ 	} while (tsk == ERR_PTR(-EAGAIN));
++
++	rhashtable_walk_exit(&iter);
+ }
+ 
+ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index 301f22430469..45188d920013 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -45,6 +45,7 @@
+ MODULE_AUTHOR("Mellanox Technologies");
+ MODULE_DESCRIPTION("Transport Layer Security Support");
+ MODULE_LICENSE("Dual BSD/GPL");
++MODULE_ALIAS_TCP_ULP("tls");
+ 
+ enum {
+ 	TLSV4,
+diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
+index 4b4d78fffe30..da9070889223 100644
+--- a/samples/bpf/xdp_redirect_cpu_user.c
++++ b/samples/bpf/xdp_redirect_cpu_user.c
+@@ -679,8 +679,9 @@ int main(int argc, char **argv)
+ 		return EXIT_FAIL_OPTION;
+ 	}
+ 
+-	/* Remove XDP program when program is interrupted */
++	/* Remove XDP program when program is interrupted or killed */
+ 	signal(SIGINT, int_exit);
++	signal(SIGTERM, int_exit);
+ 
+ 	if (bpf_set_link_xdp_fd(ifindex, prog_fd[prog_num], xdp_flags) < 0) {
+ 		fprintf(stderr, "link set xdp fd failed\n");
+diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
+index e4e9ba52bff0..bb278447299c 100644
+--- a/samples/bpf/xdp_rxq_info_user.c
++++ b/samples/bpf/xdp_rxq_info_user.c
+@@ -534,8 +534,9 @@ int main(int argc, char **argv)
+ 		exit(EXIT_FAIL_BPF);
+ 	}
+ 
+-	/* Remove XDP program when program is interrupted */
++	/* Remove XDP program when program is interrupted or killed */
+ 	signal(SIGINT, int_exit);
++	signal(SIGTERM, int_exit);
+ 
+ 	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
+ 		fprintf(stderr, "link set xdp fd failed\n");
+diff --git a/scripts/coccicheck b/scripts/coccicheck
+index 9fedca611b7f..e04d328210ac 100755
+--- a/scripts/coccicheck
++++ b/scripts/coccicheck
+@@ -128,9 +128,10 @@ run_cmd_parmap() {
+ 	fi
+ 	echo $@ >>$DEBUG_FILE
+ 	$@ 2>>$DEBUG_FILE
+-	if [[ $? -ne 0 ]]; then
++	err=$?
++	if [[ $err -ne 0 ]]; then
+ 		echo "coccicheck failed"
+-		exit $?
++		exit $err
+ 	fi
+ }
+ 
+diff --git a/scripts/depmod.sh b/scripts/depmod.sh
+index 999d585eaa73..e5f0aad75b96 100755
+--- a/scripts/depmod.sh
++++ b/scripts/depmod.sh
+@@ -15,9 +15,9 @@ if ! test -r System.map ; then
+ fi
+ 
+ if [ -z $(command -v $DEPMOD) ]; then
+-	echo "'make modules_install' requires $DEPMOD. Please install it." >&2
++	echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
+ 	echo "This is probably in the kmod package." >&2
+-	exit 1
++	exit 0
+ fi
+ 
+ # older versions of depmod require the version string to start with three
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 1663fb19343a..b95cf57782a3 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -672,7 +672,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
+ 			if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
+ 				break;
+ 			if (symname[0] == '.') {
+-				char *munged = strdup(symname);
++				char *munged = NOFAIL(strdup(symname));
+ 				munged[0] = '_';
+ 				munged[1] = toupper(munged[1]);
+ 				symname = munged;
+@@ -1318,7 +1318,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
+ static char *sec2annotation(const char *s)
+ {
+ 	if (match(s, init_exit_sections)) {
+-		char *p = malloc(20);
++		char *p = NOFAIL(malloc(20));
+ 		char *r = p;
+ 
+ 		*p++ = '_';
+@@ -1338,7 +1338,7 @@ static char *sec2annotation(const char *s)
+ 			strcat(p, " ");
+ 		return r;
+ 	} else {
+-		return strdup("");
++		return NOFAIL(strdup(""));
+ 	}
+ }
+ 
+@@ -2036,7 +2036,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
+ {
+ 	if (buf->size - buf->pos < len) {
+ 		buf->size += len + SZ;
+-		buf->p = realloc(buf->p, buf->size);
++		buf->p = NOFAIL(realloc(buf->p, buf->size));
+ 	}
+ 	strncpy(buf->p + buf->pos, s, len);
+ 	buf->pos += len;
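
The modpost hunks above wrap the remaining unchecked allocations in NOFAIL(), so the host tool fails loudly on allocation failure instead of dereferencing NULL later. A rough user-space equivalent of such a wrapper (the real macro lives in modpost and also reports the failing expression):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Abort with a message on allocation failure instead of handing
     * NULL back to the caller. */
    static void *nofail(void *ptr, const char *what)
    {
            if (!ptr) {
                    fprintf(stderr, "out of memory: %s\n", what);
                    exit(1);
            }
            return ptr;
    }

    int main(void)
    {
            char *s = nofail(strdup("hello"), "strdup");

            puts(s);
            free(s);
            return 0;
    }
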
+diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c
+index b0f9dc3f765a..1a7cec5d9cac 100644
+--- a/security/apparmor/policy_ns.c
++++ b/security/apparmor/policy_ns.c
+@@ -255,7 +255,7 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name,
+ 
+ 	ns = alloc_ns(parent->base.hname, name);
+ 	if (!ns)
+-		return NULL;
++		return ERR_PTR(-ENOMEM);
+ 	ns->level = parent->level + 1;
+ 	mutex_lock_nested(&ns->lock, ns->level);
+ 	error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
+diff --git a/security/keys/dh.c b/security/keys/dh.c
+index b203f7758f97..1a68d27e72b4 100644
+--- a/security/keys/dh.c
++++ b/security/keys/dh.c
+@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ 	}
+ 	dh_inputs.g_size = dlen;
+ 
+-	dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
++	dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+ 	if (dlen < 0) {
+ 		ret = dlen;
+ 		goto out2;
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 79d3709b0671..0b66d7283b00 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1365,13 +1365,18 @@ static int sel_make_bools(struct selinux_fs_info *fsi)
+ 
+ 		ret = -ENOMEM;
+ 		inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
+-		if (!inode)
++		if (!inode) {
++			dput(dentry);
+ 			goto out;
++		}
+ 
+ 		ret = -ENAMETOOLONG;
+ 		len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
+-		if (len >= PAGE_SIZE)
++		if (len >= PAGE_SIZE) {
++			dput(dentry);
++			iput(inode);
+ 			goto out;
++		}
+ 
+ 		isec = (struct inode_security_struct *)inode->i_security;
+ 		ret = security_genfs_sid(fsi->state, "selinuxfs", page,
+@@ -1586,8 +1591,10 @@ static int sel_make_avc_files(struct dentry *dir)
+ 			return -ENOMEM;
+ 
+ 		inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
+-		if (!inode)
++		if (!inode) {
++			dput(dentry);
+ 			return -ENOMEM;
++		}
+ 
+ 		inode->i_fop = files[i].ops;
+ 		inode->i_ino = ++fsi->last_ino;
+@@ -1632,8 +1639,10 @@ static int sel_make_initcon_files(struct dentry *dir)
+ 			return -ENOMEM;
+ 
+ 		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+-		if (!inode)
++		if (!inode) {
++			dput(dentry);
+ 			return -ENOMEM;
++		}
+ 
+ 		inode->i_fop = &sel_initcon_ops;
+ 		inode->i_ino = i|SEL_INITCON_INO_OFFSET;
+@@ -1733,8 +1742,10 @@ static int sel_make_perm_files(char *objclass, int classvalue,
+ 
+ 		rc = -ENOMEM;
+ 		inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+-		if (!inode)
++		if (!inode) {
++			dput(dentry);
+ 			goto out;
++		}
+ 
+ 		inode->i_fop = &sel_perm_ops;
+ 		/* i+1 since perm values are 1-indexed */
+@@ -1763,8 +1774,10 @@ static int sel_make_class_dir_entries(char *classname, int index,
+ 		return -ENOMEM;
+ 
+ 	inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
+-	if (!inode)
++	if (!inode) {
++		dput(dentry);
+ 		return -ENOMEM;
++	}
+ 
+ 	inode->i_fop = &sel_class_ops;
+ 	inode->i_ino = sel_class_to_ino(index);
+@@ -1838,8 +1851,10 @@ static int sel_make_policycap(struct selinux_fs_info *fsi)
+ 			return -ENOMEM;
+ 
+ 		inode = sel_make_inode(fsi->sb, S_IFREG | 0444);
+-		if (inode == NULL)
++		if (inode == NULL) {
++			dput(dentry);
+ 			return -ENOMEM;
++		}
+ 
+ 		inode->i_fop = &sel_policycap_ops;
+ 		inode->i_ino = iter | SEL_POLICYCAP_INO_OFFSET;
+@@ -1932,8 +1947,10 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	ret = -ENOMEM;
+ 	inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
+-	if (!inode)
++	if (!inode) {
++		dput(dentry);
+ 		goto err;
++	}
+ 
+ 	inode->i_ino = ++fsi->last_ino;
+ 	isec = (struct inode_security_struct *)inode->i_security;
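
Every selinuxfs hunk above fixes the same leak pattern: once a dentry
has been created, any later failure (inode allocation, an over-long
path) must drop that dentry reference, and the inode too once it
exists, before bailing out. A stand-alone sketch of the
release-what-you-already-hold rule (illustrative refcounting, not the
kernel's dcache API):

	#include <stdio.h>
	#include <stdlib.h>

	struct ref { int count; };

	static struct ref *get_ref(void)
	{
		struct ref *r = calloc(1, sizeof(*r));

		if (r)
			r->count = 1;
		return r;
	}

	static void put_ref(struct ref *r)
	{
		if (r && --r->count == 0)
			free(r);
	}

	static int make_entry(int simulate_oom)
	{
		struct ref *dentry = get_ref();
		struct ref *inode;

		if (!dentry)
			return -1;

		inode = simulate_oom ? NULL : get_ref();
		if (!inode) {
			put_ref(dentry);	/* the fix: drop what we hold */
			return -1;
		}

		/* ... attach inode to dentry and publish the entry ... */
		put_ref(inode);
		put_ref(dentry);
		return 0;
	}

	int main(void)
	{
		if (make_entry(1) < 0)
			printf("failed cleanly, no references leaked\n");
		return 0;
	}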
+diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
+index 8a0181a2db08..47feef30dadb 100644
+--- a/sound/soc/codecs/rt5677.c
++++ b/sound/soc/codecs/rt5677.c
+@@ -5007,7 +5007,7 @@ static const struct regmap_config rt5677_regmap = {
+ };
+ 
+ static const struct of_device_id rt5677_of_match[] = {
+-	{ .compatible = "realtek,rt5677", RT5677 },
++	{ .compatible = "realtek,rt5677", .data = (const void *)RT5677 },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, rt5677_of_match);
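
The rt5677 fix replaces a positional initializer with an explicit .data
member plus cast. Mixing designated and positional initializers is
fragile: the bare value lands in whichever member happens to follow,
and an integer constant needs the (const void *) cast to sit in a
pointer field cleanly. A miniature of the table (struct and enum names
invented for the example):

	#include <stdio.h>

	enum codec_model { RT5677_DEMO = 5677 };

	struct of_device_id_demo {
		char compatible[64];
		const void *data;
	};

	static const struct of_device_id_demo match_table[] = {
		{ .compatible = "realtek,rt5677",
		  .data = (const void *)RT5677_DEMO },	/* explicit field and cast */
		{ /* sentinel */ }
	};

	int main(void)
	{
		printf("%s -> model %ld\n", match_table[0].compatible,
		       (long)match_table[0].data);
		return 0;
	}

In a real driver the value would typically be read back at probe time,
e.g. via of_device_get_match_data().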
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 7fdfdf3f6e67..14f1b0c0d286 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2432,6 +2432,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
+ 			snd_soc_component_update_bits(component, WM8994_POWER_MANAGEMENT_2,
+ 					    WM8994_OPCLK_ENA, 0);
+ 		}
++		break;
+ 
+ 	default:
+ 		return -EINVAL;
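
The wm8994 fix is a classic missing break: the case that had just
configured OPCLK fell straight through into the default arm and
returned -EINVAL, so a successful sysclk setup was reported as a
failure. In miniature:

	#include <errno.h>
	#include <stdio.h>

	static int set_clock(int id)
	{
		switch (id) {
		case 0:
			/* ... enable the clock ... */
			break;	/* without this we fall into default */
		default:
			return -EINVAL;
		}
		return 0;
	}

	int main(void)
	{
		printf("set_clock(0) = %d\n", set_clock(0));	/* 0, not -EINVAL */
		return 0;
	}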
+diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
+index 1120e39c1b00..5ccfce87e693 100644
+--- a/tools/perf/arch/arm64/util/arm-spe.c
++++ b/tools/perf/arch/arm64/util/arm-spe.c
+@@ -194,6 +194,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
+ 	sper->itr.read_finish = arm_spe_read_finish;
+ 	sper->itr.alignment = 0;
+ 
++	*err = 0;
+ 	return &sper->itr;
+ }
+ 
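
arm_spe_recording_init() hands status back through an out-parameter,
and the success path never cleared it, so whatever stale value *err
held could make callers treat a good init as a failure. The rule the
fix enforces: a function with an error out-parameter must assign it on
every path. Illustrative sketch (not the perf auxtrace API):

	#include <stdio.h>
	#include <stdlib.h>

	static void *recording_init(int *err)
	{
		void *state = malloc(32);

		if (!state) {
			*err = -1;
			return NULL;
		}

		*err = 0;	/* the fix: never leave the slot stale */
		return state;
	}

	int main(void)
	{
		int err = -999;	/* stale garbage a caller might start with */
		void *s = recording_init(&err);

		printf("err = %d\n", err);	/* 0 on success */
		free(s);
		return 0;
	}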
+diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
+index 53d83d7e6a09..20e7d74d86cd 100644
+--- a/tools/perf/arch/powerpc/util/sym-handling.c
++++ b/tools/perf/arch/powerpc/util/sym-handling.c
+@@ -141,8 +141,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
+ 	for (i = 0; i < ntevs; i++) {
+ 		tev = &pev->tevs[i];
+ 		map__for_each_symbol(map, sym, tmp) {
+-			if (map->unmap_ip(map, sym->start) == tev->point.address)
++			if (map->unmap_ip(map, sym->start) == tev->point.address) {
+ 				arch__fix_tev_from_maps(pev, tev, map, sym);
++				break;
++			}
+ 		}
+ 	}
+ }
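
Here the added break ends the symbol walk at the first address match,
so arch__fix_tev_from_maps() runs once per trace event instead of once
per aliasing symbol. The shape of the fix, reduced to a linear search:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long addrs[] = { 0x100, 0x2a0, 0x2a0, 0x400 };
		const unsigned long target = 0x2a0;
		int i, hit = -1;

		for (i = 0; i < 4; i++) {
			if (addrs[i] == target) {
				hit = i;
				break;	/* first match wins; stop scanning */
			}
		}
		printf("match at index %d\n", hit);
		return 0;
	}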
+diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
+index 5be021701f34..cf8bd123cf73 100644
+--- a/tools/perf/util/namespaces.c
++++ b/tools/perf/util/namespaces.c
+@@ -139,6 +139,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
+ {
+ 	struct nsinfo *nnsi;
+ 
++	if (nsi == NULL)
++		return NULL;
++
+ 	nnsi = calloc(1, sizeof(*nnsi));
+ 	if (nnsi != NULL) {
+ 		nnsi->pid = nsi->pid;
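
nsinfo__copy() gains a NULL guard so copying an absent namespace info
yields NULL rather than a crash, the same convention free(NULL)
follows. A small stand-alone analogue (struct fields invented):

	#include <stdio.h>
	#include <stdlib.h>

	struct info { int pid; };

	static struct info *info_copy(const struct info *src)
	{
		struct info *dst;

		if (src == NULL)
			return NULL;	/* the fix: tolerate a NULL source */

		dst = calloc(1, sizeof(*dst));
		if (dst)
			dst->pid = src->pid;
		return dst;
	}

	int main(void)
	{
		struct info *copy = info_copy(NULL);

		printf("copy of NULL is %s\n", copy ? "non-NULL" : "NULL");
		free(copy);
		return 0;
	}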
+diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
+index 66d31de60b9a..9d7166dfad1e 100644
+--- a/tools/testing/selftests/powerpc/harness.c
++++ b/tools/testing/selftests/powerpc/harness.c
+@@ -85,13 +85,13 @@ wait:
+ 	return status;
+ }
+ 
+-static void alarm_handler(int signum)
++static void sig_handler(int signum)
+ {
+-	/* Jut wake us up from waitpid */
++	/* Just wake us up from waitpid */
+ }
+ 
+-static struct sigaction alarm_action = {
+-	.sa_handler = alarm_handler,
++static struct sigaction sig_action = {
++	.sa_handler = sig_handler,
+ };
+ 
+ void test_harness_set_timeout(uint64_t time)
+@@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
+ 	test_start(name);
+ 	test_set_git_version(GIT_VERSION);
+ 
+-	if (sigaction(SIGALRM, &alarm_action, NULL)) {
+-		perror("sigaction");
++	if (sigaction(SIGINT, &sig_action, NULL)) {
++		perror("sigaction (sigint)");
++		test_error(name);
++		return 1;
++	}
++
++	if (sigaction(SIGALRM, &sig_action, NULL)) {
++		perror("sigaction (sigalrm)");
+ 		test_error(name);
+ 		return 1;
+ 	}
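
The harness already used an empty SIGALRM handler, installed without
SA_RESTART, purely so the signal interrupts waitpid() with EINTR and
the parent regains control; this change registers SIGINT the same way,
presumably so a Ctrl-C lets the harness kill and reap the child under
test instead of orphaning it. A stand-alone sketch of the trick:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void sig_handler(int signum)
	{
		/* Just wake us up from waitpid() */
		(void)signum;
	}

	int main(void)
	{
		struct sigaction sa = { .sa_handler = sig_handler };
		pid_t child;
		int status;

		sigaction(SIGALRM, &sa, NULL);	/* no SA_RESTART on purpose */
		sigaction(SIGINT, &sa, NULL);

		child = fork();
		if (child == 0) {
			pause();	/* child hangs, like a stuck test */
			_exit(0);
		}

		alarm(2);	/* timeout path; Ctrl-C exercises the SIGINT path */
		if (waitpid(child, &status, 0) < 0 && errno == EINTR) {
			printf("wait interrupted; killing child\n");
			kill(child, SIGKILL);
			waitpid(child, &status, 0);
		}
		return 0;
	}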

