From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:5.8 commit in: /
Date: Wed, 23 Sep 2020 12:14:17 +0000 (UTC)
Message-ID: <1600863243.d90887db45c919719e541d2b025a26f8f23958c3.mpagano@gentoo>

commit:     d90887db45c919719e541d2b025a26f8f23958c3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 23 12:14:03 2020 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 23 12:14:03 2020 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d90887db

Linux patch 5.8.11

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README             |    4 +
 1010_linux-5.8.11.patch | 3917 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3921 insertions(+)

diff --git a/0000_README b/0000_README
index f2e8a67..e5b0bab 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-5.8.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.8.10
 
+Patch:  1010_linux-5.8.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.8.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-5.8.11.patch b/1010_linux-5.8.11.patch
new file mode 100644
index 0000000..2bc9a4f
--- /dev/null
+++ b/1010_linux-5.8.11.patch
@@ -0,0 +1,3917 @@
+diff --git a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+index 64b2c64ca8065..a1e2be737eec9 100644
+--- a/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
++++ b/Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
+@@ -9,6 +9,14 @@ title: PCIe RC controller on Intel Gateway SoCs
+ maintainers:
+   - Dilip Kota <eswara.kota@linux.intel.com>
+ 
++select:
++  properties:
++    compatible:
++      contains:
++        const: intel,lgm-pcie
++  required:
++    - compatible
++
+ properties:
+   compatible:
+     items:
+diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
+index f5e518d099f2c..62d4ed2d7fd79 100644
+--- a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
++++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
+@@ -23,8 +23,8 @@ Required properties:
+ 
+ - compatible:
+     Must be one of :
+-    "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs
+-    "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
++    "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on BRCMSTB SoCs
++    "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi" : Second Instance of MSPI
+ 						   BRCMSTB  SoCs
+     "brcm,spi-bcm7425-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
+     			     			  			    BRCMSTB  SoCs
+@@ -36,8 +36,8 @@ Required properties:
+     			     			  			    BRCMSTB  SoCs
+     "brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
+     			     			  			    BRCMSTB  SoCs
+-    "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi"     : MSPI+BSPI on Cygnus, NSP
+-    "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi"     : NS2 SoCs
++    "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"     : MSPI+BSPI on Cygnus, NSP
++    "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi"     : NS2 SoCs
+ 
+ - reg:
+     Define the bases and ranges of the associated I/O address spaces.
+@@ -86,7 +86,7 @@ BRCMSTB SoC Example:
+     spi@f03e3400 {
+ 		#address-cells = <0x1>;
+ 		#size-cells = <0x0>;
+-		compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi";
++		compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi";
+ 		reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>;
+ 		reg-names = "cs_reg", "mspi", "bspi";
+ 		interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>;
+@@ -149,7 +149,7 @@ BRCMSTB SoC Example:
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		clocks = <&upg_fixed>;
+-		compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi";
++		compatible = "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi";
+ 		reg = <0xf0416000 0x180>;
+ 		reg-names = "mspi";
+ 		interrupts = <0x14>;
+@@ -160,7 +160,7 @@ BRCMSTB SoC Example:
+ iProc SoC Example:
+ 
+     qspi: spi@18027200 {
+-	compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
++	compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
+ 	reg = <0x18027200 0x184>,
+ 	      <0x18027000 0x124>,
+ 	      <0x1811c408 0x004>,
+@@ -191,7 +191,7 @@ iProc SoC Example:
+  NS2 SoC Example:
+ 
+ 	       qspi: spi@66470200 {
+-		       compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
++		       compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
+ 		       reg = <0x66470200 0x184>,
+ 			     <0x66470000 0x124>,
+ 			     <0x67017408 0x004>,
+diff --git a/Makefile b/Makefile
+index d937530d33427..0b025b3a56401 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 8
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+ 
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 2c0b82db825ba..422ed2e38a6c8 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ 		.desc = "ARM erratum 1418040",
+ 		.capability = ARM64_WORKAROUND_1418040,
+ 		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+-		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
+-			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
++		/*
++		 * We need to allow affected CPUs to come in late, but
++		 * also need the non-affected CPUs to be able to come
++		 * in at any point in time. Wonderful.
++		 */
++		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ 	},
+ #endif
+ #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
+diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
+index 295d66490584b..c07d7a0349410 100644
+--- a/arch/arm64/kernel/paravirt.c
++++ b/arch/arm64/kernel/paravirt.c
+@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
+ 	struct pv_time_stolen_time_region *reg;
+ 
+ 	reg = per_cpu_ptr(&stolen_time_region, cpu);
+-	if (!reg->kaddr) {
+-		pr_warn_once("stolen time enabled but not configured for cpu %d\n",
+-			     cpu);
++
++	/*
++	 * paravirt_steal_clock() may be called before the CPU
++	 * online notification callback runs. Until the callback
++	 * has run we just return zero.
++	 */
++	if (!reg->kaddr)
+ 		return 0;
+-	}
+ 
+ 	return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
+ }
+ 
+-static int stolen_time_dying_cpu(unsigned int cpu)
++static int stolen_time_cpu_down_prepare(unsigned int cpu)
+ {
+ 	struct pv_time_stolen_time_region *reg;
+ 
+@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static int init_stolen_time_cpu(unsigned int cpu)
++static int stolen_time_cpu_online(unsigned int cpu)
+ {
+ 	struct pv_time_stolen_time_region *reg;
+ 	struct arm_smccc_res res;
+@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
+ 	return 0;
+ }
+ 
+-static int pv_time_init_stolen_time(void)
++static int __init pv_time_init_stolen_time(void)
+ {
+ 	int ret;
+ 
+-	ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
+-				"hypervisor/arm/pvtime:starting",
+-				init_stolen_time_cpu, stolen_time_dying_cpu);
++	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
++				"hypervisor/arm/pvtime:online",
++				stolen_time_cpu_online,
++				stolen_time_cpu_down_prepare);
+ 	if (ret < 0)
+ 		return ret;
+ 	return 0;
+ }
+ 
+-static bool has_pv_steal_clock(void)
++static bool __init has_pv_steal_clock(void)
+ {
+ 	struct arm_smccc_res res;
+ 
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index 3cb25b43b368e..1b2d82755e41f 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -141,14 +141,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
+ 	}
+ }
+ 
+-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
++static inline int bpf2a64_offset(int bpf_insn, int off,
+ 				 const struct jit_ctx *ctx)
+ {
+-	int to = ctx->offset[bpf_to];
+-	/* -1 to account for the Branch instruction */
+-	int from = ctx->offset[bpf_from] - 1;
+-
+-	return to - from;
++	/* BPF JMP offset is relative to the next instruction */
++	bpf_insn++;
++	/*
++	 * Whereas arm64 branch instructions encode the offset
++	 * from the branch itself, so we must subtract 1 from the
++	 * instruction offset.
++	 */
++	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
+ }
+ 
+ static void jit_fill_hole(void *area, unsigned int size)
+@@ -578,7 +581,7 @@ emit_bswap_uxt:
+ 
+ 	/* JUMP off */
+ 	case BPF_JMP | BPF_JA:
+-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
++		jmp_offset = bpf2a64_offset(i, off, ctx);
+ 		check_imm26(jmp_offset);
+ 		emit(A64_B(jmp_offset), ctx);
+ 		break;
+@@ -605,7 +608,7 @@ emit_bswap_uxt:
+ 	case BPF_JMP32 | BPF_JSLE | BPF_X:
+ 		emit(A64_CMP(is64, dst, src), ctx);
+ emit_cond_jmp:
+-		jmp_offset = bpf2a64_offset(i + off, i, ctx);
++		jmp_offset = bpf2a64_offset(i, off, ctx);
+ 		check_imm19(jmp_offset);
+ 		switch (BPF_OP(code)) {
+ 		case BPF_JEQ:
+@@ -837,10 +840,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
+ 	const struct bpf_prog *prog = ctx->prog;
+ 	int i;
+ 
++	/*
++	 * - offset[0] offset of the end of prologue,
++	 *   start of the 1st instruction.
++	 * - offset[1] - offset of the end of 1st instruction,
++	 *   start of the 2nd instruction
++	 * [....]
++	 * - offset[3] - offset of the end of 3rd instruction,
++	 *   start of 4th instruction
++	 */
+ 	for (i = 0; i < prog->len; i++) {
+ 		const struct bpf_insn *insn = &prog->insnsi[i];
+ 		int ret;
+ 
++		if (ctx->image == NULL)
++			ctx->offset[i] = ctx->idx;
+ 		ret = build_insn(insn, ctx, extra_pass);
+ 		if (ret > 0) {
+ 			i++;
+@@ -848,11 +862,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
+ 				ctx->offset[i] = ctx->idx;
+ 			continue;
+ 		}
+-		if (ctx->image == NULL)
+-			ctx->offset[i] = ctx->idx;
+ 		if (ret)
+ 			return ret;
+ 	}
++	/*
++	 * offset is allocated with prog->len + 1 so fill in
++	 * the last element with the offset after the last
++	 * instruction (end of program)
++	 */
++	if (ctx->image == NULL)
++		ctx->offset[i] = ctx->idx;
+ 
+ 	return 0;
+ }
+@@ -928,7 +947,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+ 	memset(&ctx, 0, sizeof(ctx));
+ 	ctx.prog = prog;
+ 
+-	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
++	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
+ 	if (ctx.offset == NULL) {
+ 		prog = orig_prog;
+ 		goto out_off;
+@@ -1008,7 +1027,7 @@ skip_init_ctx:
+ 	prog->jited_len = image_size;
+ 
+ 	if (!prog->is_func || extra_pass) {
+-		bpf_prog_fill_jited_linfo(prog, ctx.offset);
++		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
+ out_off:
+ 		kfree(ctx.offset);
+ 		kfree(jit_data);
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index c43ad3b3cea4b..daa24f1e14831 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -876,6 +876,7 @@ config SNI_RM
+ 	select I8253
+ 	select I8259
+ 	select ISA
++	select MIPS_L1_CACHE_SHIFT_6
+ 	select SWAP_IO_SPACE if CPU_BIG_ENDIAN
+ 	select SYS_HAS_CPU_R4X00
+ 	select SYS_HAS_CPU_R5000
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 666d3350b4ac1..6c6836669ce16 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
+ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ {
+ 	switch (type) {
++	case KVM_VM_MIPS_AUTO:
++		break;
+ #ifdef CONFIG_KVM_MIPS_VZ
+ 	case KVM_VM_MIPS_VZ:
+ #else
+diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
+index b09dc844985a8..eeeec18c420a6 100644
+--- a/arch/mips/sni/a20r.c
++++ b/arch/mips/sni/a20r.c
+@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
+ 	},
+ };
+ 
+-static u32 a20r_ack_hwint(void)
++/*
++ * Trigger chipset to update CPU's CAUSE IP field
++ */
++static u32 a20r_update_cause_ip(void)
+ {
+ 	u32 status = read_c0_status();
+ 
+@@ -205,12 +208,14 @@ static void a20r_hwint(void)
+ 	int irq;
+ 
+ 	clear_c0_status(IE_IRQ0);
+-	status = a20r_ack_hwint();
++	status = a20r_update_cause_ip();
+ 	cause = read_c0_cause();
+ 
+ 	irq = ffs(((cause & status) >> 8) & 0xf8);
+ 	if (likely(irq > 0))
+ 		do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
++
++	a20r_update_cause_ip();
+ 	set_c0_status(IE_IRQ0);
+ }
+ 
+diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
+index 08f56af387ac4..534a52ec5e667 100644
+--- a/arch/openrisc/mm/cache.c
++++ b/arch/openrisc/mm/cache.c
+@@ -16,7 +16,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+ 
+-static void cache_loop(struct page *page, const unsigned int reg)
++static __always_inline void cache_loop(struct page *page, const unsigned int reg)
+ {
+ 	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
+ 	unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
+diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
+index 5393a535240c7..dfbbffa0eb2e2 100644
+--- a/arch/powerpc/include/asm/book3s/64/mmu.h
++++ b/arch/powerpc/include/asm/book3s/64/mmu.h
+@@ -228,14 +228,14 @@ static inline void early_init_mmu_secondary(void)
+ 
+ extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ 					 phys_addr_t first_memblock_size);
+-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+-					 phys_addr_t first_memblock_size);
+ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ 					      phys_addr_t first_memblock_size)
+ {
+-	if (early_radix_enabled())
+-		return radix__setup_initial_memory_limit(first_memblock_base,
+-						   first_memblock_size);
++	/*
++	 * Hash has more strict restrictions. At this point we don't
++	 * know which translations we will pick. Hence go with hash
++	 * restrictions.
++	 */
+ 	return hash__setup_initial_memory_limit(first_memblock_base,
+ 					   first_memblock_size);
+ }
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index e486d1d78de28..f4cb2c546adbb 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -160,7 +160,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
+ 			return bypass_mask;
+ 	}
+ 
+-	mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
++	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
++			tbl->it_page_shift - 1);
+ 	mask += mask - 1;
+ 
+ 	return mask;
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index c2989c1718839..1e9a298020a63 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -654,21 +654,6 @@ void radix__mmu_cleanup_all(void)
+ 	}
+ }
+ 
+-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+-				phys_addr_t first_memblock_size)
+-{
+-	/*
+-	 * We don't currently support the first MEMBLOCK not mapping 0
+-	 * physical on those processors
+-	 */
+-	BUG_ON(first_memblock_base != 0);
+-
+-	/*
+-	 * Radix mode is not limited by RMA / VRMA addressing.
+-	 */
+-	ppc64_rma_size = ULONG_MAX;
+-}
+-
+ #ifdef CONFIG_MEMORY_HOTPLUG
+ static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
+ {
+diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
+index bc73abf0bc25e..ef566fc43933e 100644
+--- a/arch/powerpc/mm/init_64.c
++++ b/arch/powerpc/mm/init_64.c
+@@ -431,9 +431,16 @@ void __init mmu_early_init_devtree(void)
+ 	if (!(mfmsr() & MSR_HV))
+ 		early_check_vec5();
+ 
+-	if (early_radix_enabled())
++	if (early_radix_enabled()) {
+ 		radix__early_init_devtree();
+-	else
++		/*
++		 * We have finalized the translation we are going to use by now.
++		 * Radix mode is not limited by RMA / VRMA addressing.
++		 * Hence don't limit memblock allocations.
++		 */
++		ppc64_rma_size = ULONG_MAX;
++		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
++	} else
+ 		hash__early_init_devtree();
+ }
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
+index 79e9d55bdf1ac..e229d95f470b8 100644
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+ 
+ 	ptep = &fixmap_pte[pte_index(addr)];
+ 
+-	if (pgprot_val(prot)) {
++	if (pgprot_val(prot))
+ 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
+-	} else {
++	else
+ 		pte_clear(&init_mm, addr, ptep);
+-		local_flush_tlb_page(addr);
+-	}
++	local_flush_tlb_page(addr);
+ }
+ 
+ static pte_t *__init get_pte_virt(phys_addr_t pa)
+diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
+index faca269d5f278..a44ddc2f2dec5 100644
+--- a/arch/s390/kernel/entry.h
++++ b/arch/s390/kernel/entry.h
+@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
+ void do_dat_exception(struct pt_regs *regs);
+ void do_secure_storage_access(struct pt_regs *regs);
+ void do_non_secure_storage_access(struct pt_regs *regs);
++void do_secure_storage_violation(struct pt_regs *regs);
+ 
+ void addressing_exception(struct pt_regs *regs);
+ void data_exception(struct pt_regs *regs);
+diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
+index 2c27907a5ffcb..9a92638360eee 100644
+--- a/arch/s390/kernel/pgm_check.S
++++ b/arch/s390/kernel/pgm_check.S
+@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception)		/* 3b */
+ PGM_CHECK_DEFAULT			/* 3c */
+ PGM_CHECK(do_secure_storage_access)	/* 3d */
+ PGM_CHECK(do_non_secure_storage_access)	/* 3e */
+-PGM_CHECK_DEFAULT			/* 3f */
++PGM_CHECK(do_secure_storage_violation)	/* 3f */
+ PGM_CHECK(monitor_event_exception)	/* 40 */
+ PGM_CHECK_DEFAULT			/* 41 */
+ PGM_CHECK_DEFAULT			/* 42 */
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index d53c2e2ea1fd2..48d9fc5b699b4 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -875,6 +875,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(do_non_secure_storage_access);
+ 
++void do_secure_storage_violation(struct pt_regs *regs)
++{
++	/*
++	 * Either KVM messed up the secure guest mapping or the same
++	 * page is mapped into multiple secure guests.
++	 *
++	 * This exception is only triggered when a guest 2 is running
++	 * and can therefore never occur in kernel context.
++	 */
++	printk_ratelimited(KERN_WARNING
++			   "Secure storage violation in task: %s, pid %d\n",
++			   current->comm, current->pid);
++	send_sig(SIGSEGV, current, 0);
++}
++
+ #else
+ void do_secure_storage_access(struct pt_regs *regs)
+ {
+@@ -885,4 +900,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
+ {
+ 	default_trap_handler(regs);
+ }
++
++void do_secure_storage_violation(struct pt_regs *regs)
++{
++	default_trap_handler(regs);
++}
+ #endif
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 4b62d6b550246..1804230dd8d82 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
+ int zpci_disable_device(struct zpci_dev *zdev)
+ {
+ 	zpci_dma_exit_device(zdev);
++	/*
++	 * The zPCI function may already be disabled by the platform, this is
++	 * detected in clp_disable_fh() which becomes a no-op.
++	 */
+ 	return clp_disable_fh(zdev);
+ }
+ EXPORT_SYMBOL_GPL(zpci_disable_device);
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index 9a3a291cad432..d9ae7456dd4c8 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -143,6 +143,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+ 			zpci_remove_device(zdev);
+ 		}
+ 
++		zdev->fh = ccdf->fh;
++		zpci_disable_device(zdev);
+ 		zdev->state = ZPCI_FN_STATE_STANDBY;
+ 		if (!clp_get_state(ccdf->fid, &state) &&
+ 		    state == ZPCI_FN_STATE_RESERVED) {
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 5a828fde7a42f..be38af7bea89d 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -42,6 +42,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+ KBUILD_CFLAGS += -Wno-pointer-sign
+ KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
+ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
++# Disable relocation relaxation in case the link is not PIE.
++KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+ 
+ KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
+index 296b346184b27..fb42659f6e988 100644
+--- a/arch/x86/include/asm/frame.h
++++ b/arch/x86/include/asm/frame.h
+@@ -60,12 +60,26 @@
+ #define FRAME_END "pop %" _ASM_BP "\n"
+ 
+ #ifdef CONFIG_X86_64
++
+ #define ENCODE_FRAME_POINTER			\
+ 	"lea 1(%rsp), %rbp\n\t"
++
++static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
++{
++	return (unsigned long)regs + 1;
++}
++
+ #else /* !CONFIG_X86_64 */
++
+ #define ENCODE_FRAME_POINTER			\
+ 	"movl %esp, %ebp\n\t"			\
+ 	"andl $0x7fffffff, %ebp\n\t"
++
++static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
++{
++	return (unsigned long)regs & 0x7fffffff;
++}
++
+ #endif /* CONFIG_X86_64 */
+ 
+ #endif /* __ASSEMBLY__ */
+@@ -83,6 +97,11 @@
+ 
+ #define ENCODE_FRAME_POINTER
+ 
++static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
++{
++	return 0;
++}
++
+ #endif
+ 
+ #define FRAME_BEGIN
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index fe67dbd76e517..bff502e779e44 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -42,6 +42,7 @@
+ #include <asm/spec-ctrl.h>
+ #include <asm/io_bitmap.h>
+ #include <asm/proto.h>
++#include <asm/frame.h>
+ 
+ #include "process.h"
+ 
+@@ -133,7 +134,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ 	fork_frame = container_of(childregs, struct fork_frame, regs);
+ 	frame = &fork_frame->frame;
+ 
+-	frame->bp = 0;
++	frame->bp = encode_frame_pointer(childregs);
+ 	frame->ret_addr = (unsigned long) ret_from_fork;
+ 	p->thread.sp = (unsigned long) fork_frame;
+ 	p->thread.io_bitmap = NULL;
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 50c8f034c01c5..caa4fa7f42b84 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5895,18 +5895,6 @@ static void bfq_finish_requeue_request(struct request *rq)
+ 	struct bfq_queue *bfqq = RQ_BFQQ(rq);
+ 	struct bfq_data *bfqd;
+ 
+-	/*
+-	 * Requeue and finish hooks are invoked in blk-mq without
+-	 * checking whether the involved request is actually still
+-	 * referenced in the scheduler. To handle this fact, the
+-	 * following two checks make this function exit in case of
+-	 * spurious invocations, for which there is nothing to do.
+-	 *
+-	 * First, check whether rq has nothing to do with an elevator.
+-	 */
+-	if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
+-		return;
+-
+ 	/*
+ 	 * rq either is not associated with any icq, or is an already
+ 	 * requeued request that has not (yet) been re-inserted into
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 126021fc3a11f..e81ca1bf6e10b 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
+ 	struct request_queue *q = rq->q;
+ 	struct elevator_queue *e = q->elevator;
+ 
+-	if (e && e->type->ops.requeue_request)
++	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
+ 		e->type->ops.requeue_request(rq);
+ }
+ 
+diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
+index 933e2192fbe8a..d08efc77cf16a 100644
+--- a/drivers/base/firmware_loader/firmware.h
++++ b/drivers/base/firmware_loader/firmware.h
+@@ -142,10 +142,12 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
+ void fw_free_paged_buf(struct fw_priv *fw_priv);
+ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
+ int fw_map_paged_buf(struct fw_priv *fw_priv);
++bool fw_is_paged_buf(struct fw_priv *fw_priv);
+ #else
+ static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
+ static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+ static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
++static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
+ #endif
+ 
+ #endif /* __FIRMWARE_LOADER_H */
+diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
+index ca871b13524e2..36bf45509e0b0 100644
+--- a/drivers/base/firmware_loader/main.c
++++ b/drivers/base/firmware_loader/main.c
+@@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref)
+ 	list_del(&fw_priv->list);
+ 	spin_unlock(&fwc->lock);
+ 
+-	fw_free_paged_buf(fw_priv); /* free leftover pages */
+-	if (!fw_priv->allocated_size)
++	if (fw_is_paged_buf(fw_priv))
++		fw_free_paged_buf(fw_priv);
++	else if (!fw_priv->allocated_size)
+ 		vfree(fw_priv->data);
++
+ 	kfree_const(fw_priv->fw_name);
+ 	kfree(fw_priv);
+ }
+@@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv)
+ }
+ 
+ #ifdef CONFIG_FW_LOADER_PAGED_BUF
++bool fw_is_paged_buf(struct fw_priv *fw_priv)
++{
++	return fw_priv->is_paged_buf;
++}
++
+ void fw_free_paged_buf(struct fw_priv *fw_priv)
+ {
+ 	int i;
+@@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
+ 	if (!fw_priv->pages)
+ 		return;
+ 
++	vunmap(fw_priv->data);
++
+ 	for (i = 0; i < fw_priv->nr_pages; i++)
+ 		__free_page(fw_priv->pages[i]);
+ 	kvfree(fw_priv->pages);
+@@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
+ 	if (!fw_priv->data)
+ 		return -ENOMEM;
+ 
+-	/* page table is no longer needed after mapping, let's free */
+-	kvfree(fw_priv->pages);
+-	fw_priv->pages = NULL;
+-
+ 	return 0;
+ }
+ #endif
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 84433922aed16..dfc66038bef9f 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1114,8 +1114,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	mapping = file->f_mapping;
+ 	inode = mapping->host;
+ 
+-	size = get_loop_size(lo, file);
+-
+ 	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
+ 		error = -EINVAL;
+ 		goto out_unlock;
+@@ -1165,6 +1163,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
+ 	loop_update_rotational(lo);
+ 	loop_update_dio(lo);
+ 	loop_sysfs_init(lo);
++
++	size = get_loop_size(lo, file);
+ 	loop_set_size(lo, size);
+ 
+ 	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
+diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
+index 8a23d5dfd1f8d..5f063e1be4b14 100644
+--- a/drivers/clk/davinci/pll.c
++++ b/drivers/clk/davinci/pll.c
+@@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev,
+ 		parent_name = postdiv_name;
+ 	}
+ 
+-	pllen = kzalloc(sizeof(*pllout), GFP_KERNEL);
++	pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
+ 	if (!pllen) {
+ 		ret = -ENOMEM;
+ 		goto err_unregister_postdiv;
+diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
+index d7243c09cc843..47d6482dda9df 100644
+--- a/drivers/clk/rockchip/clk-rk3228.c
++++ b/drivers/clk/rockchip/clk-rk3228.c
+@@ -137,7 +137,7 @@ PNAME(mux_usb480m_p)		= { "usb480m_phy", "xin24m" };
+ PNAME(mux_hdmiphy_p)		= { "hdmiphy_phy", "xin24m" };
+ PNAME(mux_aclk_cpu_src_p)	= { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" };
+ 
+-PNAME(mux_pll_src_4plls_p)	= { "cpll", "gpll", "hdmiphy" "usb480m" };
++PNAME(mux_pll_src_4plls_p)	= { "cpll", "gpll", "hdmiphy", "usb480m" };
+ PNAME(mux_pll_src_3plls_p)	= { "cpll", "gpll", "hdmiphy" };
+ PNAME(mux_pll_src_2plls_p)	= { "cpll", "gpll" };
+ PNAME(mux_sclk_hdmi_cec_p)	= { "cpll", "gpll", "xin24m" };
+diff --git a/drivers/dax/super.c b/drivers/dax/super.c
+index 8e32345be0f74..af95d7e723f76 100644
+--- a/drivers/dax/super.c
++++ b/drivers/dax/super.c
+@@ -318,11 +318,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access);
+ bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ 		int blocksize, sector_t start, sector_t len)
+ {
++	if (!dax_dev)
++		return false;
++
+ 	if (!dax_alive(dax_dev))
+ 		return false;
+ 
+ 	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
+ }
++EXPORT_SYMBOL_GPL(dax_supported);
+ 
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ 		size_t bytes, struct iov_iter *i)
+diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
+index 35dccc88ac0af..15a47539dc563 100644
+--- a/drivers/firmware/efi/efibc.c
++++ b/drivers/firmware/efi/efibc.c
+@@ -84,7 +84,7 @@ static int __init efibc_init(void)
+ {
+ 	int ret;
+ 
+-	if (!efi_enabled(EFI_RUNTIME_SERVICES))
++	if (!efivars_kobject() || !efivar_supports_writes())
+ 		return -ENODEV;
+ 
+ 	ret = register_reboot_notifier(&efibc_reboot_notifier);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e9c4867abeffb..aa1e0f0550835 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1287,7 +1287,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ 	if (q->properties.is_active) {
+ 		increment_queue_count(dqm, q->properties.type);
+ 
+-		retval = execute_queues_cpsch(dqm,
++		execute_queues_cpsch(dqm,
+ 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+index 30c229fcb4046..c5c549177d726 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
+@@ -440,29 +440,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
+ 	return __reset_engine(engine);
+ }
+ 
+-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
++static bool
++__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
+ {
+ 	struct intel_engine_cs *engine, *locked;
++	bool ret = false;
+ 
+ 	/*
+ 	 * Serialise with __i915_request_submit() so that it sees
+ 	 * is-banned?, or we know the request is already inflight.
++	 *
++	 * Note that rq->engine is unstable, and so we double
++	 * check that we have acquired the lock on the final engine.
+ 	 */
+ 	locked = READ_ONCE(rq->engine);
+ 	spin_lock_irq(&locked->active.lock);
+ 	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+ 		spin_unlock(&locked->active.lock);
+-		spin_lock(&engine->active.lock);
+ 		locked = engine;
++		spin_lock(&locked->active.lock);
+ 	}
+ 
+-	engine = NULL;
+-	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+-		engine = rq->engine;
++	if (!i915_request_completed(rq)) {
++		if (i915_request_is_active(rq) && rq->fence.error != -EIO)
++			*active = locked;
++		ret = true;
++	}
+ 
+ 	spin_unlock_irq(&locked->active.lock);
+ 
+-	return engine;
++	return ret;
+ }
+ 
+ static struct intel_engine_cs *active_engine(struct intel_context *ce)
+@@ -473,17 +480,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
+ 	if (!ce->timeline)
+ 		return NULL;
+ 
+-	mutex_lock(&ce->timeline->mutex);
+-	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+-		if (i915_request_completed(rq))
+-			break;
++	rcu_read_lock();
++	list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
++		if (i915_request_is_active(rq) && i915_request_completed(rq))
++			continue;
+ 
+ 		/* Check with the backend if the request is inflight */
+-		engine = __active_engine(rq);
+-		if (engine)
++		if (__active_engine(rq, &engine))
+ 			break;
+ 	}
+-	mutex_unlock(&ce->timeline->mutex);
++	rcu_read_unlock();
+ 
+ 	return engine;
+ }
+@@ -714,6 +720,7 @@ __create_context(struct drm_i915_private *i915)
+ 	ctx->i915 = i915;
+ 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+ 	mutex_init(&ctx->mutex);
++	INIT_LIST_HEAD(&ctx->link);
+ 
+ 	spin_lock_init(&ctx->stale.lock);
+ 	INIT_LIST_HEAD(&ctx->stale.engines);
+@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
+ 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+ 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ 
+-	spin_lock(&i915->gem.contexts.lock);
+-	list_add_tail(&ctx->link, &i915->gem.contexts.list);
+-	spin_unlock(&i915->gem.contexts.lock);
+-
+ 	return ctx;
+ 
+ err_free:
+@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
+ 				struct drm_i915_file_private *fpriv,
+ 				u32 *id)
+ {
++	struct drm_i915_private *i915 = ctx->i915;
+ 	struct i915_address_space *vm;
+ 	int ret;
+ 
+@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
+ 	/* And finally expose ourselves to userspace via the idr */
+ 	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
+ 	if (ret)
+-		put_pid(fetch_and_zero(&ctx->pid));
++		goto err_pid;
++
++	spin_lock(&i915->gem.contexts.lock);
++	list_add_tail(&ctx->link, &i915->gem.contexts.list);
++	spin_unlock(&i915->gem.contexts.lock);
++
++	return 0;
+ 
++err_pid:
++	put_pid(fetch_and_zero(&ctx->pid));
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
+index 295b9829e2da5..4cd2038cbe359 100644
+--- a/drivers/gpu/drm/i915/i915_sw_fence.c
++++ b/drivers/gpu/drm/i915/i915_sw_fence.c
+@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
+ 
+ 		do {
+ 			list_for_each_entry_safe(pos, next, &x->head, entry) {
+-				pos->func(pos,
+-					  TASK_NORMAL, fence->error,
+-					  &extra);
++				int wake_flags;
++
++				wake_flags = fence->error;
++				if (pos->func == autoremove_wake_function)
++					wake_flags = 0;
++
++				pos->func(pos, TASK_NORMAL, wake_flags, &extra);
+ 			}
+ 
+ 			if (list_empty(&extra))
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+index 7cd8f415fd029..d8b43500f12d1 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+@@ -834,13 +834,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ 			drm_crtc_index(&mtk_crtc->base));
+ 		mtk_crtc->cmdq_client = NULL;
+ 	}
+-	ret = of_property_read_u32_index(priv->mutex_node,
+-					 "mediatek,gce-events",
+-					 drm_crtc_index(&mtk_crtc->base),
+-					 &mtk_crtc->cmdq_event);
+-	if (ret)
+-		dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+-			drm_crtc_index(&mtk_crtc->base));
++
++	if (mtk_crtc->cmdq_client) {
++		ret = of_property_read_u32_index(priv->mutex_node,
++						 "mediatek,gce-events",
++						 drm_crtc_index(&mtk_crtc->base),
++						 &mtk_crtc->cmdq_event);
++		if (ret) {
++			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
++				drm_crtc_index(&mtk_crtc->base));
++			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
++			mtk_crtc->cmdq_client = NULL;
++		}
++	}
+ #endif
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+index 57c88de9a3293..526648885b97e 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ 	if (of_address_to_resource(node, 0, &res) != 0) {
+ 		dev_err(dev, "Missing reg in %s node\n", node->full_name);
++		put_device(&larb_pdev->dev);
+ 		return -EINVAL;
+ 	}
+ 	comp->regs_pa = res.start;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 040a8f393fe24..b77dc36be4224 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -165,7 +165,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ 
+ 	ret = drmm_mode_config_init(drm);
+ 	if (ret)
+-		return ret;
++		goto put_mutex_dev;
+ 
+ 	drm->mode_config.min_width = 64;
+ 	drm->mode_config.min_height = 64;
+@@ -182,7 +182,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ 
+ 	ret = component_bind_all(drm->dev, drm);
+ 	if (ret)
+-		return ret;
++		goto put_mutex_dev;
+ 
+ 	/*
+ 	 * We currently support two fixed data streams, each optional,
+@@ -229,7 +229,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ 	}
+ 	if (!dma_dev->dma_parms) {
+ 		ret = -ENOMEM;
+-		goto err_component_unbind;
++		goto put_dma_dev;
+ 	}
+ 
+ 	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+@@ -256,9 +256,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ err_unset_dma_parms:
+ 	if (private->dma_parms_allocated)
+ 		dma_dev->dma_parms = NULL;
++put_dma_dev:
++	put_device(private->dma_dev);
+ err_component_unbind:
+ 	component_unbind_all(drm->dev, drm);
+-
++put_mutex_dev:
++	put_device(private->mutex_dev);
+ 	return ret;
+ }
+ 
+@@ -544,8 +547,13 @@ err_pm:
+ 	pm_runtime_disable(dev);
+ err_node:
+ 	of_node_put(private->mutex_node);
+-	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
++	for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
+ 		of_node_put(private->comp_node[i]);
++		if (private->ddp_comp[i]) {
++			put_device(private->ddp_comp[i]->larb_dev);
++			private->ddp_comp[i] = NULL;
++		}
++	}
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
+index 02ac55c13a80b..ee011a0633841 100644
+--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
++++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
+@@ -470,14 +470,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+ 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
+ 
+ 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+-		horizontal_backporch_byte =
+-			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
++		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
+ 	else
+-		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
+-			dsi_tmp_buf_bpp - 10);
++		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
++					    dsi_tmp_buf_bpp;
+ 
+ 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
+-			  timing->da_hs_zero + timing->da_hs_exit + 3;
++			  timing->da_hs_zero + timing->da_hs_exit;
+ 
+ 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ 		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
+diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+index 1eebe310470af..a9704822c0334 100644
+--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
++++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
+@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ 		dev_err(dev,
+ 			"Failed to get system configuration registers: %d\n",
+ 			ret);
+-		return ret;
++		goto put_device;
+ 	}
+ 	hdmi->sys_regmap = regmap;
+ 
+ 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	hdmi->regs = devm_ioremap_resource(dev, mem);
+-	if (IS_ERR(hdmi->regs))
+-		return PTR_ERR(hdmi->regs);
++	if (IS_ERR(hdmi->regs)) {
++		ret = PTR_ERR(hdmi->regs);
++		goto put_device;
++	}
+ 
+ 	remote = of_graph_get_remote_node(np, 1, 0);
+-	if (!remote)
+-		return -EINVAL;
++	if (!remote) {
++		ret = -EINVAL;
++		goto put_device;
++	}
+ 
+ 	if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ 		hdmi->next_bridge = of_drm_find_bridge(remote);
+ 		if (!hdmi->next_bridge) {
+ 			dev_err(dev, "Waiting for external bridge\n");
+ 			of_node_put(remote);
+-			return -EPROBE_DEFER;
++			ret = -EPROBE_DEFER;
++			goto put_device;
+ 		}
+ 	}
+ 
+@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ 		dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
+ 			remote);
+ 		of_node_put(remote);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto put_device;
+ 	}
+ 	of_node_put(remote);
+ 
+@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+ 	of_node_put(i2c_np);
+ 	if (!hdmi->ddc_adpt) {
+ 		dev_err(dev, "Failed to get ddc i2c adapter by node\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto put_device;
+ 	}
+ 
+ 	return 0;
++put_device:
++	put_device(hdmi->cec_dev);
++	return ret;
+ }
+ 
+ /*
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 417a95e5094dd..af7832e131674 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -750,7 +750,7 @@ static void vmbus_wait_for_unload(void)
+ 	void *page_addr;
+ 	struct hv_message *msg;
+ 	struct vmbus_channel_message_header *hdr;
+-	u32 message_type;
++	u32 message_type, i;
+ 
+ 	/*
+ 	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
+@@ -760,8 +760,11 @@ static void vmbus_wait_for_unload(void)
+ 	 * functional and vmbus_unload_response() will complete
+ 	 * vmbus_connection.unload_event. If not, the last thing we can do is
+ 	 * read message pages for all CPUs directly.
++	 *
++	 * Wait no more than 10 seconds so that the panic path can't get
++	 * hung forever in case the response message isn't seen.
+ 	 */
+-	while (1) {
++	for (i = 0; i < 1000; i++) {
+ 		if (completion_done(&vmbus_connection.unload_event))
+ 			break;
+ 
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index d69f4efa37198..dacdd8d2eb1b3 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2383,7 +2383,10 @@ static int vmbus_bus_suspend(struct device *dev)
+ 	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
+ 		wait_for_completion(&vmbus_connection.ready_for_suspend_event);
+ 
+-	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
++	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
++		pr_err("Can not suspend due to a previous failed resuming\n");
++		return -EBUSY;
++	}
+ 
+ 	mutex_lock(&vmbus_connection.channel_mutex);
+ 
+@@ -2459,7 +2462,9 @@ static int vmbus_bus_resume(struct device *dev)
+ 
+ 	vmbus_request_offers();
+ 
+-	wait_for_completion(&vmbus_connection.ready_for_resume_event);
++	if (wait_for_completion_timeout(
++		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
++		pr_err("Some vmbus device is missing after suspending?\n");
+ 
+ 	/* Reset the event for the next suspend. */
+ 	reinit_completion(&vmbus_connection.ready_for_suspend_event);
+diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
+index 388978775be04..edc6985c696f0 100644
+--- a/drivers/i2c/algos/i2c-algo-pca.c
++++ b/drivers/i2c/algos/i2c-algo-pca.c
+@@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap)
+ 		pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
+ 		pca_outw(adap, I2C_PCA_IND, 0xA5);
+ 		pca_outw(adap, I2C_PCA_IND, 0x5A);
++
++		/*
++		 * After a reset we need to re-apply any configuration
++		 * (calculated in pca_init) to get the bus in a working state.
++		 */
++		pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
++		pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
++		pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
++		pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
++		pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
++		pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);
++
++		pca_set_con(adap, I2C_PCA_CON_ENSIO);
+ 	} else {
+ 		adap->reset_chip(adap->data);
++		pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq);
+ 	}
+ }
+ 
+@@ -423,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap)
+ 				" Use the nominal frequency.\n", adap->name);
+ 		}
+ 
+-		pca_reset(pca_data);
+-
+ 		clock = pca_clock(pca_data);
+ 		printk(KERN_INFO "%s: Clock frequency is %dkHz\n",
+ 		     adap->name, freqs[clock]);
+ 
+-		pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
++		/* Store settings as these will be needed when the PCA chip is reset */
++		pca_data->bus_settings.clock_freq = clock;
++
++		pca_reset(pca_data);
+ 	} else {
+ 		int clock;
+ 		int mode;
+@@ -496,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap)
+ 			thi = tlow * min_thi / min_tlow;
+ 		}
+ 
++		/* Store settings as these will be needed when the PCA chip is reset */
++		pca_data->bus_settings.mode = mode;
++		pca_data->bus_settings.tlow = tlow;
++		pca_data->bus_settings.thi = thi;
++
+ 		pca_reset(pca_data);
+ 
+ 		printk(KERN_INFO
+ 		     "%s: Clock frequency is %dHz\n", adap->name, clock * 100);
+-
+-		pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE);
+-		pca_outw(pca_data, I2C_PCA_IND, mode);
+-		pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
+-		pca_outw(pca_data, I2C_PCA_IND, tlow);
+-		pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
+-		pca_outw(pca_data, I2C_PCA_IND, thi);
+-
+-		pca_set_con(pca_data, I2C_PCA_CON_ENSIO);
+ 	}
+ 	udelay(500); /* 500 us for oscillator to stabilise */
+ 
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index f206e28af5831..3843eabeddda3 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -1706,6 +1706,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+ static inline void i801_acpi_remove(struct i801_priv *priv) { }
+ #endif
+ 
++static unsigned char i801_setup_hstcfg(struct i801_priv *priv)
++{
++	unsigned char hstcfg = priv->original_hstcfg;
++
++	hstcfg &= ~SMBHSTCFG_I2C_EN;	/* SMBus timing */
++	hstcfg |= SMBHSTCFG_HST_EN;
++	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
++	return hstcfg;
++}
++
+ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ {
+ 	unsigned char temp;
+@@ -1826,14 +1836,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 		return err;
+ 	}
+ 
+-	pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
+-	priv->original_hstcfg = temp;
+-	temp &= ~SMBHSTCFG_I2C_EN;	/* SMBus timing */
+-	if (!(temp & SMBHSTCFG_HST_EN)) {
++	pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
++	temp = i801_setup_hstcfg(priv);
++	if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
+ 		dev_info(&dev->dev, "Enabling SMBus device\n");
+-		temp |= SMBHSTCFG_HST_EN;
+-	}
+-	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
+ 
+ 	if (temp & SMBHSTCFG_SMB_SMI_EN) {
+ 		dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
+@@ -1959,6 +1965,7 @@ static int i801_resume(struct device *dev)
+ {
+ 	struct i801_priv *priv = dev_get_drvdata(dev);
+ 
++	i801_setup_hstcfg(priv);
+ 	i801_enable_host_notify(&priv->adapter);
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index deef69e569062..b099139cbb91e 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -658,8 +658,8 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
+ 	unsigned int cnt_mul;
+ 	int ret = -EINVAL;
+ 
+-	if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
+-		target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ;
++	if (target_speed > I2C_MAX_HIGH_SPEED_MODE_FREQ)
++		target_speed = I2C_MAX_HIGH_SPEED_MODE_FREQ;
+ 
+ 	max_step_cnt = mtk_i2c_max_step_cnt(target_speed);
+ 	base_step_cnt = max_step_cnt;
+diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
+index 9587347447f0f..c4b08a9244614 100644
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -25,6 +25,7 @@
+ #include <linux/of_device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmaengine.h>
++#include <linux/dma/mxs-dma.h>
+ 
+ #define DRIVER_NAME "mxs-i2c"
+ 
+@@ -200,7 +201,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
+ 					DMA_MEM_TO_DEV,
+-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++					DMA_PREP_INTERRUPT |
++					MXS_DMA_CTRL_WAIT4END);
+ 		if (!desc) {
+ 			dev_err(i2c->dev,
+ 				"Failed to get DMA data write descriptor.\n");
+@@ -228,7 +230,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
+ 					DMA_DEV_TO_MEM,
+-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++					DMA_PREP_INTERRUPT |
++					MXS_DMA_CTRL_WAIT4END);
+ 		if (!desc) {
+ 			dev_err(i2c->dev,
+ 				"Failed to get DMA data write descriptor.\n");
+@@ -260,7 +263,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
+ 		dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
+ 		desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
+ 					DMA_MEM_TO_DEV,
+-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
++					DMA_PREP_INTERRUPT |
++					MXS_DMA_CTRL_WAIT4END);
+ 		if (!desc) {
+ 			dev_err(i2c->dev,
+ 				"Failed to get DMA data write descriptor.\n");
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 4cd475ea97a24..64d44f51db4b6 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ 	attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ 			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+-	attr->max_sgid = le32_to_cpu(sb->max_gid);
++	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
+ 
+ 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
+ 
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 6404f0da10517..967890cd81f27 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -47,6 +47,7 @@
+ struct bnxt_qplib_dev_attr {
+ #define FW_VER_ARR_LEN			4
+ 	u8				fw_ver[FW_VER_ARR_LEN];
++#define BNXT_QPLIB_NUM_GIDS_SUPPORTED	256
+ 	u16				max_sgid;
+ 	u16				max_mrw;
+ 	u32				max_qp;
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 3eefee2ee2a12..854d5e7587241 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -17,10 +17,12 @@
+ #include "trackpoint.h"
+ 
+ static const char * const trackpoint_variants[] = {
+-	[TP_VARIANT_IBM]	= "IBM",
+-	[TP_VARIANT_ALPS]	= "ALPS",
+-	[TP_VARIANT_ELAN]	= "Elan",
+-	[TP_VARIANT_NXP]	= "NXP",
++	[TP_VARIANT_IBM]		= "IBM",
++	[TP_VARIANT_ALPS]		= "ALPS",
++	[TP_VARIANT_ELAN]		= "Elan",
++	[TP_VARIANT_NXP]		= "NXP",
++	[TP_VARIANT_JYT_SYNAPTICS]	= "JYT_Synaptics",
++	[TP_VARIANT_SYNAPTICS]		= "Synaptics",
+ };
+ 
+ /*
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5cb93ed260856..eb5412904fe07 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -24,10 +24,12 @@
+  * 0x01 was the original IBM trackpoint, others implement very limited
+  * subset of trackpoint features.
+  */
+-#define TP_VARIANT_IBM		0x01
+-#define TP_VARIANT_ALPS		0x02
+-#define TP_VARIANT_ELAN		0x03
+-#define TP_VARIANT_NXP		0x04
++#define TP_VARIANT_IBM			0x01
++#define TP_VARIANT_ALPS			0x02
++#define TP_VARIANT_ELAN			0x03
++#define TP_VARIANT_NXP			0x04
++#define TP_VARIANT_JYT_SYNAPTICS	0x05
++#define TP_VARIANT_SYNAPTICS		0x06
+ 
+ /*
+  * Commands
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 7d7f737027264..37fb9aa88f9c3 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -548,6 +548,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ 		},
+ 	},
++	{
++		/* Entroware Proteus */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -676,6 +684,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ 		},
+ 	},
++	{
++		/* Entroware Proteus */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
+index 9e1ab701785c7..0162a9af93237 100644
+--- a/drivers/interconnect/core.c
++++ b/drivers/interconnect/core.c
+@@ -55,12 +55,18 @@ static int icc_summary_show(struct seq_file *s, void *data)
+ 
+ 			icc_summary_show_one(s, n);
+ 			hlist_for_each_entry(r, &n->req_list, req_node) {
++				u32 avg_bw = 0, peak_bw = 0;
++
+ 				if (!r->dev)
+ 					continue;
+ 
++				if (r->enabled) {
++					avg_bw = r->avg_bw;
++					peak_bw = r->peak_bw;
++				}
++
+ 				seq_printf(s, "  %-27s %12u %12u %12u\n",
+-					   dev_name(r->dev), r->tag, r->avg_bw,
+-					   r->peak_bw);
++					   dev_name(r->dev), r->tag, avg_bw, peak_bw);
+ 			}
+ 		}
+ 	}
+diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
+index 37c74c842f3a3..a51dcf26b09f2 100644
+--- a/drivers/iommu/amd/iommu.c
++++ b/drivers/iommu/amd/iommu.c
+@@ -3831,14 +3831,18 @@ int amd_iommu_activate_guest_mode(void *data)
+ {
+ 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
++	u64 valid;
+ 
+ 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ 	    !entry || entry->lo.fields_vapic.guest_mode)
+ 		return 0;
+ 
++	valid = entry->lo.fields_vapic.valid;
++
+ 	entry->lo.val = 0;
+ 	entry->hi.val = 0;
+ 
++	entry->lo.fields_vapic.valid       = valid;
+ 	entry->lo.fields_vapic.guest_mode  = 1;
+ 	entry->lo.fields_vapic.ga_log_intr = 1;
+ 	entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
+@@ -3855,12 +3859,14 @@ int amd_iommu_deactivate_guest_mode(void *data)
+ 	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ 	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ 	struct irq_cfg *cfg = ir_data->cfg;
+-	u64 valid = entry->lo.fields_remap.valid;
++	u64 valid;
+ 
+ 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+ 	    !entry || !entry->lo.fields_vapic.guest_mode)
+ 		return 0;
+ 
++	valid = entry->lo.fields_remap.valid;
++
+ 	entry->lo.val = 0;
+ 	entry->hi.val = 0;
+ 
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 8277b959e00bd..6a4057b844e24 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -865,10 +865,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
+ int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
+ 			sector_t start, sector_t len, void *data)
+ {
+-	int blocksize = *(int *) data;
++	int blocksize = *(int *) data, id;
++	bool rc;
+ 
+-	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
+-				       start, len);
++	id = dax_read_lock();
++	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
++	dax_read_unlock(id);
++
++	return rc;
+ }
+ 
+ /* Check devices support synchronous DAX */
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 88b391ff9bea7..49c758fef8cb6 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1136,15 +1136,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
+ {
+ 	struct mapped_device *md = dax_get_private(dax_dev);
+ 	struct dm_table *map;
++	bool ret = false;
+ 	int srcu_idx;
+-	bool ret;
+ 
+ 	map = dm_get_live_table(md, &srcu_idx);
+ 	if (!map)
+-		return false;
++		goto out;
+ 
+ 	ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+ 
++out:
+ 	dm_put_live_table(md, srcu_idx);
+ 
+ 	return ret;
+diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
+index 6c2b9cf45e831..650922061bdc7 100644
+--- a/drivers/misc/habanalabs/debugfs.c
++++ b/drivers/misc/habanalabs/debugfs.c
+@@ -982,7 +982,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
+ 		return 0;
+ 
+ 	sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
+-	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
++	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ 			strlen(tmp_buf) + 1);
+ 
+ 	return rc;
+diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+index 96f08050ef0fb..6c50f015eda47 100644
+--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
++++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+@@ -378,15 +378,15 @@ enum axi_id {
+ 	((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
+ 		(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
+ 
+-#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0	RAZWI_INITIATOR_ID_X_Y(1, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC1		RAZWI_INITIATOR_ID_X_Y(2, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_MME0_0		RAZWI_INITIATOR_ID_X_Y(3, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_MME0_1		RAZWI_INITIATOR_ID_X_Y(4, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_MME1_0		RAZWI_INITIATOR_ID_X_Y(5, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_MME1_1		RAZWI_INITIATOR_ID_X_Y(6, 0)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC2		RAZWI_INITIATOR_ID_X_Y(7, 0)
++#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0	RAZWI_INITIATOR_ID_X_Y(1, 1)
++#define RAZWI_INITIATOR_ID_X_Y_TPC1		RAZWI_INITIATOR_ID_X_Y(2, 1)
++#define RAZWI_INITIATOR_ID_X_Y_MME0_0		RAZWI_INITIATOR_ID_X_Y(3, 1)
++#define RAZWI_INITIATOR_ID_X_Y_MME0_1		RAZWI_INITIATOR_ID_X_Y(4, 1)
++#define RAZWI_INITIATOR_ID_X_Y_MME1_0		RAZWI_INITIATOR_ID_X_Y(5, 1)
++#define RAZWI_INITIATOR_ID_X_Y_MME1_1		RAZWI_INITIATOR_ID_X_Y(6, 1)
++#define RAZWI_INITIATOR_ID_X_Y_TPC2		RAZWI_INITIATOR_ID_X_Y(7, 1)
+ #define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \
+-						RAZWI_INITIATOR_ID_X_Y(8, 0)
++						RAZWI_INITIATOR_ID_X_Y(8, 1)
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0	RAZWI_INITIATOR_ID_X_Y(0, 1)
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0	RAZWI_INITIATOR_ID_X_Y(9, 1)
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1	RAZWI_INITIATOR_ID_X_Y(0, 2)
+@@ -395,14 +395,14 @@ enum axi_id {
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0	RAZWI_INITIATOR_ID_X_Y(9, 3)
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1	RAZWI_INITIATOR_ID_X_Y(0, 4)
+ #define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1	RAZWI_INITIATOR_ID_X_Y(9, 4)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2	RAZWI_INITIATOR_ID_X_Y(1, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC5		RAZWI_INITIATOR_ID_X_Y(2, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_MME2_0		RAZWI_INITIATOR_ID_X_Y(3, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_MME2_1		RAZWI_INITIATOR_ID_X_Y(4, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_MME3_0		RAZWI_INITIATOR_ID_X_Y(5, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_MME3_1		RAZWI_INITIATOR_ID_X_Y(6, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC6		RAZWI_INITIATOR_ID_X_Y(7, 5)
+-#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5	RAZWI_INITIATOR_ID_X_Y(8, 5)
++#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2	RAZWI_INITIATOR_ID_X_Y(1, 6)
++#define RAZWI_INITIATOR_ID_X_Y_TPC5		RAZWI_INITIATOR_ID_X_Y(2, 6)
++#define RAZWI_INITIATOR_ID_X_Y_MME2_0		RAZWI_INITIATOR_ID_X_Y(3, 6)
++#define RAZWI_INITIATOR_ID_X_Y_MME2_1		RAZWI_INITIATOR_ID_X_Y(4, 6)
++#define RAZWI_INITIATOR_ID_X_Y_MME3_0		RAZWI_INITIATOR_ID_X_Y(5, 6)
++#define RAZWI_INITIATOR_ID_X_Y_MME3_1		RAZWI_INITIATOR_ID_X_Y(6, 6)
++#define RAZWI_INITIATOR_ID_X_Y_TPC6		RAZWI_INITIATOR_ID_X_Y(7, 6)
++#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5	RAZWI_INITIATOR_ID_X_Y(8, 6)
+ 
+ #define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT                           1
+ 
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 6b81c04ab5e29..47159b31e6b39 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -367,7 +367,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ 	}
+ 	rcu_read_unlock();
+ 
+-	while (unlikely(txq >= ndev->real_num_tx_queues))
++	while (txq >= ndev->real_num_tx_queues)
+ 		txq -= ndev->real_num_tx_queues;
+ 
+ 	return txq;
+diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
+index 1a2b6910509ca..92c966ac34c20 100644
+--- a/drivers/nvme/host/fc.c
++++ b/drivers/nvme/host/fc.c
+@@ -2158,6 +2158,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
+ 	struct nvme_fc_fcp_op *aen_op;
+ 	int i;
+ 
++	cancel_work_sync(&ctrl->ctrl.async_event_work);
+ 	aen_op = ctrl->aen_ops;
+ 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
+ 		__nvme_fc_exit_request(ctrl, aen_op);
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 6c07bb55b0f83..4a0bc8927048a 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -809,6 +809,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
+ 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+ 	}
+ 	if (ctrl->async_event_sqe.data) {
++		cancel_work_sync(&ctrl->ctrl.async_event_work);
+ 		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ 				sizeof(struct nvme_command), DMA_TO_DEVICE);
+ 		ctrl->async_event_sqe.data = NULL;
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index f1f66bf96cbb9..24467eea73999 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -1567,6 +1567,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
+ static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
+ {
+ 	if (to_tcp_ctrl(ctrl)->async_req.pdu) {
++		cancel_work_sync(&ctrl->async_event_work);
+ 		nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
+ 		to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
+ 	}
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 6344e73c93548..9c4f257962423 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -583,6 +583,9 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
+ 	if (ret)
+ 		goto out_put_ctrl;
+ 
++	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
++	WARN_ON_ONCE(!changed);
++
+ 	ret = -ENOMEM;
+ 
+ 	ctrl->ctrl.sqsize = opts->queue_size - 1;
+diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
+index cb2dd3230fa76..507f79d14adb8 100644
+--- a/drivers/phy/ti/phy-omap-usb2.c
++++ b/drivers/phy/ti/phy-omap-usb2.c
+@@ -22,10 +22,15 @@
+ #include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
+ #include <linux/of_platform.h>
++#include <linux/sys_soc.h>
+ 
+ #define USB2PHY_ANA_CONFIG1		0x4c
+ #define USB2PHY_DISCON_BYP_LATCH	BIT(31)
+ 
++#define USB2PHY_CHRG_DET			0x14
++#define USB2PHY_CHRG_DET_USE_CHG_DET_REG	BIT(29)
++#define USB2PHY_CHRG_DET_DIS_CHG_DET		BIT(28)
++
+ /* SoC Specific USB2_OTG register definitions */
+ #define AM654_USB2_OTG_PD		BIT(8)
+ #define AM654_USB2_VBUS_DET_EN		BIT(5)
+@@ -43,6 +48,7 @@
+ #define OMAP_USB2_HAS_START_SRP			BIT(0)
+ #define OMAP_USB2_HAS_SET_VBUS			BIT(1)
+ #define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT	BIT(2)
++#define OMAP_USB2_DISABLE_CHRG_DET		BIT(3)
+ 
+ struct omap_usb {
+ 	struct usb_phy		phy;
+@@ -236,6 +242,13 @@ static int omap_usb_init(struct phy *x)
+ 		omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
+ 	}
+ 
++	if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) {
++		val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET);
++		val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG |
++		       USB2PHY_CHRG_DET_DIS_CHG_DET;
++		omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -329,6 +342,26 @@ static const struct of_device_id omap_usb2_id_table[] = {
+ };
+ MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
+ 
++static void omap_usb2_init_errata(struct omap_usb *phy)
++{
++	static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
++		{ .family = "AM65X", .revision = "SR1.0" },
++		{ /* sentinel */ }
++	};
++
++	/*
++	 * Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by
++	 * Default Without VBUS Presence.
++	 *
++	 * AM654x SR1.0 has a silicon bug due to which D+ is pulled high after
++	 * POR, which could cause enumeration failure with some USB hubs.
++	 * Disabling the USB2_PHY Charger Detect function will put D+
++	 * into the normal state.
++	 */
++	if (soc_device_match(am65x_sr10_soc_devices))
++		phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
++}
++
+ static int omap_usb2_probe(struct platform_device *pdev)
+ {
+ 	struct omap_usb	*phy;
+@@ -366,14 +399,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
+ 	phy->mask		= phy_data->mask;
+ 	phy->power_on		= phy_data->power_on;
+ 	phy->power_off		= phy_data->power_off;
++	phy->flags		= phy_data->flags;
+ 
+-	if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
+-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-		phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+-		if (IS_ERR(phy->phy_base))
+-			return PTR_ERR(phy->phy_base);
+-		phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
+-	}
++	omap_usb2_init_errata(phy);
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(phy->phy_base))
++		return PTR_ERR(phy->phy_base);
+ 
+ 	phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
+ 							"syscon-phy-power");
+diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
+index e4c422d806bee..b9f8514909bf0 100644
+--- a/drivers/rapidio/Kconfig
++++ b/drivers/rapidio/Kconfig
+@@ -37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
+ config RAPIDIO_DMA_ENGINE
+ 	bool "DMA Engine support for RapidIO"
+ 	depends on RAPIDIO
+-	select DMADEVICES
++	depends on DMADEVICES
+ 	select DMA_ENGINE
+ 	help
+ 	  Say Y here if you want to use DMA Engine frameork for RapidIO data
+diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
+index 638329bd0745e..62ad7c4e7e7c8 100644
+--- a/drivers/regulator/pwm-regulator.c
++++ b/drivers/regulator/pwm-regulator.c
+@@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
+ 		return ret;
+ 	}
+ 
+-	drvdata->state			= -EINVAL;
++	drvdata->state			= -ENOTRECOVERABLE;
+ 	drvdata->duty_cycle_table	= duty_cycle_table;
+ 	drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
+ 	drvdata->desc.n_voltages	= length / sizeof(*duty_cycle_table);
+diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
+index 1b835398feec3..d1e3ee9ddf287 100644
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -1685,9 +1685,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ 	*nr_apqns = 0;
+ 
+ 	/* fetch status of all crypto cards */
+-	device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+-				      sizeof(struct zcrypt_device_status_ext),
+-				      GFP_KERNEL);
++	device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
++				       sizeof(struct zcrypt_device_status_ext),
++				       GFP_KERNEL);
+ 	if (!device_status)
+ 		return -ENOMEM;
+ 	zcrypt_device_status_mask_ext(device_status);
+@@ -1755,7 +1755,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ 		verify = 0;
+ 	}
+ 
+-	kfree(device_status);
++	kvfree(device_status);
+ 	return rc;
+ }
+ EXPORT_SYMBOL(cca_findcard2);
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index e00dc4693fcbd..589ddf003886e 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -634,8 +634,6 @@ free_fp:
+ 	fc_frame_free(fp);
+ out:
+ 	kref_put(&rdata->kref, fc_rport_destroy);
+-	if (!IS_ERR(fp))
+-		fc_frame_free(fp);
+ }
+ 
+ /**
+diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
+index daf951b0b3f55..13ad2b3d314e2 100644
+--- a/drivers/scsi/libsas/sas_discover.c
++++ b/drivers/scsi/libsas/sas_discover.c
+@@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
+ 		pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
+ 			dev_name(sas_ha->dev),
+ 			SAS_ADDR(dev->sas_addr), res);
++		return res;
+ 	}
+ 	set_bit(SAS_DEV_FOUND, &dev->state);
+ 	kref_get(&dev->kref);
+-	return res;
++	return 0;
+ }
+ 
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 3d670568a2760..519c7be404e75 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -3512,6 +3512,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
+ 				FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
+ 	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
+ 	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
++	prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
++	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
++	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
+ 
+ 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
+ 			      "Issue RDF:       did:x%x",
+@@ -4644,7 +4647,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ out:
+ 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
+ 		spin_lock_irq(shost->host_lock);
+-		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
++		if (mbox)
++			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
++		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
+ 		spin_unlock_irq(shost->host_lock);
+ 
+ 		/* If the node is not being used by another discovery thread,
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 6dfff03765471..c7085769170d7 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -4797,7 +4797,7 @@ struct send_frame_wqe {
+ 	uint32_t fc_hdr_wd5;           /* word 15 */
+ };
+ 
+-#define ELS_RDF_REG_TAG_CNT		1
++#define ELS_RDF_REG_TAG_CNT		4
+ struct lpfc_els_rdf_reg_desc {
+ 	struct fc_df_desc_fpin_reg	reg_desc;	/* descriptor header */
+ 	__be32				desc_tags[ELS_RDF_REG_TAG_CNT];
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index b7cbc312843e9..da9fd8a5f8cae 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -818,7 +818,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
+ 
+ 		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
+ 		if (res)
+-			return res;
++			goto ex_err;
+ 		ccb = &pm8001_ha->ccb_info[ccb_tag];
+ 		ccb->device = pm8001_dev;
+ 		ccb->ccb_tag = ccb_tag;
+diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
+index b6d79cd156fb5..da1153ec9f0e3 100644
+--- a/drivers/spi/spi-loopback-test.c
++++ b/drivers/spi/spi-loopback-test.c
+@@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = {
+ 	{
+ 		.description	= "tx/rx-transfer - crossing PAGE_SIZE",
+ 		.fill_option	= FILL_COUNT_8,
+-		.iterate_len    = { ITERATE_MAX_LEN },
++		.iterate_len    = { ITERATE_LEN },
+ 		.iterate_tx_align = ITERATE_ALIGN,
+ 		.iterate_rx_align = ITERATE_ALIGN,
+ 		.transfer_count = 1,
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 5c5a95792c0d3..65ca552654794 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1305,8 +1305,6 @@ out:
+ 	if (msg->status && ctlr->handle_err)
+ 		ctlr->handle_err(ctlr, msg);
+ 
+-	spi_res_release(ctlr, msg);
+-
+ 	spi_finalize_current_message(ctlr);
+ 
+ 	return ret;
+@@ -1694,6 +1692,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
+ 
+ 	spi_unmap_msg(ctlr, mesg);
+ 
++	/* In the prepare_message callback the spi bus has the opportunity to
++	 * split a transfer to smaller chunks.
++	 * Release split transfers here since spi_map_msg is done on the
++	 * split transfers.
++	 */
++	spi_res_release(ctlr, mesg);
++
+ 	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+ 		ret = ctlr->unprepare_message(ctlr, mesg);
+ 		if (ret) {
+diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
+index b451a5aa90b50..2db6532d39230 100644
+--- a/drivers/thunderbolt/eeprom.c
++++ b/drivers/thunderbolt/eeprom.c
+@@ -7,6 +7,7 @@
+  */
+ 
+ #include <linux/crc32.h>
++#include <linux/delay.h>
+ #include <linux/property.h>
+ #include <linux/slab.h>
+ #include "tb.h"
+@@ -389,8 +390,8 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
+ 		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
+ 		if (pos + 1 == drom_size || pos + entry->len > drom_size
+ 				|| !entry->len) {
+-			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
+-			return -EIO;
++			tb_sw_warn(sw, "DROM buffer overrun\n");
++			return -EILSEQ;
+ 		}
+ 
+ 		switch (entry->type) {
+@@ -526,7 +527,8 @@ int tb_drom_read(struct tb_switch *sw)
+ 	u16 size;
+ 	u32 crc;
+ 	struct tb_drom_header *header;
+-	int res;
++	int res, retries = 1;
++
+ 	if (sw->drom)
+ 		return 0;
+ 
+@@ -611,7 +613,17 @@ parse:
+ 		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
+ 			header->device_rom_revision);
+ 
+-	return tb_drom_parse_entries(sw);
++	res = tb_drom_parse_entries(sw);
++	/* If the DROM parsing fails, wait a moment and retry once */
++	if (res == -EILSEQ && retries--) {
++		tb_sw_warn(sw, "parsing DROM failed, retrying\n");
++		msleep(100);
++		res = tb_drom_read_n(sw, 0, sw->drom, size);
++		if (!res)
++			goto parse;
++	}
++
++	return res;
+ err:
+ 	kfree(sw->drom);
+ 	sw->drom = NULL;
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 1a74d511b02a5..81c0b67f22640 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5566,6 +5566,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_4 },
+ 
++	/*
++	 * Realtek RealManage
++	 */
++	{	PCI_VENDOR_ID_REALTEK, 0x816a,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0, pbn_b0_1_115200 },
++
++	{	PCI_VENDOR_ID_REALTEK, 0x816b,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0, pbn_b0_1_115200 },
++
+ 	/* Fintek PCI serial cards */
+ 	{ PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
+ 	{ PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 5f3daabdc916e..ef1cdc82bc1f1 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1914,24 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
+ 	return uart_console(port) && (port->cons->flags & CON_ENABLED);
+ }
+ 
+-static void __uart_port_spin_lock_init(struct uart_port *port)
++static void uart_port_spin_lock_init(struct uart_port *port)
+ {
+ 	spin_lock_init(&port->lock);
+ 	lockdep_set_class(&port->lock, &port_lock_key);
+ }
+ 
+-/*
+- * Ensure that the serial console lock is initialised early.
+- * If this port is a console, then the spinlock is already initialised.
+- */
+-static inline void uart_port_spin_lock_init(struct uart_port *port)
+-{
+-	if (uart_console(port))
+-		return;
+-
+-	__uart_port_spin_lock_init(port);
+-}
+-
+ #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
+ /**
+  *	uart_console_write - write a console message to a serial port
+@@ -2084,7 +2072,15 @@ uart_set_options(struct uart_port *port, struct console *co,
+ 	struct ktermios termios;
+ 	static struct ktermios dummy;
+ 
+-	uart_port_spin_lock_init(port);
++	/*
++	 * Ensure that the serial-console lock is initialised early.
++	 *
++	 * Note that the console-enabled check is needed because of kgdboc,
++	 * which can end up calling uart_set_options() for an already enabled
++	 * console via tty_find_polling_driver() and uart_poll_init().
++	 */
++	if (!uart_console_enabled(port) && !port->console_reinit)
++		uart_port_spin_lock_init(port);
+ 
+ 	memset(&termios, 0, sizeof(struct ktermios));
+ 
+@@ -2375,13 +2371,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
+ 		/* Power up port for set_mctrl() */
+ 		uart_change_pm(state, UART_PM_STATE_ON);
+ 
+-		/*
+-		 * If this driver supports console, and it hasn't been
+-		 * successfully registered yet, initialise spin lock for it.
+-		 */
+-		if (port->cons && !(port->cons->flags & CON_ENABLED))
+-			__uart_port_spin_lock_init(port);
+-
+ 		/*
+ 		 * Ensure that the modem control lines are de-activated.
+ 		 * keep the DTR setting that is set in uart_set_options()
+@@ -2798,10 +2787,12 @@ static ssize_t console_store(struct device *dev,
+ 		if (oldconsole && !newconsole) {
+ 			ret = unregister_console(uport->cons);
+ 		} else if (!oldconsole && newconsole) {
+-			if (uart_console(uport))
++			if (uart_console(uport)) {
++				uport->console_reinit = 1;
+ 				register_console(uport->cons);
+-			else
++			} else {
+ 				ret = -ENOENT;
++			}
+ 		}
+ 	} else {
+ 		ret = -ENXIO;
+@@ -2897,7 +2888,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
+ 		goto out;
+ 	}
+ 
+-	uart_port_spin_lock_init(uport);
++	/*
++	 * If this port is in use as a console then the spinlock is already
++	 * initialised.
++	 */
++	if (!uart_console_enabled(uport))
++		uart_port_spin_lock_init(uport);
+ 
+ 	if (uport->cons && uport->dev)
+ 		of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 084c48c5848fc..67cbd42421bee 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -827,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo
+ 	if (rv < 0)
+ 		return rv;
+ 
++	if (!usblp->present) {
++		count = -ENODEV;
++		goto done;
++	}
++
+ 	if ((avail = usblp->rstatus) < 0) {
+ 		printk(KERN_ERR "usblp%d: error %d reading from printer\n",
+ 		    usblp->minor, (int)avail);
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 2f068e525a374..4ee8105310989 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -397,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Generic RTL8153 based ethernet adapters */
+ 	{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* SONiX USB DEVICE Touchpad */
++	{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
++			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ 	/* Action Semiconductor flash disk */
+ 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index cf2b7ae93b7e9..0e5c56e065591 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -22,6 +22,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/usb.h>
+ #include <linux/usb/hcd.h>
++#include <linux/usb/otg.h>
+ #include <linux/moduleparam.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/debugfs.h>
+diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
+index ce0eaf7d7c12a..087402aec5cbe 100644
+--- a/drivers/usb/host/ehci-hub.c
++++ b/drivers/usb/host/ehci-hub.c
+@@ -14,7 +14,6 @@
+  */
+ 
+ /*-------------------------------------------------------------------------*/
+-#include <linux/usb/otg.h>
+ 
+ #define	PORT_WAKE_BITS	(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index d592071119ba6..13696f03f800d 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -662,8 +662,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
+ 	if (devinfo->resetting) {
+ 		cmnd->result = DID_ERROR << 16;
+ 		cmnd->scsi_done(cmnd);
+-		spin_unlock_irqrestore(&devinfo->lock, flags);
+-		return 0;
++		goto zombie;
+ 	}
+ 
+ 	/* Find a free uas-tag */
+@@ -699,6 +698,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
+ 		cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
+ 
+ 	err = uas_submit_urbs(cmnd, devinfo);
++	/*
++	 * in case of fatal errors the SCSI layer is peculiar
++	 * In case of fatal errors the SCSI layer is peculiar:
++	 * a command that has finished is a success for the purpose
++	 * of queueing, no matter how fatal the error.
++	if (err == -ENODEV) {
++		cmnd->result = DID_ERROR << 16;
++		cmnd->scsi_done(cmnd);
++		goto zombie;
++	}
+ 	if (err) {
+ 		/* If we did nothing, give up now */
+ 		if (cmdinfo->state & SUBMIT_STATUS_URB) {
+@@ -709,6 +718,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
+ 	}
+ 
+ 	devinfo->cmnd[idx] = cmnd;
++zombie:
+ 	spin_unlock_irqrestore(&devinfo->lock, flags);
+ 	return 0;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index 2999217c81090..e2af10301c779 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -216,14 +216,18 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
+ 					    con->partner_altmode[i] == altmode);
+ }
+ 
+-static u8 ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
++static int ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
+ {
+ 	u8 mode = 1;
+ 	int i;
+ 
+-	for (i = 0; alt[i]; i++)
++	for (i = 0; alt[i]; i++) {
++		if (i > MODE_DISCOVERY_MAX)
++			return -ERANGE;
++
+ 		if (alt[i]->svid == svid)
+ 			mode++;
++	}
+ 
+ 	return mode;
+ }
+@@ -258,8 +262,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
+ 			goto err;
+ 		}
+ 
+-		desc->mode = ucsi_altmode_next_mode(con->port_altmode,
+-						    desc->svid);
++		ret = ucsi_altmode_next_mode(con->port_altmode, desc->svid);
++		if (ret < 0)
++			return ret;
++
++		desc->mode = ret;
+ 
+ 		switch (desc->svid) {
+ 		case USB_TYPEC_DP_SID:
+@@ -292,8 +299,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
+ 			goto err;
+ 		}
+ 
+-		desc->mode = ucsi_altmode_next_mode(con->partner_altmode,
+-						    desc->svid);
++		ret = ucsi_altmode_next_mode(con->partner_altmode, desc->svid);
++		if (ret < 0)
++			return ret;
++
++		desc->mode = ret;
+ 
+ 		alt = typec_partner_register_altmode(con->partner, desc);
+ 		if (IS_ERR(alt)) {
+diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
+index c0aca2f0f23f0..fbfe8f5933af8 100644
+--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
+@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
+ 	if (ret)
+ 		goto out_clear_bit;
+ 
+-	if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
++	if (!wait_for_completion_timeout(&ua->complete, 60 * HZ))
+ 		ret = -ETIMEDOUT;
+ 
+ out_clear_bit:
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index b36bfe10c712c..09cb46e94f405 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -2018,7 +2018,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	struct fb_var_screeninfo var = info->var;
+ 	int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
+ 
+-	if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
++	if (p->userfont && FNTSIZE(vc->vc_font.data)) {
+ 		int size;
+ 		int pitch = PITCH(vc->vc_font.width);
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index ce95801e9b664..7708175062eba 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -1017,6 +1017,8 @@ handle_mnt_opt:
+ 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
+ 		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
+ 				       full_path, fid);
++		if (rc == -EREMOTE)
++			rc = 0;
+ 		if (rc) {
+ 			cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
+ 				 __func__, rc);
+@@ -1025,6 +1027,8 @@ handle_mnt_opt:
+ 	} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ 		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
+ 				       full_path, fid);
++		if (rc == -EREMOTE)
++			rc = 0;
+ 		if (rc) {
+ 			cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
+ 				 __func__, rc);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 6e9017e6a8197..403e8033c974b 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -3463,6 +3463,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
+ 	unsigned long align = offset | iov_iter_alignment(iter);
+ 	struct block_device *bdev = inode->i_sb->s_bdev;
+ 
++	if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
++		return 1;
++
+ 	if (align & blocksize_mask) {
+ 		if (bdev)
+ 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 98736d0598b8d..0fde35611df18 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -2375,6 +2375,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+ 	if (unlikely(nid >= nm_i->max_nid))
+ 		nid = 0;
+ 
++	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
++		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
++
+ 	/* Enough entries */
+ 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
+ 		return 0;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 45e0585e0667c..08b1fb0a9225a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3272,8 +3272,10 @@ static int _nfs4_do_setattr(struct inode *inode,
+ 
+ 	/* Servers should only apply open mode checks for file size changes */
+ 	truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
+-	if (!truncate)
++	if (!truncate) {
++		nfs4_inode_make_writeable(inode);
+ 		goto zero_stateid;
++	}
+ 
+ 	if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
+ 		/* Use that stateid */
+@@ -7271,7 +7273,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
+ 	err = nfs4_set_lock_state(state, fl);
+ 	if (err != 0)
+ 		return err;
+-	err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
++	do {
++		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
++		if (err != -NFS4ERR_DELAY)
++			break;
++		ssleep(1);
++	} while (err == -NFS4ERR_DELAY);
+ 	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
+ }
+ 
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 191772d4a4d7d..67ba16c7e118b 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -141,7 +141,6 @@ enum cpuhp_state {
+ 	/* Must be the last timer callback */
+ 	CPUHP_AP_DUMMY_TIMER_STARTING,
+ 	CPUHP_AP_ARM_XEN_STARTING,
+-	CPUHP_AP_ARM_KVMPV_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_STARTING,
+ 	CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ 	CPUHP_AP_ARM64_ISNDEP_STARTING,
+diff --git a/include/linux/dax.h b/include/linux/dax.h
+index 6904d4e0b2e0a..43b39ab9de1a9 100644
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -58,6 +58,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev)
+ {
+ 	__set_dax_synchronous(dax_dev);
+ }
++bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
++		int blocksize, sector_t start, sector_t len);
+ /*
+  * Check if given mapping is supported by the file / underlying device.
+  */
+@@ -104,6 +106,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
+ static inline void set_dax_synchronous(struct dax_device *dax_dev)
+ {
+ }
++static inline bool dax_supported(struct dax_device *dax_dev,
++		struct block_device *bdev, int blocksize, sector_t start,
++		sector_t len)
++{
++	return false;
++}
+ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
+ 				struct dax_device *dax_dev)
+ {
+@@ -189,14 +197,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+ }
+ #endif
+ 
++#if IS_ENABLED(CONFIG_DAX)
+ int dax_read_lock(void);
+ void dax_read_unlock(int id);
++#else
++static inline int dax_read_lock(void)
++{
++	return 0;
++}
++
++static inline void dax_read_unlock(int id)
++{
++}
++#endif /* CONFIG_DAX */
+ bool dax_alive(struct dax_device *dax_dev);
+ void *dax_get_private(struct dax_device *dax_dev);
+ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ 		void **kaddr, pfn_t *pfn);
+-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+-		int blocksize, sector_t start, sector_t len);
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ 		size_t bytes, struct iov_iter *i);
+ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
+index d03071732db4a..7c522fdd9ea73 100644
+--- a/include/linux/i2c-algo-pca.h
++++ b/include/linux/i2c-algo-pca.h
+@@ -53,6 +53,20 @@
+ #define I2C_PCA_CON_SI		0x08 /* Serial Interrupt */
+ #define I2C_PCA_CON_CR		0x07 /* Clock Rate (MASK) */
+ 
++/**
++ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
++ * @mode: Configured i2c bus mode
++ * @tlow: Configured SCL LOW period
++ * @thi: Configured SCL HIGH period
++ * @clock_freq: The configured clock frequency
++ */
++struct pca_i2c_bus_settings {
++	int mode;
++	int tlow;
++	int thi;
++	int clock_freq;
++};
++
+ struct i2c_algo_pca_data {
+ 	void 				*data;	/* private low level data */
+ 	void (*write_byte)		(void *data, int reg, int val);
+@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
+ 	 * For PCA9665, use the frequency you want here. */
+ 	unsigned int			i2c_clock;
+ 	unsigned int			chip;
++	struct pca_i2c_bus_settings		bus_settings;
+ };
+ 
+ int i2c_pca_add_bus(struct i2c_adapter *);
+diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
+index 5e033fe1ff4e9..5fda40f97fe91 100644
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+ 	 * anything we did within this RCU-sched read-size critical section.
+ 	 */
+ 	if (likely(rcu_sync_is_idle(&sem->rss)))
+-		__this_cpu_inc(*sem->read_count);
++		this_cpu_inc(*sem->read_count);
+ 	else
+ 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
+ 	/*
+@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+ 	 * Same as in percpu_down_read().
+ 	 */
+ 	if (likely(rcu_sync_is_idle(&sem->rss)))
+-		__this_cpu_inc(*sem->read_count);
++		this_cpu_inc(*sem->read_count);
+ 	else
+ 		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ 	preempt_enable();
+@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+ 	 * Same as in percpu_down_read().
+ 	 */
+ 	if (likely(rcu_sync_is_idle(&sem->rss))) {
+-		__this_cpu_dec(*sem->read_count);
++		this_cpu_dec(*sem->read_count);
+ 	} else {
+ 		/*
+ 		 * slowpath; reader will only ever wake a single blocked
+@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+ 		 * aggregate zero, as that is the only time it matters) they
+ 		 * will also see our critical section.
+ 		 */
+-		__this_cpu_dec(*sem->read_count);
++		this_cpu_dec(*sem->read_count);
+ 		rcuwait_wake_up(&sem->writer);
+ 	}
+ 	preempt_enable();
+diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
+index 791f4844efeb9..d3266caefe049 100644
+--- a/include/linux/serial_core.h
++++ b/include/linux/serial_core.h
+@@ -248,6 +248,7 @@ struct uart_port {
+ 
+ 	unsigned char		hub6;			/* this should be in the 8250 driver */
+ 	unsigned char		suspended;
++	unsigned char		console_reinit;
+ 	const char		*name;			/* port name */
+ 	struct attribute_group	*attr_group;		/* port specific attributes */
+ 	const struct attribute_group **tty_groups;	/* all attributes (serial core use only) */
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 3ce7f0f5aa929..ca765062787b0 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -1205,6 +1205,8 @@ struct snd_soc_pcm_runtime {
+ 	     ((i) < (rtd)->num_cpus + (rtd)->num_codecs) &&		\
+ 		     ((dai) = (rtd)->dais[i]);				\
+ 	     (i)++)
++#define for_each_rtd_dais_rollback(rtd, i, dai)		\
++	for (; (--(i) >= 0) && ((dai) = (rtd)->dais[i]);)
+ 
+ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
+ 
+@@ -1373,6 +1375,8 @@ void snd_soc_unregister_dai(struct snd_soc_dai *dai);
+ 
+ struct snd_soc_dai *snd_soc_find_dai(
+ 	const struct snd_soc_dai_link_component *dlc);
++struct snd_soc_dai *snd_soc_find_dai_with_mutex(
++	const struct snd_soc_dai_link_component *dlc);
+ 
+ #include <sound/soc-dai.h>
+ 
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 4fdf303165827..65fd95f9784ce 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -789,9 +789,10 @@ struct kvm_ppc_resize_hpt {
+ #define KVM_VM_PPC_HV 1
+ #define KVM_VM_PPC_PR 2
+ 
+-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
+-#define KVM_VM_MIPS_TE		0
++/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
++#define KVM_VM_MIPS_AUTO	0
+ #define KVM_VM_MIPS_VZ		1
++#define KVM_VM_MIPS_TE		2
+ 
+ #define KVM_S390_SIE_PAGE_OFFSET 1
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 72af5d37e9ff1..a264246ff85aa 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -2108,6 +2108,9 @@ static void kill_kprobe(struct kprobe *p)
+ 
+ 	lockdep_assert_held(&kprobe_mutex);
+ 
++	if (WARN_ON_ONCE(kprobe_gone(p)))
++		return;
++
+ 	p->flags |= KPROBE_FLAG_GONE;
+ 	if (kprobe_aggrprobe(p)) {
+ 		/*
+@@ -2365,7 +2368,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ 	mutex_lock(&kprobe_mutex);
+ 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ 		head = &kprobe_table[i];
+-		hlist_for_each_entry(p, head, hlist)
++		hlist_for_each_entry(p, head, hlist) {
++			if (kprobe_gone(p))
++				continue;
++
+ 			if (within_module_init((unsigned long)p->addr, mod) ||
+ 			    (checkcore &&
+ 			     within_module_core((unsigned long)p->addr, mod))) {
+@@ -2382,6 +2388,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
+ 				 */
+ 				kill_kprobe(p);
+ 			}
++		}
+ 	}
+ 	if (val == MODULE_STATE_GOING)
+ 		remove_module_kprobe_blacklist(mod);
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 29a8de4c50b90..a611dedac7d60 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3923,13 +3923,18 @@ static int separate_irq_context(struct task_struct *curr,
+ static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ 			     enum lock_usage_bit new_bit)
+ {
+-	unsigned int new_mask = 1 << new_bit, ret = 1;
++	unsigned int old_mask, new_mask, ret = 1;
+ 
+ 	if (new_bit >= LOCK_USAGE_STATES) {
+ 		DEBUG_LOCKS_WARN_ON(1);
+ 		return 0;
+ 	}
+ 
++	if (new_bit == LOCK_USED && this->read)
++		new_bit = LOCK_USED_READ;
++
++	new_mask = 1 << new_bit;
++
+ 	/*
+ 	 * If already set then do not dirty the cacheline,
+ 	 * nor do any checks:
+@@ -3942,13 +3947,22 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ 	/*
+ 	 * Make sure we didn't race:
+ 	 */
+-	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
+-		graph_unlock();
+-		return 1;
+-	}
++	if (unlikely(hlock_class(this)->usage_mask & new_mask))
++		goto unlock;
+ 
++	old_mask = hlock_class(this)->usage_mask;
+ 	hlock_class(this)->usage_mask |= new_mask;
+ 
++	/*
++	 * Save one usage_traces[] entry and map both LOCK_USED and
++	 * LOCK_USED_READ onto the same entry.
++	 */
++	if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
++		if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
++			goto unlock;
++		new_bit = LOCK_USED;
++	}
++
+ 	if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
+ 		return 0;
+ 
+@@ -3962,6 +3976,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
+ 			return 0;
+ 	}
+ 
++unlock:
+ 	graph_unlock();
+ 
+ 	/*
+@@ -4896,12 +4911,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
+ {
+ #ifdef CONFIG_PROVE_LOCKING
+ 	struct lock_class *class = look_up_lock_class(lock, subclass);
++	unsigned long mask = LOCKF_USED;
+ 
+ 	/* if it doesn't have a class (yet), it certainly hasn't been used yet */
+ 	if (!class)
+ 		return;
+ 
+-	if (!(class->usage_mask & LOCK_USED))
++	/*
++	 * READ locks only conflict with USED, such that if we only ever use
++	 * READ locks, there is no deadlock possible -- RCU.
++	 */
++	if (!hlock->read)
++		mask |= LOCKF_USED_READ;
++
++	if (!(class->usage_mask & mask))
+ 		return;
+ 
+ 	hlock->class_idx = class - lock_classes;
+diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
+index baca699b94e91..b0be1560ed17a 100644
+--- a/kernel/locking/lockdep_internals.h
++++ b/kernel/locking/lockdep_internals.h
+@@ -19,6 +19,7 @@ enum lock_usage_bit {
+ #include "lockdep_states.h"
+ #undef LOCKDEP_STATE
+ 	LOCK_USED,
++	LOCK_USED_READ,
+ 	LOCK_USAGE_STATES
+ };
+ 
+@@ -40,6 +41,7 @@ enum {
+ #include "lockdep_states.h"
+ #undef LOCKDEP_STATE
+ 	__LOCKF(USED)
++	__LOCKF(USED_READ)
+ };
+ 
+ #define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
+diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
+index 8bbafe3e5203d..70a32a576f3f2 100644
+--- a/kernel/locking/percpu-rwsem.c
++++ b/kernel/locking/percpu-rwsem.c
+@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);
+ 
+ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+ {
+-	__this_cpu_inc(*sem->read_count);
++	this_cpu_inc(*sem->read_count);
+ 
+ 	/*
+ 	 * Due to having preemption disabled the decrement happens on
+@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+ 	if (likely(!atomic_read_acquire(&sem->block)))
+ 		return true;
+ 
+-	__this_cpu_dec(*sem->read_count);
++	this_cpu_dec(*sem->read_count);
+ 
+ 	/* Prod writer to re-evaluate readers_active_check() */
+ 	rcuwait_wake_up(&sem->writer);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 78c84bee7e294..74300e337c3c7 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2048,7 +2048,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		put_page(page);
+ 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+ 		return;
+-	} else if (is_huge_zero_pmd(*pmd)) {
++	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ 		/*
+ 		 * FIXME: Do we want to invalidate secondary mmu by calling
+ 		 * mmu_notifier_invalidate_range() see comments below inside
+@@ -2142,30 +2142,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+ 		pte = pte_offset_map(&_pmd, addr);
+ 		BUG_ON(!pte_none(*pte));
+ 		set_pte_at(mm, addr, pte, entry);
+-		atomic_inc(&page[i]._mapcount);
+-		pte_unmap(pte);
+-	}
+-
+-	/*
+-	 * Set PG_double_map before dropping compound_mapcount to avoid
+-	 * false-negative page_mapped().
+-	 */
+-	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
+-		for (i = 0; i < HPAGE_PMD_NR; i++)
++		if (!pmd_migration)
+ 			atomic_inc(&page[i]._mapcount);
++		pte_unmap(pte);
+ 	}
+ 
+-	lock_page_memcg(page);
+-	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+-		/* Last compound_mapcount is gone. */
+-		__dec_lruvec_page_state(page, NR_ANON_THPS);
+-		if (TestClearPageDoubleMap(page)) {
+-			/* No need in mapcount reference anymore */
++	if (!pmd_migration) {
++		/*
++		 * Set PG_double_map before dropping compound_mapcount to avoid
++		 * false-negative page_mapped().
++		 */
++		if (compound_mapcount(page) > 1 &&
++		    !TestSetPageDoubleMap(page)) {
+ 			for (i = 0; i < HPAGE_PMD_NR; i++)
+-				atomic_dec(&page[i]._mapcount);
++				atomic_inc(&page[i]._mapcount);
++		}
++
++		lock_page_memcg(page);
++		if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
++			/* Last compound_mapcount is gone. */
++			__dec_lruvec_page_state(page, NR_ANON_THPS);
++			if (TestClearPageDoubleMap(page)) {
++				/* No need in mapcount reference anymore */
++				for (i = 0; i < HPAGE_PMD_NR; i++)
++					atomic_dec(&page[i]._mapcount);
++			}
+ 		}
++		unlock_page_memcg(page);
+ 	}
+-	unlock_page_memcg(page);
+ 
+ 	smp_wmb(); /* make pte visible before pmd */
+ 	pmd_populate(mm, pmd, pgtable);
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 4102034cd55a1..f16983394b228 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -2585,6 +2585,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
+ 		return page;		/* let do_swap_page report the error */
+ 
+ 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
++	if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
++		put_page(new_page);
++		new_page = NULL;
++	}
+ 	if (new_page) {
+ 		copy_user_highpage(new_page, page, address, vma);
+ 
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index 76c75a599da3f..e76de2067bfd1 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1557,6 +1557,20 @@ static int __ref __offline_pages(unsigned long start_pfn,
+ 		/* check again */
+ 		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+ 					    NULL, check_pages_isolated_cb);
++		/*
++		 * per-cpu pages are drained in start_isolate_page_range, but if
++		 * there are still pages that are not free, make sure that we
++		 * drain again, because when we isolated range we might
++		 * have raced with another thread that was adding pages to pcp
++		 * list.
++		 *
++		 * Forward progress should be still guaranteed because
++		 * pages on the pcp list can only belong to MOVABLE_ZONE
++		 * because has_unmovable_pages explicitly checks for
++		 * PageBuddy on freed pages on other zones.
++		 */
++		if (ret)
++			drain_all_pages(zone);
+ 	} while (ret);
+ 
+ 	/* Ok, all of our target is isolated.
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index f6d07c5f0d34d..5b4a28b2dbf56 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -170,6 +170,14 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
+  * pageblocks we may have modified and return -EBUSY to caller. This
+  * prevents two threads from simultaneously working on overlapping ranges.
+  *
++ * Please note that there is no strong synchronization with the page allocator
++ * either. Pages might be freed while their page blocks are marked ISOLATED.
++ * In some cases pages might still end up on pcp lists and that would allow
++ * for their allocation even when they are in fact isolated already. Depending
++ * on how strong a guarantee the caller needs, drain_all_pages might be needed
++ * (e.g. __offline_pages will need to call it after check for isolated range for
++ * a next retry).
++ *
+  * Return: the number of isolated pageblocks on success and -EBUSY if any part
+  * of range cannot be isolated.
+  */
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 696367b182221..d83e0032cb209 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1300,7 +1300,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
+ 
+ 	/* allocate chunk */
+ 	alloc_size = sizeof(struct pcpu_chunk) +
+-		BITS_TO_LONGS(region_size >> PAGE_SHIFT);
++		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
+ 	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
+ 	if (!chunk)
+ 		panic("%s: Failed to allocate %zu bytes\n", __func__,
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 749d239c62b2b..8b97bc615d8c0 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2619,6 +2619,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
+ 		unsigned long reclaimed;
+ 		unsigned long scanned;
+ 
++		/*
++		 * This loop can become CPU-bound when target memcgs
++		 * aren't eligible for reclaim - either because they
++		 * don't have any reclaimable pages, or because their
++		 * memory is explicitly protected. Avoid soft lockups.
++		 */
++		cond_resched();
++
+ 		switch (mem_cgroup_protected(target_memcg, memcg)) {
+ 		case MEMCG_PROT_MIN:
+ 			/*
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 7afe52bd038ba..c50bd7a7943ab 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -5988,9 +5988,13 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
+ 	if (skb_has_frag_list(skb))
+ 		skb_clone_fraglist(skb);
+ 
+-	if (k == 0) {
+-		/* split line is in frag list */
+-		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
++	/* split line is in frag list */
++	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
++		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
++		if (skb_has_frag_list(skb))
++			kfree_skb_list(skb_shinfo(skb)->frag_list);
++		kfree(data);
++		return -ENOMEM;
+ 	}
+ 	skb_release_data(skb);
+ 
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 939e445d5188c..5542e8061955f 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -605,8 +605,10 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 		if (!psize)
+ 			return -EINVAL;
+ 
+-		if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
++		if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
++			iov_iter_revert(&msg->msg_iter, psize);
+ 			return -ENOMEM;
++		}
+ 	} else {
+ 		offset = dfrag->offset;
+ 		psize = min_t(size_t, dfrag->data_len, avail_size);
+@@ -617,8 +619,10 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+ 	 */
+ 	ret = do_tcp_sendpages(ssk, page, offset, psize,
+ 			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
+-	if (ret <= 0)
++	if (ret <= 0) {
++		iov_iter_revert(&msg->msg_iter, psize);
+ 		return ret;
++	}
+ 
+ 	frag_truesize += ret;
+ 	if (!retransmission) {
+diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
+index c27123e6ba80c..4a67685c83eb4 100644
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -982,8 +982,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ 	p = xdr_inline_decode(xdr, len);
+ 	if (unlikely(p == NULL))
+ 		goto out_fail;
+-	dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
+-			req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
++	dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid,
++			req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p);
+ 
+ 	if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
+ 				sap, sizeof(address)) == 0)
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 75c646743df3e..ca89f24a1590b 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -933,6 +933,8 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
+ 
+ 	rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
+ 	rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
++
++	frwr_reset(req);
+ }
+ 
+ /* ASSUMPTION: the rb_allreqs list is stable for the duration,
+diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
+index 5ceb93010a973..aedcc3343719e 100644
+--- a/scripts/kconfig/qconf.cc
++++ b/scripts/kconfig/qconf.cc
+@@ -1263,7 +1263,7 @@ void ConfigInfoView::clicked(const QUrl &url)
+ 	}
+ 
+ 	free(result);
+-	delete data;
++	delete[] data;
+ }
+ 
+ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0b9907c9cd84f..77e2e6ede31dc 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2467,7 +2467,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+-	SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950),
+ 	SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+@@ -6005,6 +6004,40 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+ 		snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+ }
+ 
++
++static void alc294_gx502_toggle_output(struct hda_codec *codec,
++					struct hda_jack_callback *cb)
++{
++	/* The Windows driver sets the codec up in a very different way where
++	 * it appears to leave 0x10 = 0x8a20 set. For Linux we need to toggle it
++	 */
++	if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
++		alc_write_coef_idx(codec, 0x10, 0x8a20);
++	else
++		alc_write_coef_idx(codec, 0x10, 0x0a20);
++}
++
++static void alc294_fixup_gx502_hp(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	/* Pin 0x21: headphones/headset mic */
++	if (!is_jack_detectable(codec, 0x21))
++		return;
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PRE_PROBE:
++		snd_hda_jack_detect_enable_callback(codec, 0x21,
++				alc294_gx502_toggle_output);
++		break;
++	case HDA_FIXUP_ACT_INIT:
++		/* Make sure to start in a correct state, i.e. if
++		 * headphones have been plugged in before powering up the system
++		 */
++		alc294_gx502_toggle_output(codec, NULL);
++		break;
++	}
++}
++
+ static void  alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
+ 			      const struct hda_fixup *fix, int action)
+ {
+@@ -6185,6 +6218,9 @@ enum {
+ 	ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ 	ALC294_FIXUP_ASUS_HPE,
+ 	ALC294_FIXUP_ASUS_COEF_1B,
++	ALC294_FIXUP_ASUS_GX502_HP,
++	ALC294_FIXUP_ASUS_GX502_PINS,
++	ALC294_FIXUP_ASUS_GX502_VERBS,
+ 	ALC285_FIXUP_HP_GPIO_LED,
+ 	ALC285_FIXUP_HP_MUTE_LED,
+ 	ALC236_FIXUP_HP_MUTE_LED,
+@@ -6203,6 +6239,7 @@ enum {
+ 	ALC269_FIXUP_LEMOTE_A1802,
+ 	ALC269_FIXUP_LEMOTE_A190X,
+ 	ALC256_FIXUP_INTEL_NUC8_RUGGED,
++	ALC255_FIXUP_XIAOMI_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -7350,6 +7387,33 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ 	},
++	[ALC294_FIXUP_ASUS_GX502_PINS] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x03a11050 }, /* front HP mic */
++			{ 0x1a, 0x01a11830 }, /* rear external mic */
++			{ 0x21, 0x03211020 }, /* front HP out */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_GX502_VERBS
++	},
++	[ALC294_FIXUP_ASUS_GX502_VERBS] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* set 0x15 to HP-OUT ctrl */
++			{ 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
++			/* unmute the 0x15 amp */
++			{ 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC294_FIXUP_ASUS_GX502_HP
++	},
++	[ALC294_FIXUP_ASUS_GX502_HP] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc294_fixup_gx502_hp,
++	},
+ 	[ALC294_FIXUP_ASUS_COEF_1B] = {
+ 		.type = HDA_FIXUP_VERBS,
+ 		.v.verbs = (const struct hda_verb[]) {
+@@ -7539,6 +7603,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MODE
+ 	},
++	[ALC255_FIXUP_XIAOMI_HEADSET_MIC] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC289_FIXUP_ASUS_GA401
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7723,6 +7797,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+ 	SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
++	SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ 	SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+ 	SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
+@@ -7835,6 +7910,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
+ 	SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
+ 	SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
+@@ -8012,6 +8088,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
+ 	{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ 	{.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
++	{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
+index b0ba0d2acbdd6..56e952a904a39 100644
+--- a/sound/soc/codecs/rt1308-sdw.c
++++ b/sound/soc/codecs/rt1308-sdw.c
+@@ -684,8 +684,8 @@ static int rt1308_sdw_probe(struct sdw_slave *slave,
+ 
+ 	/* Regmap Initialization */
+ 	regmap = devm_regmap_init_sdw(slave, &rt1308_sdw_regmap);
+-	if (!regmap)
+-		return -EINVAL;
++	if (IS_ERR(regmap))
++		return PTR_ERR(regmap);
+ 
+ 	rt1308_sdw_init(&slave->dev, regmap, slave);
+ 
+diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
+index 4d14048d11976..1d24bf0407182 100644
+--- a/sound/soc/codecs/rt700-sdw.c
++++ b/sound/soc/codecs/rt700-sdw.c
+@@ -452,8 +452,8 @@ static int rt700_sdw_probe(struct sdw_slave *slave,
+ 
+ 	/* Regmap Initialization */
+ 	sdw_regmap = devm_regmap_init_sdw(slave, &rt700_sdw_regmap);
+-	if (!sdw_regmap)
+-		return -EINVAL;
++	if (IS_ERR(sdw_regmap))
++		return PTR_ERR(sdw_regmap);
+ 
+ 	regmap = devm_regmap_init(&slave->dev, NULL,
+ 		&slave->dev, &rt700_regmap);
+diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
+index 45b928954b580..7efff130a638c 100644
+--- a/sound/soc/codecs/rt711-sdw.c
++++ b/sound/soc/codecs/rt711-sdw.c
+@@ -452,8 +452,8 @@ static int rt711_sdw_probe(struct sdw_slave *slave,
+ 
+ 	/* Regmap Initialization */
+ 	sdw_regmap = devm_regmap_init_sdw(slave, &rt711_sdw_regmap);
+-	if (!sdw_regmap)
+-		return -EINVAL;
++	if (IS_ERR(sdw_regmap))
++		return PTR_ERR(sdw_regmap);
+ 
+ 	regmap = devm_regmap_init(&slave->dev, NULL,
+ 		&slave->dev, &rt711_regmap);
+diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
+index d11b23d6b240a..68a36739f1b0d 100644
+--- a/sound/soc/codecs/rt715-sdw.c
++++ b/sound/soc/codecs/rt715-sdw.c
+@@ -527,8 +527,8 @@ static int rt715_sdw_probe(struct sdw_slave *slave,
+ 
+ 	/* Regmap Initialization */
+ 	sdw_regmap = devm_regmap_init_sdw(slave, &rt715_sdw_regmap);
+-	if (!sdw_regmap)
+-		return -EINVAL;
++	if (IS_ERR(sdw_regmap))
++		return PTR_ERR(sdw_regmap);
+ 
+ 	regmap = devm_regmap_init(&slave->dev, NULL, &slave->dev,
+ 		&rt715_regmap);
+diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
+index 35fe8ee5bce9f..03fb50175d876 100644
+--- a/sound/soc/codecs/tlv320adcx140.c
++++ b/sound/soc/codecs/tlv320adcx140.c
+@@ -930,6 +930,8 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
+ 	if (!adcx140)
+ 		return -ENOMEM;
+ 
++	adcx140->dev = &i2c->dev;
++
+ 	adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
+ 						      "reset", GPIOD_OUT_LOW);
+ 	if (IS_ERR(adcx140->gpio_reset))
+@@ -957,7 +959,7 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
+ 			ret);
+ 		return ret;
+ 	}
+-	adcx140->dev = &i2c->dev;
++
+ 	i2c_set_clientdata(i2c, adcx140);
+ 
+ 	return devm_snd_soc_register_component(&i2c->dev,
+diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+index ca4900036ead9..bc50eda297ab7 100644
+--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
++++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
+@@ -181,7 +181,7 @@ static void skl_set_hda_codec_autosuspend_delay(struct snd_soc_card *card)
+ 	struct snd_soc_dai *dai;
+ 
+ 	for_each_card_rtds(card, rtd) {
+-		if (!strstr(rtd->dai_link->codecs->name, "ehdaudio"))
++		if (!strstr(rtd->dai_link->codecs->name, "ehdaudio0D0"))
+ 			continue;
+ 		dai = asoc_rtd_to_codec(rtd, 0);
+ 		hda_pvt = snd_soc_component_get_drvdata(dai->component);
+diff --git a/sound/soc/intel/haswell/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c
+index de80e19454c13..88c3f63bded90 100644
+--- a/sound/soc/intel/haswell/sst-haswell-dsp.c
++++ b/sound/soc/intel/haswell/sst-haswell-dsp.c
+@@ -243,92 +243,45 @@ static irqreturn_t hsw_irq(int irq, void *context)
+ 	return ret;
+ }
+ 
+-#define CSR_DEFAULT_VALUE 0x8480040E
+-#define ISC_DEFAULT_VALUE 0x0
+-#define ISD_DEFAULT_VALUE 0x0
+-#define IMC_DEFAULT_VALUE 0x7FFF0003
+-#define IMD_DEFAULT_VALUE 0x7FFF0003
+-#define IPCC_DEFAULT_VALUE 0x0
+-#define IPCD_DEFAULT_VALUE 0x0
+-#define CLKCTL_DEFAULT_VALUE 0x7FF
+-#define CSR2_DEFAULT_VALUE 0x0
+-#define LTR_CTRL_DEFAULT_VALUE 0x0
+-#define HMD_CTRL_DEFAULT_VALUE 0x0
+-
+-static void hsw_set_shim_defaults(struct sst_dsp *sst)
+-{
+-	sst_dsp_shim_write_unlocked(sst, SST_CSR, CSR_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_ISRX, ISC_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_ISRD, ISD_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_IMRX, IMC_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_IMRD, IMD_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_IPCX, IPCC_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_IPCD, IPCD_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_CLKCTL, CLKCTL_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_CSR2, CSR2_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_LTRC, LTR_CTRL_DEFAULT_VALUE);
+-	sst_dsp_shim_write_unlocked(sst, SST_HMDC, HMD_CTRL_DEFAULT_VALUE);
+-}
+-
+-/* all clock-gating minus DCLCGE and DTCGE */
+-#define SST_VDRTCL2_CG_OTHER	0xB7D
+-
+ static void hsw_set_dsp_D3(struct sst_dsp *sst)
+ {
++	u32 val;
+ 	u32 reg;
+ 
+-	/* disable clock core gating */
++	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ 	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg &= ~(SST_VDRTCL2_DCLCGE);
++	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ 	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ 
+-	/* stall, reset and set 24MHz XOSC */
+-	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+-			SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST,
+-			SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST);
+-
+-	/* DRAM power gating all */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+-	reg |= SST_VDRTCL0_ISRAMPGE_MASK |
+-		SST_VDRTCL0_DSRAMPGE_MASK;
+-	reg &= ~(SST_VDRTCL0_D3SRAMPGD);
+-	reg |= SST_VDRTCL0_D3PGD;
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+-	udelay(50);
++	/* enable power gating and switch off DRAM & IRAM blocks */
++	val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
++	val |= SST_VDRTCL0_DSRAMPGE_MASK |
++		SST_VDRTCL0_ISRAMPGE_MASK;
++	val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
++	writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);
+ 
+-	/* PLL shutdown enable */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_APLLSE_MASK;
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
++	/* switch off audio PLL */
++	val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
++	val |= SST_VDRTCL2_APLLSE_MASK;
++	writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
+ 
+-	/* disable MCLK */
++	/* disable MCLK(clkctl.smos = 0) */
+ 	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
+-			SST_CLKCTL_MASK, 0);
+-
+-	/* switch clock gating */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_CG_OTHER;
+-	reg &= ~(SST_VDRTCL2_DTCGE);
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+-	/* enable DTCGE separatelly */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_DTCGE;
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
++		SST_CLKCTL_MASK, 0);
+ 
+-	/* set shim defaults */
+-	hsw_set_shim_defaults(sst);
+-
+-	/* set D3 */
+-	reg = readl(sst->addr.pci_cfg + SST_PMCS);
+-	reg |= SST_PMCS_PS_MASK;
+-	writel(reg, sst->addr.pci_cfg + SST_PMCS);
++	/* Set D3 state, delay 50 us */
++	val = readl(sst->addr.pci_cfg + SST_PMCS);
++	val |= SST_PMCS_PS_MASK;
++	writel(val, sst->addr.pci_cfg + SST_PMCS);
+ 	udelay(50);
+ 
+-	/* enable clock core gating */
++	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ 	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_DCLCGE;
++	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ 	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
++
+ 	udelay(50);
++
+ }
+ 
+ static void hsw_reset(struct sst_dsp *sst)
+@@ -346,62 +299,75 @@ static void hsw_reset(struct sst_dsp *sst)
+ 		SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
+ }
+ 
+-/* recommended CSR state for power-up */
+-#define SST_CSR_D0_MASK (0x18A09C0C | SST_CSR_DCS_MASK)
+-
+ static int hsw_set_dsp_D0(struct sst_dsp *sst)
+ {
+-	u32 reg;
++	int tries = 10;
++	u32 reg, fw_dump_bit;
+ 
+-	/* disable clock core gating */
++	/* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
+ 	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg &= ~(SST_VDRTCL2_DCLCGE);
++	reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
+ 	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ 
+-	/* switch clock gating */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_CG_OTHER;
+-	reg &= ~(SST_VDRTCL2_DTCGE);
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
++	/* Disable D3PG (VDRTCTL0.D3PGD = 1) */
++	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
++	reg |= SST_VDRTCL0_D3PGD;
++	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+ 
+-	/* set D0 */
++	/* Set D0 state */
+ 	reg = readl(sst->addr.pci_cfg + SST_PMCS);
+-	reg &= ~(SST_PMCS_PS_MASK);
++	reg &= ~SST_PMCS_PS_MASK;
+ 	writel(reg, sst->addr.pci_cfg + SST_PMCS);
+ 
+-	/* DRAM power gating none */
+-	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+-	reg &= ~(SST_VDRTCL0_ISRAMPGE_MASK |
+-		SST_VDRTCL0_DSRAMPGE_MASK);
+-	reg |= SST_VDRTCL0_D3SRAMPGD;
+-	reg |= SST_VDRTCL0_D3PGD;
+-	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
+-	mdelay(10);
++	/* check that ADSP shim is enabled */
++	while (tries--) {
++		reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
++		if (reg == 0)
++			goto finish;
++
++		msleep(1);
++	}
++
++	return -ENODEV;
+ 
+-	/* set shim defaults */
+-	hsw_set_shim_defaults(sst);
++finish:
++	/* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
++	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
++		SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);
++
++	/* stall DSP core, set clk to 192/96Mhz */
++	sst_dsp_shim_update_bits_unlocked(sst,
++		SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
++		SST_CSR_STALL | SST_CSR_DCS(4));
+ 
+-	/* restore MCLK */
++	/* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
+ 	sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
+-			SST_CLKCTL_MASK, SST_CLKCTL_MASK);
++		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
++		SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);
+ 
+-	/* PLL shutdown disable */
++	/* Stall and reset core, set CSR */
++	hsw_reset(sst);
++
++	/* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
+ 	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg &= ~(SST_VDRTCL2_APLLSE_MASK);
++	reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
+ 	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ 
+-	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+-			SST_CSR_D0_MASK, SST_CSR_SBCS0 | SST_CSR_SBCS1 |
+-			SST_CSR_STALL | SST_CSR_DCS(4));
+ 	udelay(50);
+ 
+-	/* enable clock core gating */
++	/* switch on audio PLL */
+ 	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+-	reg |= SST_VDRTCL2_DCLCGE;
++	reg &= ~SST_VDRTCL2_APLLSE_MASK;
+ 	writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ 
+-	/* clear reset */
+-	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_RST, 0);
++	/* set default power gating control, enable power gating control for all blocks. that is,
++	can't be accessed, please enable each block before accessing. */
++	reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
++	reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
++	/* for D0, always enable the block(DSRAM[0]) used for FW dump */
++	fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
++	writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
++
+ 
+ 	/* disable DMA finish function for SSP0 & SSP1 */
+ 	sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
+@@ -418,6 +384,12 @@ static int hsw_set_dsp_D0(struct sst_dsp *sst)
+ 	sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
+ 				SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);
+ 
++	/* clear IPC registers */
++	sst_dsp_shim_write(sst, SST_IPCX, 0x0);
++	sst_dsp_shim_write(sst, SST_IPCD, 0x0);
++	sst_dsp_shim_write(sst, 0x80, 0x6);
++	sst_dsp_shim_write(sst, 0xe0, 0x300a);
++
+ 	return 0;
+ }
+ 
+@@ -443,6 +415,11 @@ static void hsw_sleep(struct sst_dsp *sst)
+ {
+ 	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");
+ 
++	/* put DSP into reset and stall */
++	sst_dsp_shim_update_bits(sst, SST_CSR,
++		SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
++		SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
++
+ 	hsw_set_dsp_D3(sst);
+ 	dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
+ }
+diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
+index e711abcf8c124..d6adf7edea41f 100644
+--- a/sound/soc/meson/axg-toddr.c
++++ b/sound/soc/meson/axg-toddr.c
+@@ -18,6 +18,7 @@
+ #define CTRL0_TODDR_SEL_RESAMPLE	BIT(30)
+ #define CTRL0_TODDR_EXT_SIGNED		BIT(29)
+ #define CTRL0_TODDR_PP_MODE		BIT(28)
++#define CTRL0_TODDR_SYNC_CH		BIT(27)
+ #define CTRL0_TODDR_TYPE_MASK		GENMASK(15, 13)
+ #define CTRL0_TODDR_TYPE(x)		((x) << 13)
+ #define CTRL0_TODDR_MSB_POS_MASK	GENMASK(12, 8)
+@@ -189,10 +190,31 @@ static const struct axg_fifo_match_data axg_toddr_match_data = {
+ 	.dai_drv		= &axg_toddr_dai_drv
+ };
+ 
++static int g12a_toddr_dai_startup(struct snd_pcm_substream *substream,
++				 struct snd_soc_dai *dai)
++{
++	struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
++	int ret;
++
++	ret = axg_toddr_dai_startup(substream, dai);
++	if (ret)
++		return ret;
++
++	/*
++	 * Make sure the first channel ends up in the at beginning of the output
++	 * As weird as it looks, without this the first channel may be misplaced
++	 * in memory, with a random shift of 2 channels.
++	 */
++	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH,
++			   CTRL0_TODDR_SYNC_CH);
++
++	return 0;
++}
++
+ static const struct snd_soc_dai_ops g12a_toddr_ops = {
+ 	.prepare	= g12a_toddr_dai_prepare,
+ 	.hw_params	= axg_toddr_dai_hw_params,
+-	.startup	= axg_toddr_dai_startup,
++	.startup	= g12a_toddr_dai_startup,
+ 	.shutdown	= axg_toddr_dai_shutdown,
+ };
+ 
+diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
+index 2ef090f4af9e9..8abc1a95184b2 100644
+--- a/sound/soc/qcom/apq8016_sbc.c
++++ b/sound/soc/qcom/apq8016_sbc.c
+@@ -234,6 +234,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	card->dev = dev;
++	card->owner = THIS_MODULE;
+ 	card->dapm_widgets = apq8016_sbc_dapm_widgets;
+ 	card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
+ 	data = apq8016_sbc_parse_of(card);
+diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
+index 287ad2aa27f37..d47bedc259c59 100644
+--- a/sound/soc/qcom/apq8096.c
++++ b/sound/soc/qcom/apq8096.c
+@@ -114,6 +114,7 @@ static int apq8096_platform_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	card->dev = dev;
++	card->owner = THIS_MODULE;
+ 	dev_set_drvdata(dev, card);
+ 	ret = qcom_snd_parse_of(card);
+ 	if (ret)
+diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
+index 8ada4ecba8472..10322690c0eaa 100644
+--- a/sound/soc/qcom/common.c
++++ b/sound/soc/qcom/common.c
+@@ -45,8 +45,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
+ 
+ 	for_each_child_of_node(dev->of_node, np) {
+ 		dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL);
+-		if (!dlc)
+-			return -ENOMEM;
++		if (!dlc) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 
+ 		link->cpus	= &dlc[0];
+ 		link->platforms	= &dlc[1];
+diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
+index 68e9388ff46f1..b5b8465caf56f 100644
+--- a/sound/soc/qcom/sdm845.c
++++ b/sound/soc/qcom/sdm845.c
+@@ -557,6 +557,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
+ 	card->dapm_widgets = sdm845_snd_widgets;
+ 	card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
+ 	card->dev = dev;
++	card->owner = THIS_MODULE;
+ 	dev_set_drvdata(dev, card);
+ 	ret = qcom_snd_parse_of(card);
+ 	if (ret)
+diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
+index 3a6e18709b9e2..4ba111c841375 100644
+--- a/sound/soc/qcom/storm.c
++++ b/sound/soc/qcom/storm.c
+@@ -96,6 +96,7 @@ static int storm_platform_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 
+ 	card->dev = &pdev->dev;
++	card->owner = THIS_MODULE;
+ 
+ 	ret = snd_soc_of_parse_card_name(card, "qcom,model");
+ 	if (ret) {
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index f1d641cd48da9..20ca1d38b4b87 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -834,6 +834,19 @@ struct snd_soc_dai *snd_soc_find_dai(
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_find_dai);
+ 
++struct snd_soc_dai *snd_soc_find_dai_with_mutex(
++	const struct snd_soc_dai_link_component *dlc)
++{
++	struct snd_soc_dai *dai;
++
++	mutex_lock(&client_mutex);
++	dai = snd_soc_find_dai(dlc);
++	mutex_unlock(&client_mutex);
++
++	return dai;
++}
++EXPORT_SYMBOL_GPL(snd_soc_find_dai_with_mutex);
++
+ static int soc_dai_link_sanity_check(struct snd_soc_card *card,
+ 				     struct snd_soc_dai_link *link)
+ {
+diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
+index cecbbed2de9d5..0e04ad7689cd9 100644
+--- a/sound/soc/soc-dai.c
++++ b/sound/soc/soc-dai.c
+@@ -410,14 +410,14 @@ void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link)
+ 		supported_codec = false;
+ 
+ 		for_each_link_cpus(dai_link, i, cpu) {
+-			dai = snd_soc_find_dai(cpu);
++			dai = snd_soc_find_dai_with_mutex(cpu);
+ 			if (dai && snd_soc_dai_stream_valid(dai, direction)) {
+ 				supported_cpu = true;
+ 				break;
+ 			}
+ 		}
+ 		for_each_link_codecs(dai_link, i, codec) {
+-			dai = snd_soc_find_dai(codec);
++			dai = snd_soc_find_dai_with_mutex(codec);
+ 			if (dai && snd_soc_dai_stream_valid(dai, direction)) {
+ 				supported_codec = true;
+ 				break;
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 74baf1fce053f..918ed77726cc0 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -811,7 +811,7 @@ dynamic:
+ 	return 0;
+ 
+ config_err:
+-	for_each_rtd_dais(rtd, i, dai)
++	for_each_rtd_dais_rollback(rtd, i, dai)
+ 		snd_soc_dai_shutdown(dai, substream);
+ 
+ 	snd_soc_link_shutdown(substream);
+diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
+index da8ec1e8e0648..cc9fbcedb3646 100644
+--- a/tools/perf/tests/bp_signal.c
++++ b/tools/perf/tests/bp_signal.c
+@@ -45,10 +45,13 @@ volatile long the_var;
+ #if defined (__x86_64__)
+ extern void __test_function(volatile long *ptr);
+ asm (
++	".pushsection .text;"
+ 	".globl __test_function\n"
++	".type __test_function, @function;"
+ 	"__test_function:\n"
+ 	"incq (%rdi)\n"
+-	"ret\n");
++	"ret\n"
++	".popsection\n");
+ #else
+ static void __test_function(volatile long *ptr)
+ {
+diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
+index ab64b4a4e2848..343d36965836a 100644
+--- a/tools/perf/tests/pmu-events.c
++++ b/tools/perf/tests/pmu-events.c
+@@ -274,6 +274,7 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
+ 	int res = 0;
+ 	bool use_uncore_table;
+ 	struct pmu_events_map *map = __test_pmu_get_events_map();
++	struct perf_pmu_alias *a, *tmp;
+ 
+ 	if (!map)
+ 		return -1;
+@@ -347,6 +348,10 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
+ 			  pmu_name, alias->name);
+ 	}
+ 
++	list_for_each_entry_safe(a, tmp, &aliases, list) {
++		list_del(&a->list);
++		perf_pmu_free_alias(a);
++	}
+ 	free(pmu);
+ 	return res;
+ }
+diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
+index 5c11fe2b30406..714e6830a758f 100644
+--- a/tools/perf/tests/pmu.c
++++ b/tools/perf/tests/pmu.c
+@@ -173,6 +173,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
+ 		ret = 0;
+ 	} while (0);
+ 
++	perf_pmu__del_formats(&formats);
+ 	test_format_dir_put(format);
+ 	return ret;
+ }
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index ab48be4cf2584..b279888bb1aab 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -946,6 +946,10 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
+ 
+ 	perf_evlist__set_maps(&evlist->core, cpus, threads);
+ 
++	/* as evlist now has references, put count here */
++	perf_cpu_map__put(cpus);
++	perf_thread_map__put(threads);
++
+ 	return 0;
+ 
+ out_delete_threads:
+@@ -1273,11 +1277,12 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist)
+ 		goto out_put;
+ 
+ 	perf_evlist__set_maps(&evlist->core, cpus, threads);
+-out:
+-	return err;
++
++	perf_thread_map__put(threads);
+ out_put:
+ 	perf_cpu_map__put(cpus);
+-	goto out;
++out:
++	return err;
+ }
+ 
+ int evlist__open(struct evlist *evlist)
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 9e21aa767e417..344a75718afc3 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -443,6 +443,9 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
+ 						continue;
+ 					strlist__add(me->metrics, s);
+ 				}
++
++				if (!raw)
++					free(s);
+ 			}
+ 			free(omg);
+ 		}
+@@ -726,7 +729,7 @@ int metricgroup__parse_groups(const struct option *opt,
+ 	ret = metricgroup__add_metric_list(str, metric_no_group,
+ 					   &extra_events, &group_list);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 	pr_debug("adding %s\n", extra_events.buf);
+ 	bzero(&parse_error, sizeof(parse_error));
+ 	ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
+@@ -734,11 +737,11 @@ int metricgroup__parse_groups(const struct option *opt,
+ 		parse_events_print_error(&parse_error, extra_events.buf);
+ 		goto out;
+ 	}
+-	strbuf_release(&extra_events);
+ 	ret = metricgroup__setup_events(&group_list, metric_no_merge,
+ 					perf_evlist, metric_events);
+ out:
+ 	metricgroup__free_egroups(&group_list);
++	strbuf_release(&extra_events);
+ 	return ret;
+ }
+ 
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 4476de0e678aa..c1120d8196fae 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -410,7 +410,7 @@ static int add_event_tool(struct list_head *list, int *idx,
+ 		return -ENOMEM;
+ 	evsel->tool_event = tool_event;
+ 	if (tool_event == PERF_TOOL_DURATION_TIME)
+-		evsel->unit = strdup("ns");
++		evsel->unit = "ns";
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 93fe72a9dc0b2..483da97ac4459 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -272,7 +272,7 @@ static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+ }
+ 
+ /* Delete an alias entry. */
+-static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
++void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+ {
+ 	zfree(&newalias->name);
+ 	zfree(&newalias->desc);
+@@ -1352,6 +1352,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
+ 		set_bit(b, bits);
+ }
+ 
++void perf_pmu__del_formats(struct list_head *formats)
++{
++	struct perf_pmu_format *fmt, *tmp;
++
++	list_for_each_entry_safe(fmt, tmp, formats, list) {
++		list_del(&fmt->list);
++		free(fmt->name);
++		free(fmt);
++	}
++}
++
+ static int sub_non_neg(int a, int b)
+ {
+ 	if (b > a)
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index f971d9aa4570a..28778b47fb4b7 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -92,6 +92,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
+ 			 int config, unsigned long *bits);
+ void perf_pmu__set_format(unsigned long *bits, long from, long to);
+ int perf_pmu__format_parse(char *dir, struct list_head *head);
++void perf_pmu__del_formats(struct list_head *formats);
+ 
+ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+ 
+@@ -111,6 +112,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
+ 
+ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
+ bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
++void perf_pmu_free_alias(struct perf_pmu_alias *alias);
+ 
+ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
+ 
+diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
+index a4cc11592f6b3..ea9aa1d7cf501 100644
+--- a/tools/perf/util/record.c
++++ b/tools/perf/util/record.c
+@@ -2,6 +2,7 @@
+ #include "debug.h"
+ #include "evlist.h"
+ #include "evsel.h"
++#include "evsel_config.h"
+ #include "parse-events.h"
+ #include <errno.h>
+ #include <limits.h>
+@@ -33,11 +34,24 @@ static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evl
+ 	return leader;
+ }
+ 
++static u64 evsel__config_term_mask(struct evsel *evsel)
++{
++	struct evsel_config_term *term;
++	struct list_head *config_terms = &evsel->config_terms;
++	u64 term_types = 0;
++
++	list_for_each_entry(term, config_terms, list) {
++		term_types |= 1 << term->type;
++	}
++	return term_types;
++}
++
+ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
+ {
+ 	struct perf_event_attr *attr = &evsel->core.attr;
+ 	struct evsel *leader = evsel->leader;
+ 	struct evsel *read_sampler;
++	u64 term_types, freq_mask;
+ 
+ 	if (!leader->sample_read)
+ 		return;
+@@ -47,16 +61,20 @@ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *ev
+ 	if (evsel == read_sampler)
+ 		return;
+ 
++	term_types = evsel__config_term_mask(evsel);
+ 	/*
+-	 * Disable sampling for all group members other than the leader in
+-	 * case the leader 'leads' the sampling, except when the leader is an
+-	 * AUX area event, in which case the 2nd event in the group is the one
+-	 * that 'leads' the sampling.
++	 * Disable sampling for all group members except those with explicit
++	 * config terms or the leader. In the case of an AUX area event, the 2nd
++	 * event in the group is the one that 'leads' the sampling.
+ 	 */
+-	attr->freq           = 0;
+-	attr->sample_freq    = 0;
+-	attr->sample_period  = 0;
+-	attr->write_backward = 0;
++	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
++	if ((term_types & freq_mask) == 0) {
++		attr->freq           = 0;
++		attr->sample_freq    = 0;
++		attr->sample_period  = 0;
++	}
++	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
++		attr->write_backward = 0;
+ 
+ 	/*
+ 	 * We don't get a sample for slave events, we make them when delivering
+diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
+index 6af951900aa39..312889edb84ab 100644
+--- a/tools/testing/selftests/vm/map_hugetlb.c
++++ b/tools/testing/selftests/vm/map_hugetlb.c
+@@ -83,7 +83,7 @@ int main(int argc, char **argv)
+ 	}
+ 
+ 	if (shift)
+-		printf("%u kB hugepages\n", 1 << shift);
++		printf("%u kB hugepages\n", 1 << (shift - 10));
+ 	else
+ 		printf("Default size hugepages\n");
+ 	printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);

