[gentoo-commits] proj/linux-patches:5.2 commit in: /
From: Mike Pagano @ 2019-08-29 14:27 UTC
To: gentoo-commits
commit: 26fab6b6424908ae47ad7e488177505589c3bc52
Author: Mike Pagano <mpagano@gentoo.org>
AuthorDate: Thu Aug 29 14:27:12 2019 +0000
Commit: Mike Pagano <mpagano@gentoo.org>
CommitDate: Thu Aug 29 14:27:12 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26fab6b6
Linux patch 5.2.11
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
0000_README | 4 +
1010_linux-5.2.11.patch | 6480 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6484 insertions(+)
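
The commit does two things: it registers the new patch in 0000_README (each entry carries Patch:/From:/Desc: metadata, and the numbered patches apply in sequence on top of the base 5.2 sources) and adds 1010_linux-5.2.11.patch itself, which bumps SUBLEVEL in the top-level Makefile from 10 to 11. A minimal sketch of applying this incremental patch by hand, assuming an unpacked tree that already has 1009_linux-5.2.10.patch applied (paths are illustrative, not part of the commit):

  cd /usr/src/linux-5.2
  patch -p1 < 1010_linux-5.2.11.patch
  grep '^SUBLEVEL' Makefile   # should now read: SUBLEVEL = 11
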
diff --git a/0000_README b/0000_README
index 2056b84..374124c 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch: 1009_linux-5.2.10.patch
From: https://www.kernel.org
Desc: Linux 5.2.10
+Patch: 1010_linux-5.2.11.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.11
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1010_linux-5.2.11.patch b/1010_linux-5.2.11.patch
new file mode 100644
index 0000000..4df5b0e
--- /dev/null
+++ b/1010_linux-5.2.11.patch
@@ -0,0 +1,6480 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 0d40729d080f..cb17fde4164f 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4055,6 +4055,13 @@
+ Run specified binary instead of /init from the ramdisk,
+ used for early userspace startup. See initrd.
+
++ rdrand= [X86]
++ force - Override the decision by the kernel to hide the
++ advertisement of RDRAND support (this affects
++ certain AMD processors because of buggy BIOS
++ support, specifically around the suspend/resume
++ path).
++
+ rdt= [HW,X86,RDT]
+ Turn on/off individual RDT features. List is:
+ cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
+diff --git a/Documentation/devicetree/bindings/riscv/sifive.yaml b/Documentation/devicetree/bindings/riscv/sifive.yaml
+index 9d17dc2f3f84..3ab532713dc1 100644
+--- a/Documentation/devicetree/bindings/riscv/sifive.yaml
++++ b/Documentation/devicetree/bindings/riscv/sifive.yaml
+@@ -19,7 +19,7 @@ properties:
+ compatible:
+ items:
+ - enum:
+- - sifive,freedom-unleashed-a00
++ - sifive,hifive-unleashed-a00
+ - const: sifive,fu540-c000
+ - const: sifive,fu540
+ ...
+diff --git a/Makefile b/Makefile
+index 35fee16d5006..a3b26dcfc5c8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
+index d2806bcff8bb..07745ee022a1 100644
+--- a/arch/arm/kvm/coproc.c
++++ b/arch/arm/kvm/coproc.c
+@@ -651,13 +651,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ }
+
+ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+- const struct coproc_reg *table, size_t num)
++ const struct coproc_reg *table, size_t num,
++ unsigned long *bmap)
+ {
+ unsigned long i;
+
+ for (i = 0; i < num; i++)
+- if (table[i].reset)
++ if (table[i].reset) {
++ int reg = table[i].reg;
++
+ table[i].reset(vcpu, &table[i]);
++ if (reg > 0 && reg < NR_CP15_REGS) {
++ set_bit(reg, bmap);
++ if (table[i].is_64bit)
++ set_bit(reg + 1, bmap);
++ }
++ }
+ }
+
+ static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
+@@ -1432,17 +1441,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
+ {
+ size_t num;
+ const struct coproc_reg *table;
+-
+- /* Catch someone adding a register without putting in reset entry. */
+- memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
++ DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
+
+ /* Generic chip reset first (so target could override). */
+- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
++ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
+
+ table = get_target_table(vcpu->arch.target, &num);
+- reset_coproc_regs(vcpu, table, num);
++ reset_coproc_regs(vcpu, table, num, bmap);
+
+ for (num = 1; num < NR_CP15_REGS; num++)
+- WARN(vcpu_cp15(vcpu, num) == 0x42424242,
++ WARN(!test_bit(num, bmap),
+ "Didn't reset vcpu_cp15(vcpu, %zi)", num);
+ }
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index ce933f296049..5b7085ca213d 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -632,7 +632,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ */
+ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
+ | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
++ __vcpu_sys_reg(vcpu, r->reg) = val;
+ }
+
+ static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
+@@ -981,13 +981,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
+ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
+ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
+- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
++ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
+ { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
+- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
++ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
+ { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
+- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
++ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
+ { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
+- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
++ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+
+ /* Macro to expand the PMEVCNTRn_EL0 register */
+ #define PMU_PMEVCNTR_EL0(n) \
+@@ -1540,7 +1540,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+ { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
+ { SYS_DESC(SYS_CTR_EL0), access_ctr },
+
+- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
++ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
+ { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
+ { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
+ { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
+@@ -2254,13 +2254,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
+ }
+
+ static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
+- const struct sys_reg_desc *table, size_t num)
++ const struct sys_reg_desc *table, size_t num,
++ unsigned long *bmap)
+ {
+ unsigned long i;
+
+ for (i = 0; i < num; i++)
+- if (table[i].reset)
++ if (table[i].reset) {
++ int reg = table[i].reg;
++
+ table[i].reset(vcpu, &table[i]);
++ if (reg > 0 && reg < NR_SYS_REGS)
++ set_bit(reg, bmap);
++ }
+ }
+
+ /**
+@@ -2774,18 +2780,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
+ {
+ size_t num;
+ const struct sys_reg_desc *table;
+-
+- /* Catch someone adding a register without putting in reset entry. */
+- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
++ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
+
+ /* Generic chip reset first (so target could override). */
+- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
++ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
+
+ table = get_target_table(vcpu->arch.target, true, &num);
+- reset_sys_reg_descs(vcpu, table, num);
++ reset_sys_reg_descs(vcpu, table, num, bmap);
+
+ for (num = 1; num < NR_SYS_REGS; num++) {
+- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
++ if (WARN(!test_bit(num, bmap),
+ "Didn't reset __vcpu_sys_reg(%zi)\n", num))
+ break;
+ }
+diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
+index e0dd66881da6..f777e44653d5 100644
+--- a/arch/mips/kernel/cacheinfo.c
++++ b/arch/mips/kernel/cacheinfo.c
+@@ -69,6 +69,8 @@ static int __populate_cache_leaves(unsigned int cpu)
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+
++ this_cpu_ci->cpu_map_populated = true;
++
+ return 0;
+ }
+
+diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
+index 5f209f111e59..df7ddd246eaa 100644
+--- a/arch/mips/kernel/i8253.c
++++ b/arch/mips/kernel/i8253.c
+@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
+
+ static int __init init_pit_clocksource(void)
+ {
+- if (num_possible_cpus() > 1) /* PIT does not scale! */
++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
++ !clockevent_state_periodic(&i8253_clockevent))
+ return 0;
+
+ return clocksource_i8253_init();
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 1ad4089dd110..d4d096f80f4b 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -130,7 +130,7 @@ _GLOBAL_TOC(flush_dcache_range)
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
+- srw. r8,r8,r9 /* compute line count */
++ srd. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ mtctr r8
+ 0: dcbst 0,r6
+@@ -148,7 +148,7 @@ _GLOBAL(flush_inval_dcache_range)
+ subf r8,r6,r4 /* compute length */
+ add r8,r8,r5 /* ensure we get enough */
+ lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
+- srw. r8,r8,r9 /* compute line count */
++ srd. r8,r8,r9 /* compute line count */
+ beqlr /* nothing to do? */
+ sync
+ isync
+diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
+index 3c49bde8aa5e..b8aa6a9f937b 100644
+--- a/arch/s390/boot/ipl_parm.c
++++ b/arch/s390/boot/ipl_parm.c
+@@ -48,9 +48,7 @@ void store_ipl_parmblock(void)
+ {
+ int rc;
+
+- uv_set_shared(__pa(&ipl_block));
+ rc = __diag308(DIAG308_STORE, &ipl_block);
+- uv_remove_shared(__pa(&ipl_block));
+ if (rc == DIAG308_RC_OK &&
+ ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ ipl_block_valid = 1;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 2c0a515428d6..6837affc19e8 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -31,7 +31,6 @@
+ #include <asm/os_info.h>
+ #include <asm/sections.h>
+ #include <asm/boot_data.h>
+-#include <asm/uv.h>
+ #include "entry.h"
+
+ #define IPL_PARM_BLOCK_VERSION 0
+@@ -892,21 +891,15 @@ static void __reipl_run(void *unused)
+ {
+ switch (reipl_type) {
+ case IPL_TYPE_CCW:
+- uv_set_shared(__pa(reipl_block_ccw));
+ diag308(DIAG308_SET, reipl_block_ccw);
+- uv_remove_shared(__pa(reipl_block_ccw));
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ break;
+ case IPL_TYPE_FCP:
+- uv_set_shared(__pa(reipl_block_fcp));
+ diag308(DIAG308_SET, reipl_block_fcp);
+- uv_remove_shared(__pa(reipl_block_fcp));
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ break;
+ case IPL_TYPE_NSS:
+- uv_set_shared(__pa(reipl_block_nss));
+ diag308(DIAG308_SET, reipl_block_nss);
+- uv_remove_shared(__pa(reipl_block_nss));
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ break;
+ case IPL_TYPE_UNKNOWN:
+@@ -1176,9 +1169,7 @@ static struct kset *dump_kset;
+
+ static void diag308_dump(void *dump_block)
+ {
+- uv_set_shared(__pa(dump_block));
+ diag308(DIAG308_SET, dump_block);
+- uv_remove_shared(__pa(dump_block));
+ while (1) {
+ if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
+ break;
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 49d55327de0b..7e0eb4020917 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -32,10 +32,9 @@ PHDRS {
+ SECTIONS
+ {
+ . = 0x100000;
+- _stext = .; /* Start of text section */
+ .text : {
+- /* Text and read-only data */
+- _text = .;
++ _stext = .; /* Start of text section */
++ _text = .; /* Text and read-only data */
+ HEAD_TEXT
+ TEXT_TEXT
+ SCHED_TEXT
+@@ -47,11 +46,10 @@ SECTIONS
+ *(.text.*_indirect_*)
+ *(.fixup)
+ *(.gnu.warning)
++ . = ALIGN(PAGE_SIZE);
++ _etext = .; /* End of text section */
+ } :text = 0x0700
+
+- . = ALIGN(PAGE_SIZE);
+- _etext = .; /* End of text section */
+-
+ NOTES :text :note
+
+ .dummy : { *(.dummy) } :data
+diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
+index 3b93ba0b5d8d..5d67b81c704a 100644
+--- a/arch/s390/mm/dump_pagetables.c
++++ b/arch/s390/mm/dump_pagetables.c
+@@ -161,9 +161,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
+ }
+ #endif
+
+- for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
++ pmd = pmd_offset(pud, addr);
++ for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
+ st->current_address = addr;
+- pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ if (pmd_large(*pmd)) {
+ prot = pmd_val(*pmd) &
+@@ -192,9 +192,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
+ }
+ #endif
+
+- for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
++ pud = pud_offset(p4d, addr);
++ for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
+ st->current_address = addr;
+- pud = pud_offset(p4d, addr);
+ if (!pud_none(*pud))
+ if (pud_large(*pud)) {
+ prot = pud_val(*pud) &
+@@ -222,9 +222,9 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
+ }
+ #endif
+
+- for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
++ p4d = p4d_offset(pgd, addr);
++ for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
+ st->current_address = addr;
+- p4d = p4d_offset(pgd, addr);
+ if (!p4d_none(*p4d))
+ walk_pud_level(m, st, p4d, addr);
+ else
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index f6f6ef436599..b16a6c7da6eb 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -18,6 +18,20 @@
+ * Note: efi_info is commonly left uninitialized, but that field has a
+ * private magic, so it is better to leave it unchanged.
+ */
++
++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
++
++#define BOOT_PARAM_PRESERVE(struct_member) \
++ { \
++ .start = offsetof(struct boot_params, struct_member), \
++ .len = sizeof_mbr(struct boot_params, struct_member), \
++ }
++
++struct boot_params_to_save {
++ unsigned int start;
++ unsigned int len;
++};
++
+ static void sanitize_boot_params(struct boot_params *boot_params)
+ {
+ /*
+@@ -35,21 +49,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+ * problems again.
+ */
+ if (boot_params->sentinel) {
+- /* fields in boot_params are left uninitialized, clear them */
+- boot_params->acpi_rsdp_addr = 0;
+- memset(&boot_params->ext_ramdisk_image, 0,
+- (char *)&boot_params->efi_info -
+- (char *)&boot_params->ext_ramdisk_image);
+- memset(&boot_params->kbd_status, 0,
+- (char *)&boot_params->hdr -
+- (char *)&boot_params->kbd_status);
+- memset(&boot_params->_pad7[0], 0,
+- (char *)&boot_params->edd_mbr_sig_buffer[0] -
+- (char *)&boot_params->_pad7[0]);
+- memset(&boot_params->_pad8[0], 0,
+- (char *)&boot_params->eddbuf[0] -
+- (char *)&boot_params->_pad8[0]);
+- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
++ static struct boot_params scratch;
++ char *bp_base = (char *)boot_params;
++ char *save_base = (char *)&scratch;
++ int i;
++
++ const struct boot_params_to_save to_save[] = {
++ BOOT_PARAM_PRESERVE(screen_info),
++ BOOT_PARAM_PRESERVE(apm_bios_info),
++ BOOT_PARAM_PRESERVE(tboot_addr),
++ BOOT_PARAM_PRESERVE(ist_info),
++ BOOT_PARAM_PRESERVE(hd0_info),
++ BOOT_PARAM_PRESERVE(hd1_info),
++ BOOT_PARAM_PRESERVE(sys_desc_table),
++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
++ BOOT_PARAM_PRESERVE(efi_info),
++ BOOT_PARAM_PRESERVE(alt_mem_k),
++ BOOT_PARAM_PRESERVE(scratch),
++ BOOT_PARAM_PRESERVE(e820_entries),
++ BOOT_PARAM_PRESERVE(eddbuf_entries),
++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
++ BOOT_PARAM_PRESERVE(hdr),
++ BOOT_PARAM_PRESERVE(e820_table),
++ BOOT_PARAM_PRESERVE(eddbuf),
++ };
++
++ memset(&scratch, 0, sizeof(scratch));
++
++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
++ memcpy(save_base + to_save[i].start,
++ bp_base + to_save[i].start, to_save[i].len);
++ }
++
++ memcpy(boot_params, save_base, sizeof(*boot_params));
+ }
+ }
+
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 979ef971cc78..41e41ec5d646 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -372,6 +372,7 @@
+ #define MSR_AMD64_PATCH_LEVEL 0x0000008b
+ #define MSR_AMD64_TSC_RATIO 0xc0000104
+ #define MSR_AMD64_NB_CFG 0xc001001f
++#define MSR_AMD64_CPUID_FN_1 0xc0011004
+ #define MSR_AMD64_PATCH_LOADER 0xc0010020
+ #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+ #define MSR_AMD64_OSVW_STATUS 0xc0010141
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index 109f974f9835..80bc209c0708 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -192,7 +192,7 @@
+ " lfence;\n" \
+ " jmp 902b;\n" \
+ " .align 16\n" \
+- "903: addl $4, %%esp;\n" \
++ "903: lea 4(%%esp), %%esp;\n" \
+ " pushl %[thunk_target];\n" \
+ " ret;\n" \
+ " .align 16\n" \
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 530cf1fd68a2..2f067b443326 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
+ static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
+
+ /*
+- * Temporary interrupt handler.
++ * Temporary interrupt handler and polled calibration function.
+ */
+ static void __init lapic_cal_handler(struct clock_event_device *dev)
+ {
+@@ -824,7 +824,8 @@ static int __init lapic_init_clockevent(void)
+ static int __init calibrate_APIC_clock(void)
+ {
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
+- void (*real_handler)(struct clock_event_device *dev);
++ u64 tsc_perj = 0, tsc_start = 0;
++ unsigned long jif_start;
+ unsigned long deltaj;
+ long delta, deltatsc;
+ int pm_referenced = 0;
+@@ -851,28 +852,64 @@ static int __init calibrate_APIC_clock(void)
+ apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
+ "calibrating APIC timer ...\n");
+
++ /*
++ * There are platforms w/o global clockevent devices. Instead of
++ * making the calibration conditional on that, use a polling based
++ * approach everywhere.
++ */
+ local_irq_disable();
+
+- /* Replace the global interrupt handler */
+- real_handler = global_clock_event->event_handler;
+- global_clock_event->event_handler = lapic_cal_handler;
+-
+ /*
+ * Setup the APIC counter to maximum. There is no way the lapic
+ * can underflow in the 100ms detection time frame
+ */
+ __setup_APIC_LVTT(0xffffffff, 0, 0);
+
+- /* Let the interrupts run */
++ /*
++ * Methods to terminate the calibration loop:
++ * 1) Global clockevent if available (jiffies)
++ * 2) TSC if available and frequency is known
++ */
++ jif_start = READ_ONCE(jiffies);
++
++ if (tsc_khz) {
++ tsc_start = rdtsc();
++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
++ }
++
++ /*
++ * Enable interrupts so the tick can fire, if a global
++ * clockevent device is available
++ */
+ local_irq_enable();
+
+- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
+- cpu_relax();
++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
++ /* Wait for a tick to elapse */
++ while (1) {
++ if (tsc_khz) {
++ u64 tsc_now = rdtsc();
++ if ((tsc_now - tsc_start) >= tsc_perj) {
++ tsc_start += tsc_perj;
++ break;
++ }
++ } else {
++ unsigned long jif_now = READ_ONCE(jiffies);
+
+- local_irq_disable();
++ if (time_after(jif_now, jif_start)) {
++ jif_start = jif_now;
++ break;
++ }
++ }
++ cpu_relax();
++ }
+
+- /* Restore the real event handler */
+- global_clock_event->event_handler = real_handler;
++ /* Invoke the calibration routine */
++ local_irq_disable();
++ lapic_cal_handler(NULL);
++ local_irq_enable();
++ }
++
++ local_irq_disable();
+
+ /* Build delta t1-t2 as apic timer counts down */
+ delta = lapic_cal_t1 - lapic_cal_t2;
+@@ -916,10 +953,11 @@ static int __init calibrate_APIC_clock(void)
+ levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
+
+ /*
+- * PM timer calibration failed or not turned on
+- * so lets try APIC timer based calibration
++ * PM timer calibration failed or not turned on so lets try APIC
++ * timer based calibration, if a global clockevent device is
++ * available.
+ */
+- if (!pm_referenced) {
++ if (!pm_referenced && global_clock_event) {
+ apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
+
+ /*
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 8d4e50428b68..68c363c341bf 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
+ msr_set_bit(MSR_AMD64_DE_CFG, 31);
+ }
+
++static bool rdrand_force;
++
++static int __init rdrand_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "force"))
++ rdrand_force = true;
++ else
++ return -EINVAL;
++
++ return 0;
++}
++early_param("rdrand", rdrand_cmdline);
++
++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
++{
++ /*
++ * Saving of the MSR used to hide the RDRAND support during
++ * suspend/resume is done by arch/x86/power/cpu.c, which is
++ * dependent on CONFIG_PM_SLEEP.
++ */
++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
++ return;
++
++ /*
++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
++ * RDRAND support using the CPUID function directly.
++ */
++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
++ return;
++
++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
++
++ /*
++ * Verify that the CPUID change has occurred in case the kernel is
++ * running virtualized and the hypervisor doesn't support the MSR.
++ */
++ if (cpuid_ecx(1) & BIT(30)) {
++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
++ return;
++ }
++
++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
++}
++
++static void init_amd_jg(struct cpuinfo_x86 *c)
++{
++ /*
++ * Some BIOS implementations do not restore proper RDRAND support
++ * across suspend and resume. Check on whether to hide the RDRAND
++ * instruction support via CPUID.
++ */
++ clear_rdrand_cpuid_bit(c);
++}
++
+ static void init_amd_bd(struct cpuinfo_x86 *c)
+ {
+ u64 value;
+@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
+ wrmsrl_safe(MSR_F15H_IC_CFG, value);
+ }
+ }
++
++ /*
++ * Some BIOS implementations do not restore proper RDRAND support
++ * across suspend and resume. Check on whether to hide the RDRAND
++ * instruction support via CPUID.
++ */
++ clear_rdrand_cpuid_bit(c);
+ }
+
+ static void init_amd_zn(struct cpuinfo_x86 *c)
+@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ case 0x10: init_amd_gh(c); break;
+ case 0x12: init_amd_ln(c); break;
+ case 0x15: init_amd_bd(c); break;
++ case 0x16: init_amd_jg(c); break;
+ case 0x17: init_amd_zn(c); break;
+ }
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 8d95c81b2c82..01f04db1fa61 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -5649,38 +5649,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+ {
+- struct kvm_mmu_page *sp;
+- LIST_HEAD(invalid_list);
+- unsigned long i;
+- bool flush;
+- gfn_t gfn;
+-
+- spin_lock(&kvm->mmu_lock);
+-
+- if (list_empty(&kvm->arch.active_mmu_pages))
+- goto out_unlock;
+-
+- flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
+-
+- for (i = 0; i < slot->npages; i++) {
+- gfn = slot->base_gfn + i;
+-
+- for_each_valid_sp(kvm, sp, gfn) {
+- if (sp->gfn != gfn)
+- continue;
+-
+- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+- }
+- if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+- flush = false;
+- cond_resched_lock(&kvm->mmu_lock);
+- }
+- }
+- kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+-
+-out_unlock:
+- spin_unlock(&kvm->mmu_lock);
++ kvm_mmu_zap_all(kvm);
+ }
+
+ void kvm_mmu_init_vm(struct kvm *kvm)
+diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
+index 04967cdce5d1..7ad68917a51e 100644
+--- a/arch/x86/lib/cpu.c
++++ b/arch/x86/lib/cpu.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ #include <linux/types.h>
+ #include <linux/export.h>
++#include <asm/cpu.h>
+
+ unsigned int x86_family(unsigned int sig)
+ {
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 24b079e94bc2..c9ef6a7a4a1a 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -12,6 +12,7 @@
+ #include <linux/smp.h>
+ #include <linux/perf_event.h>
+ #include <linux/tboot.h>
++#include <linux/dmi.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+@@ -23,7 +24,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/cpu.h>
+ #include <asm/mmu_context.h>
+-#include <linux/dmi.h>
++#include <asm/cpu_device_id.h>
+
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
+
+ core_initcall(bsp_pm_check_init);
+
+-static int msr_init_context(const u32 *msr_id, const int total_num)
++static int msr_build_context(const u32 *msr_id, const int num)
+ {
+- int i = 0;
++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
+ struct saved_msr *msr_array;
++ int total_num;
++ int i, j;
+
+- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
+- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
+- return -EINVAL;
+- }
++ total_num = saved_msrs->num + num;
+
+ msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
+ if (!msr_array) {
+@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < total_num; i++) {
+- msr_array[i].info.msr_no = msr_id[i];
++ if (saved_msrs->array) {
++ /*
++ * Multiple callbacks can invoke this function, so copy any
++ * MSR save requests from previous invocations.
++ */
++ memcpy(msr_array, saved_msrs->array,
++ sizeof(struct saved_msr) * saved_msrs->num);
++
++ kfree(saved_msrs->array);
++ }
++
++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
++ msr_array[i].info.msr_no = msr_id[j];
+ msr_array[i].valid = false;
+ msr_array[i].info.reg.q = 0;
+ }
+- saved_context.saved_msrs.num = total_num;
+- saved_context.saved_msrs.array = msr_array;
++ saved_msrs->num = total_num;
++ saved_msrs->array = msr_array;
+
+ return 0;
+ }
+
+ /*
+- * The following section is a quirk framework for problematic BIOSen:
++ * The following sections are a quirk framework for problematic BIOSen:
+ * Sometimes MSRs are modified by the BIOSen after suspended to
+ * RAM, this might cause unexpected behavior after wakeup.
+ * Thus we save/restore these specified MSRs across suspend/resume
+@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
+ u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
+
+ pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
+- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ }
+
+ static const struct dmi_system_id msr_save_dmi_table[] = {
+@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
+ {}
+ };
+
++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
++{
++ u32 cpuid_msr_id[] = {
++ MSR_AMD64_CPUID_FN_1,
++ };
++
++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
++ c->family);
++
++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
++}
++
++static const struct x86_cpu_id msr_save_cpu_table[] = {
++ {
++ .vendor = X86_VENDOR_AMD,
++ .family = 0x15,
++ .model = X86_MODEL_ANY,
++ .feature = X86_FEATURE_ANY,
++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++ },
++ {
++ .vendor = X86_VENDOR_AMD,
++ .family = 0x16,
++ .model = X86_MODEL_ANY,
++ .feature = X86_FEATURE_ANY,
++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
++ },
++ {}
++};
++
++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
++static int pm_cpu_check(const struct x86_cpu_id *c)
++{
++ const struct x86_cpu_id *m;
++ int ret = 0;
++
++ m = x86_match_cpu(msr_save_cpu_table);
++ if (m) {
++ pm_cpu_match_t fn;
++
++ fn = (pm_cpu_match_t)m->driver_data;
++ ret = fn(m);
++ }
++
++ return ret;
++}
++
+ static int pm_check_save_msr(void)
+ {
+ dmi_check_system(msr_save_dmi_table);
++ pm_cpu_check(msr_save_cpu_table);
++
+ return 0;
+ }
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 404e776aa36d..b528710364e9 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2085,9 +2085,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
+ blk_rq_pos(container_of(rb_prev(&req->rb_node),
+ struct request, rb_node))) {
+ struct bfq_queue *bfqq = bfq_init_rq(req);
+- struct bfq_data *bfqd = bfqq->bfqd;
++ struct bfq_data *bfqd;
+ struct request *prev, *next_rq;
+
++ if (!bfqq)
++ return;
++
++ bfqd = bfqq->bfqd;
++
+ /* Reposition request in its sort_list */
+ elv_rb_del(&bfqq->sort_list, req);
+ elv_rb_add(&bfqq->sort_list, req);
+@@ -2134,6 +2139,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+ struct bfq_queue *bfqq = bfq_init_rq(rq),
+ *next_bfqq = bfq_init_rq(next);
+
++ if (!bfqq)
++ return;
++
+ /*
+ * If next and rq belong to the same bfq_queue and next is older
+ * than rq, then reposition rq in the fifo (by substituting next
+@@ -5061,12 +5069,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+
+ spin_lock_irq(&bfqd->lock);
+ bfqq = bfq_init_rq(rq);
+- if (at_head || blk_rq_is_passthrough(rq)) {
++ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
+ if (at_head)
+ list_add(&rq->queuelist, &bfqd->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &bfqd->dispatch);
+- } else { /* bfqq is assumed to be non null here */
++ } else {
+ idle_timer_disabled = __bfq_insert_request(bfqd, rq);
+ /*
+ * Update bfqq, because, if a queue merge has occurred
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 391ac0503dc0..76d0f9de767b 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1786,6 +1786,21 @@ nothing_to_do:
+ return 1;
+ }
+
++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
++{
++ struct request *rq = scmd->request;
++ u32 req_blocks;
++
++ if (!blk_rq_is_passthrough(rq))
++ return true;
++
++ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
++ if (n_blocks > req_blocks)
++ return false;
++
++ return true;
++}
++
+ /**
+ * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
+ * @qc: Storage for translated ATA taskfile
+@@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+ scsi_10_lba_len(cdb, &block, &n_block);
+ if (cdb[1] & (1 << 3))
+ tf_flags |= ATA_TFLAG_FUA;
++ if (!ata_check_nblocks(scmd, n_block))
++ goto invalid_fld;
+ break;
+ case READ_6:
+ case WRITE_6:
+@@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+ */
+ if (!n_block)
+ n_block = 256;
++ if (!ata_check_nblocks(scmd, n_block))
++ goto invalid_fld;
+ break;
+ case READ_16:
+ case WRITE_16:
+@@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
+ scsi_16_lba_len(cdb, &block, &n_block);
+ if (cdb[1] & (1 << 3))
+ tf_flags |= ATA_TFLAG_FUA;
++ if (!ata_check_nblocks(scmd, n_block))
++ goto invalid_fld;
+ break;
+ default:
+ DPRINTK("no-byte command\n");
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 10aa27882142..4f115adb4ee8 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+ unsigned int offset;
+ unsigned char *buf;
+
++ if (!qc->cursg) {
++ qc->curbytes = qc->nbytes;
++ return;
++ }
+ if (qc->curbytes == qc->nbytes - qc->sect_size)
+ ap->hsm_task_state = HSM_ST_LAST;
+
+@@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
+
+ if (qc->cursg_ofs == qc->cursg->length) {
+ qc->cursg = sg_next(qc->cursg);
++ if (!qc->cursg)
++ ap->hsm_task_state = HSM_ST_LAST;
+ qc->cursg_ofs = 0;
+ }
+ }
+diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
+index 7c37f2ff09e4..deae466395de 100644
+--- a/drivers/ata/pata_rb532_cf.c
++++ b/drivers/ata/pata_rb532_cf.c
+@@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
+ static int rb532_pata_driver_remove(struct platform_device *pdev)
+ {
+ struct ata_host *ah = platform_get_drvdata(pdev);
+- struct rb532_cf_info *info = ah->private_data;
+
+ ata_host_detach(ah);
+
+diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
+index 5b49f1b33ebe..e2ea2356da06 100644
+--- a/drivers/block/aoe/aoedev.c
++++ b/drivers/block/aoe/aoedev.c
+@@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
+ }
+
+ flush_scheduled_work();
+- /* pass one: without sleeping, do aoedev_downdev */
++ /* pass one: do aoedev_downdev, which might sleep */
++restart1:
+ spin_lock_irqsave(&devlist_lock, flags);
+ for (d = devlist; d; d = d->next) {
+ spin_lock(&d->lock);
++ if (d->flags & DEVFL_TKILL)
++ goto cont;
++
+ if (exiting) {
+ /* unconditionally take each device down */
+ } else if (specified) {
+@@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
+ || d->ref)
+ goto cont;
+
++ spin_unlock(&d->lock);
++ spin_unlock_irqrestore(&devlist_lock, flags);
+ aoedev_downdev(d);
+ d->flags |= DEVFL_TKILL;
++ goto restart1;
+ cont:
+ spin_unlock(&d->lock);
+ }
+@@ -348,7 +355,7 @@ cont:
+ /* pass two: call freedev, which might sleep,
+ * for aoedevs marked with DEVFL_TKILL
+ */
+-restart:
++restart2:
+ spin_lock_irqsave(&devlist_lock, flags);
+ for (d = devlist; d; d = d->next) {
+ spin_lock(&d->lock);
+@@ -357,7 +364,7 @@ restart:
+ spin_unlock(&d->lock);
+ spin_unlock_irqrestore(&devlist_lock, flags);
+ freedev(d);
+- goto restart;
++ goto restart2;
+ }
+ spin_unlock(&d->lock);
+ }
+diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
+index 5c50e723ecae..1a191eeeebba 100644
+--- a/drivers/clk/socfpga/clk-periph-s10.c
++++ b/drivers/clk/socfpga/clk-periph-s10.c
+@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
+ if (socfpgaclk->fixed_div) {
+ div = socfpgaclk->fixed_div;
+ } else {
+- if (!socfpgaclk->bypass_reg)
++ if (socfpgaclk->hw.reg)
+ div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 4f333d6f2e23..7f9f75201138 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
++ GPIOLINE_FLAG_IS_OUT);
+ if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
++ GPIOLINE_FLAG_IS_OUT);
+
+ if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
+ return -EFAULT;
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+index 02955e6e9dd9..c21ef99cc590 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+@@ -1317,6 +1317,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev)
++{
++ int r;
++
++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
++ if (unlikely(r != 0))
++ return r;
++
++ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
++ AMDGPU_GEM_DOMAIN_VRAM);
++ if (!r)
++ adev->gfx.rlc.clear_state_gpu_addr =
++ amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);
++
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++
++ return r;
++}
++
++static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev)
++{
++ int r;
++
++ if (!adev->gfx.rlc.clear_state_obj)
++ return;
++
++ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
++ if (likely(r == 0)) {
++ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
++ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
++ }
++}
++
+ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
+ {
+ amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
+@@ -4777,6 +4810,10 @@ static int gfx_v8_0_hw_init(void *handle)
+ gfx_v8_0_init_golden_registers(adev);
+ gfx_v8_0_constants_init(adev);
+
++ r = gfx_v8_0_csb_vram_pin(adev);
++ if (r)
++ return r;
++
+ r = adev->gfx.rlc.funcs->resume(adev);
+ if (r)
+ return r;
+@@ -4893,6 +4930,9 @@ static int gfx_v8_0_hw_fini(void *handle)
+ else
+ pr_err("rlc is busy, skip halt rlc\n");
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
++
++ gfx_v8_0_csb_vram_unpin(adev);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2f7f0a2e4a6c..0332177c0302 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -596,6 +596,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
++ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
++ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
++ AMD_PG_SUPPORT_CP |
++ AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index b7e594c2bfb4..84c34712e39e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -949,11 +949,6 @@ static int soc15_common_early_init(void *handle)
+
+ adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
+ }
+-
+- if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+- AMD_PG_SUPPORT_CP |
+- AMD_PG_SUPPORT_RLC_SMU_HS;
+ break;
+ default:
+ /* FIXME: not supported yet */
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+index b4e7404fe660..a11637b0f6cc 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ u8 *ptr = msg->buf;
+
+ while (remaining) {
+- u8 cnt = (remaining > 16) ? 16 : remaining;
+- u8 cmd;
++ u8 cnt, retries, cmd;
+
+ if (msg->flags & I2C_M_RD)
+ cmd = 1;
+@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ if (mcnt || remaining > 16)
+ cmd |= 4; /* MOT */
+
+- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
+- if (ret < 0) {
+- nvkm_i2c_aux_release(aux);
+- return ret;
++ for (retries = 0, cnt = 0;
++ retries < 32 && !cnt;
++ retries++) {
++ cnt = min_t(u8, remaining, 16);
++ ret = aux->func->xfer(aux, true, cmd,
++ msg->addr, ptr, &cnt);
++ if (ret < 0)
++ goto out;
++ }
++ if (!cnt) {
++ AUX_TRACE(aux, "no data after 32 retries");
++ ret = -EIO;
++ goto out;
+ }
+
+ ptr += cnt;
+@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+ msg++;
+ }
+
++ ret = num;
++out:
+ nvkm_i2c_aux_release(aux);
+- return num;
++ return ret;
+ }
+
+ static u32
+diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+index 95e5c517a15f..9aae3d8e99ef 100644
+--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
++++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+@@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev)
+
+ static const struct dev_pm_ops rockchip_dp_pm_ops = {
+ #ifdef CONFIG_PM_SLEEP
+- .suspend = rockchip_dp_suspend,
++ .suspend_late = rockchip_dp_suspend,
+ .resume_early = rockchip_dp_resume,
+ #endif
+ };
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index e4e09d47c5c0..59e9d05ab928 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ break;
+ }
+
+- if (retries == RETRIES)
++ if (retries == RETRIES) {
++ kfree(reply);
+ return -EINVAL;
++ }
+
+ *msg_len = reply_len;
+ *msg = reply;
+diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
+index 98bf694626f7..3a8c4a5971f7 100644
+--- a/drivers/hid/hid-a4tech.c
++++ b/drivers/hid/hid-a4tech.c
+@@ -23,12 +23,36 @@
+ #define A4_2WHEEL_MOUSE_HACK_7 0x01
+ #define A4_2WHEEL_MOUSE_HACK_B8 0x02
+
++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
++
+ struct a4tech_sc {
+ unsigned long quirks;
+ unsigned int hw_wheel;
+ __s32 delayed_value;
+ };
+
++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
++ struct hid_field *field, struct hid_usage *usage,
++ unsigned long **bit, int *max)
++{
++ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
++
++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
++ usage->hid == A4_WHEEL_ORIENTATION) {
++ /*
++ * We do not want to have this usage mapped to anything as it's
++ * nonstandard and doesn't really behave like an HID report.
++ * It's only selecting the orientation (vertical/horizontal) of
++ * the previous mouse wheel report. The input_events will be
++ * generated once both reports are recorded in a4_event().
++ */
++ return -1;
++ }
++
++ return 0;
++
++}
++
+ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
+ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+ struct input_dev *input;
+
+- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
+- !usage->type)
++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+ return 0;
+
+ input = field->hidinput->input;
+@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
+ return 1;
+ }
+
+- if (usage->hid == 0x000100b8) {
++ if (usage->hid == A4_WHEEL_ORIENTATION) {
+ input_event(input, EV_REL, value ? REL_HWHEEL :
+ REL_WHEEL, a4->delayed_value);
+ input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
+@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
+ static struct hid_driver a4_driver = {
+ .name = "a4tech",
+ .id_table = a4_devices,
++ .input_mapping = a4_input_mapping,
+ .input_mapped = a4_input_mapped,
+ .event = a4_event,
+ .probe = a4_probe,
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 34a812025b94..76aa474e92c1 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -990,6 +990,7 @@
+ #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
+ #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
+ #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
++#define USB_DEVICE_ID_SAITEK_X52 0x075c
+
+ #define USB_VENDOR_ID_SAMSUNG 0x0419
+ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index cf05816a601f..34e2b3f9d540 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3749,15 +3749,45 @@ static const struct hid_device_id hidpp_devices[] = {
+
+ { L27MHZ_DEVICE(HID_ANY_ID) },
+
+- { /* Logitech G403 Gaming Mouse over USB */
++ { /* Logitech G203/Prodigy Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
++ { /* Logitech G302 Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
++ { /* Logitech G303 Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
++ { /* Logitech G400 Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
++ { /* Logitech G403 Wireless Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
++ { /* Logitech G403 Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
++ { /* Logitech G403 Hero Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
++ { /* Logitech G502 Proteus Core Gaming Mouse */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
++ { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
++ { /* Logitech G502 Hero Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
+ { /* Logitech G700 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
++ { /* Logitech G700s Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
++ { /* Logitech G703 Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
++ { /* Logitech G703 Hero Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
+ { /* Logitech G900 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
++ { /* Logitech G903 Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
++ { /* Logitech G903 Hero Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
++ { /* Logitech G Pro Gaming Mouse over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
+
+ { /* MX5000 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 5b669f7d653f..4fe2c3ab76f9 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -141,6 +141,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
++ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
+index e12f2588ddeb..bdfc5ff3b2c5 100644
+--- a/drivers/hid/hid-tmff.c
++++ b/drivers/hid/hid-tmff.c
+@@ -22,6 +22,8 @@
+
+ #include "hid-ids.h"
+
++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
++
+ static const signed short ff_rumble[] = {
+ FF_RUMBLE,
+ -1
+@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data,
+ struct hid_field *ff_field = tmff->ff_field;
+ int x, y;
+ int left, right; /* Rumbling */
++ int motor_swap;
+
+ switch (effect->type) {
+ case FF_CONSTANT:
+@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data,
+ ff_field->logical_minimum,
+ ff_field->logical_maximum);
+
++ /* 2-in-1 strong motor is left */
++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
++ motor_swap = left;
++ left = right;
++ right = motor_swap;
++ }
++
+ dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
+ ff_field->value[0] = left;
+ ff_field->value[1] = right;
+@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = {
+ .driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
+ .driver_data = (unsigned long)ff_rumble },
++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
++ .driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
+ .driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 926c597f5f46..53ed51adb8ac 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
+ y >>= 1;
+ distance >>= 1;
+ }
++ if (features->type == INTUOSHT2)
++ distance = features->distance_max - distance;
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_DISTANCE, distance);
+@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+ input_report_key(input, BTN_BASE2, (data[11] & 0x02));
+
+ if (data[12] & 0x80)
+- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
+ else
+ input_report_abs(input, ABS_WHEEL, 0);
+
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 5f9505a087f6..23f358cb7f49 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -26,7 +26,7 @@
+
+ static unsigned long virt_to_hvpfn(void *addr)
+ {
+- unsigned long paddr;
++ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
+index fe7e7097e00a..7e9527ab6d64 100644
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -2576,18 +2576,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
+ hfi1_kern_clear_hw_flow(priv->rcd, qp);
+ }
+
+-static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
+- struct hfi1_packet *packet, u8 rcv_type,
+- u8 opcode)
++static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
+ {
+ struct rvt_qp *qp = packet->qp;
+- struct hfi1_qp_priv *qpriv = qp->priv;
+- u32 ipsn;
+- struct ib_other_headers *ohdr = packet->ohdr;
+- struct rvt_ack_entry *e;
+- struct tid_rdma_request *req;
+- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+- u32 i;
+
+ if (rcv_type >= RHF_RCV_TYPE_IB)
+ goto done;
+@@ -2604,41 +2595,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
+ if (rcv_type == RHF_RCV_TYPE_EAGER) {
+ hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
+ hfi1_schedule_send(qp);
+- goto done_unlock;
+- }
+-
+- /*
+- * For TID READ response, error out QP after freeing the tid
+- * resources.
+- */
+- if (opcode == TID_OP(READ_RESP)) {
+- ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
+- if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
+- cmp_psn(ipsn, qp->s_psn) < 0) {
+- hfi1_kern_read_tid_flow_free(qp);
+- spin_unlock(&qp->s_lock);
+- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+- goto done;
+- }
+- goto done_unlock;
+- }
+-
+- /*
+- * Error out the qp for TID RDMA WRITE
+- */
+- hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
+- for (i = 0; i < rvt_max_atomic(rdi); i++) {
+- e = &qp->s_ack_queue[i];
+- if (e->opcode == TID_OP(WRITE_REQ)) {
+- req = ack_to_tid_req(e);
+- hfi1_kern_exp_rcv_clear_all(req);
+- }
+ }
+- spin_unlock(&qp->s_lock);
+- rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
+- goto done;
+
+-done_unlock:
++ /* Since no payload is delivered, just drop the packet */
+ spin_unlock(&qp->s_lock);
+ done:
+ return true;
+@@ -2689,12 +2648,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ u32 fpsn;
+
+ lockdep_assert_held(&qp->r_lock);
++ spin_lock(&qp->s_lock);
+ /* If the psn is out of valid range, drop the packet */
+ if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
+ cmp_psn(ibpsn, qp->s_psn) > 0)
+- return ret;
++ goto s_unlock;
+
+- spin_lock(&qp->s_lock);
+ /*
+ * Note that NAKs implicitly ACK outstanding SEND and RDMA write
+ * requests and implicitly NAK RDMA read and atomic requests issued
+@@ -2742,9 +2701,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+
+ wqe = do_rc_completion(qp, wqe, ibp);
+ if (qp->s_acked == qp->s_tail)
+- break;
++ goto s_unlock;
+ }
+
++ if (qp->s_acked == qp->s_tail)
++ goto s_unlock;
++
+ /* Handle the eflags for the request */
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
+ goto s_unlock;
+@@ -2924,7 +2886,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ if (lnh == HFI1_LRH_GRH)
+ goto r_unlock;
+
+- if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
++ if (tid_rdma_tid_err(packet, rcv_type))
+ goto r_unlock;
+ }
+
+@@ -2944,8 +2906,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ */
+ spin_lock(&qp->s_lock);
+ qpriv = qp->priv;
++ if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
++ qpriv->r_tid_tail == qpriv->r_tid_head)
++ goto unlock;
+ e = &qp->s_ack_queue[qpriv->r_tid_tail];
++ if (e->opcode != TID_OP(WRITE_REQ))
++ goto unlock;
+ req = ack_to_tid_req(e);
++ if (req->comp_seg == req->cur_seg)
++ goto unlock;
+ flow = &req->flows[req->clear_tail];
+ trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
+ trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
+@@ -4511,7 +4480,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
+ struct rvt_swqe *wqe;
+ struct tid_rdma_request *req;
+ struct tid_rdma_flow *flow;
+- u32 aeth, psn, req_psn, ack_psn, fspsn, resync_psn, ack_kpsn;
++ u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
+ unsigned long flags;
+ u16 fidx;
+
+@@ -4540,6 +4509,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
+ ack_kpsn--;
+ }
+
++ if (unlikely(qp->s_acked == qp->s_tail))
++ goto ack_op_err;
++
+ wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
+
+ if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
+@@ -4552,7 +4524,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
+
+ /* Drop stale ACK/NAK */
+- if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
++ if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
++ cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
+ goto ack_op_err;
+
+ while (cmp_psn(ack_kpsn,
+@@ -4714,8 +4687,12 @@ done:
+ switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
+ IB_AETH_CREDIT_MASK) {
+ case 0: /* PSN sequence error */
++ if (!req->flows)
++ break;
+ flow = &req->flows[req->acked_tail];
+- fspsn = full_flow_psn(flow, flow->flow_state.spsn);
++ flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
++ if (cmp_psn(psn, flpsn) > 0)
++ break;
+ trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
+ flow);
+ req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 0e224232f746..008a74a1ed44 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
+ printk(KERN_DEBUG
+ "%s: %s: alloc urb for fifo %i failed",
+ hw->name, __func__, fifo->fifonum);
++ continue;
+ }
+ fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
+ fifo->iso[i].indx = i;
+@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
+ static int
+ setup_hfcsusb(struct hfcsusb *hw)
+ {
++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
+ u_char b;
++ int ret;
+
+ if (debug & DBG_HFC_CALL_TRACE)
+ printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+
++ if (!dmabuf)
++ return -ENOMEM;
++
++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
++
++ memcpy(&b, dmabuf, sizeof(u_char));
++ kfree(dmabuf);
++
+ /* check the chip id */
+- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
++ if (ret != 1) {
+ printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
+ hw->name, __func__);
+ return 1;
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index b6b5acc92ca2..2a48ea3f1b30 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ unsigned long freed;
+
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+- if (!dm_bufio_trylock(c))
++ if (sc->gfp_mask & __GFP_FS)
++ dm_bufio_lock(c);
++ else if (!dm_bufio_trylock(c))
+ return SHRINK_STOP;
+
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
+index 845f376a72d9..8288887b7f94 100644
+--- a/drivers/md/dm-dust.c
++++ b/drivers/md/dm-dust.c
+@@ -25,6 +25,7 @@ struct dust_device {
+ unsigned long long badblock_count;
+ spinlock_t dust_lock;
+ unsigned int blksz;
++ int sect_per_block_shift;
+ unsigned int sect_per_block;
+ sector_t start;
+ bool fail_read_on_bb:1;
+@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
++ bblock = dust_rb_search(&dd->badblocklist, block);
+
+ if (bblock == NULL) {
+ if (!dd->quiet_mode) {
+@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
+ }
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+- bblock->bb = block * dd->sect_per_block;
++ bblock->bb = block;
+ if (!dust_rb_insert(&dd->badblocklist, bblock)) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu already in badblocklist",
+@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+- bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
++ bblock = dust_rb_search(&dd->badblocklist, block);
+ if (bblock != NULL)
+ DMINFO("%s: block %llu found in badblocklist", __func__, block);
+ else
+@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
+ int ret = DM_MAPIO_REMAPPED;
+
+ if (fail_read_on_bb) {
++ thisblock >>= dd->sect_per_block_shift;
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ ret = __dust_map_read(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
+ unsigned long flags;
+
+ if (fail_read_on_bb) {
++ thisblock >>= dd->sect_per_block_shift;
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ __dust_map_write(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ dd->blksz = blksz;
+ dd->start = tmp;
+
++ dd->sect_per_block_shift = __ffs(sect_per_block);
++
+ /*
+ * Whether to fail a read on a "bad" block.
+ * Defaults to false; enabled later by message.
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 44e76cda087a..29a5e5b4c63c 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1940,7 +1940,22 @@ offload_to_thread:
+ queue_work(ic->wait_wq, &dio->work);
+ return;
+ }
++ if (journal_read_pos != NOT_FOUND)
++ dio->range.n_sectors = ic->sectors_per_block;
+ wait_and_add_new_range(ic, &dio->range);
++ /*
++ * wait_and_add_new_range drops the spinlock, so the journal
++ * may have been changed arbitrarily. We need to recheck.
++ * To simplify the code, we restrict I/O size to just one block.
++ */
++ if (journal_read_pos != NOT_FOUND) {
++ sector_t next_sector;
++ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
++ if (unlikely(new_pos != journal_read_pos)) {
++ remove_range_unlocked(ic, &dio->range);
++ goto retry;
++ }
++ }
+ }
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
+index 671c24332802..3f694d9061ec 100644
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
+ * no point in continuing.
+ */
+ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+- job->master_job->write_err)
++ job->master_job->write_err) {
++ job->write_err = job->master_job->write_err;
+ return -EIO;
++ }
+
+ io_job_start(job->kc->throttle);
+
+@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
+ else
+ job->read_err = 1;
+ push(&kc->complete_jobs, job);
++ wake(kc);
+ break;
+ }
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 9fdef6897316..01aaac2c15be 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+- return r;
++ goto bad;
+
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index ec8b27e20de3..c840c587083b 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -1334,7 +1334,7 @@ void dm_table_event(struct dm_table *t)
+ }
+ EXPORT_SYMBOL(dm_table_event);
+
+-sector_t dm_table_get_size(struct dm_table *t)
++inline sector_t dm_table_get_size(struct dm_table *t)
+ {
+ return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
+ }
+@@ -1359,6 +1359,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+ unsigned int l, n = 0, k = 0;
+ sector_t *node;
+
++ if (unlikely(sector >= dm_table_get_size(t)))
++ return &t->targets[t->num_targets];
++
+ for (l = 0; l < t->depth; l++) {
+ n = get_child(n, k);
+ node = get_node(t, l, n);
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index 4cdde7a02e94..7e8d7fc99410 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
+ sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct bio *bio;
+
++ if (dmz_bdev_is_dying(zmd->dev))
++ return ERR_PTR(-EIO);
++
+ /* Get a new block and a BIO to read it */
+ mblk = dmz_alloc_mblock(zmd, mblk_no);
+ if (!mblk)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ dmz_free_mblock(zmd, mblk);
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock(&zmd->mblk_lock);
+@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
+ if (!mblk) {
+ /* Cache miss: read the block from disk */
+ mblk = dmz_get_mblock_slow(zmd, mblk_no);
+- if (!mblk)
+- return ERR_PTR(-ENOMEM);
++ if (IS_ERR(mblk))
++ return mblk;
+ }
+
+ /* Wait for on-going read I/O and check for error */
+@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
+ /*
+ * Issue a metadata block write BIO.
+ */
+-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+- unsigned int set)
++static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
++ unsigned int set)
+ {
+ sector_t block = zmd->sb[set].block + mblk->no;
+ struct bio *bio;
+
++ if (dmz_bdev_is_dying(zmd->dev))
++ return -EIO;
++
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ set_bit(DMZ_META_ERROR, &mblk->state);
+- return;
++ return -ENOMEM;
+ }
+
+ set_bit(DMZ_META_WRITING, &mblk->state);
+@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
+ bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+ submit_bio(bio);
++
++ return 0;
+ }
+
+ /*
+@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
+ struct bio *bio;
+ int ret;
+
++ if (dmz_bdev_is_dying(zmd->dev))
++ return -EIO;
++
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio)
+ return -ENOMEM;
+@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
+ {
+ struct dmz_mblock *mblk;
+ struct blk_plug plug;
+- int ret = 0;
++ int ret = 0, nr_mblks_submitted = 0;
+
+ /* Issue writes */
+ blk_start_plug(&plug);
+- list_for_each_entry(mblk, write_list, link)
+- dmz_write_mblock(zmd, mblk, set);
++ list_for_each_entry(mblk, write_list, link) {
++ ret = dmz_write_mblock(zmd, mblk, set);
++ if (ret)
++ break;
++ nr_mblks_submitted++;
++ }
+ blk_finish_plug(&plug);
+
+ /* Wait for completion */
+ list_for_each_entry(mblk, write_list, link) {
++ if (!nr_mblks_submitted)
++ break;
+ wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
+ TASK_UNINTERRUPTIBLE);
+ if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+ clear_bit(DMZ_META_ERROR, &mblk->state);
+ ret = -EIO;
+ }
++ nr_mblks_submitted--;
+ }
+
+ /* Flush drive cache (this will also sync data) */
+@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
+ */
+ dmz_lock_flush(zmd);
+
++ if (dmz_bdev_is_dying(zmd->dev)) {
++ ret = -EIO;
++ goto out;
++ }
++
+ /* Get dirty blocks */
+ spin_lock(&zmd->mblk_lock);
+ list_splice_init(&zmd->mblk_dirty_list, &write_list);
+@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+ struct dm_zone *zone;
+
+ if (list_empty(&zmd->map_rnd_list))
+- return NULL;
++ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(zone, &zmd->map_rnd_list, link) {
+ if (dmz_is_buf(zone))
+@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+ return dzone;
+ }
+
+- return NULL;
++ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+ struct dm_zone *zone;
+
+ if (list_empty(&zmd->map_seq_list))
+- return NULL;
++ return ERR_PTR(-EBUSY);
+
+ list_for_each_entry(zone, &zmd->map_seq_list, link) {
+ if (!zone->bzone)
+@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+ return zone;
+ }
+
+- return NULL;
++ return ERR_PTR(-EBUSY);
+ }
+
+ /*
+@@ -1623,6 +1646,10 @@ again:
+ /* Allocate a random zone */
+ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ if (!dzone) {
++ if (dmz_bdev_is_dying(zmd->dev)) {
++ dzone = ERR_PTR(-EIO);
++ goto out;
++ }
+ dmz_wait_for_free_zones(zmd);
+ goto again;
+ }
+@@ -1720,6 +1747,10 @@ again:
+ /* Allocate a random zone */
+ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ if (!bzone) {
++ if (dmz_bdev_is_dying(zmd->dev)) {
++ bzone = ERR_PTR(-EIO);
++ goto out;
++ }
+ dmz_wait_for_free_zones(zmd);
+ goto again;
+ }
+diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
+index edf4b95eb075..9470b8f77a33 100644
+--- a/drivers/md/dm-zoned-reclaim.c
++++ b/drivers/md/dm-zoned-reclaim.c
+@@ -37,7 +37,7 @@ enum {
+ /*
+ * Number of seconds of target BIO inactivity to consider the target idle.
+ */
+-#define DMZ_IDLE_PERIOD (10UL * HZ)
++#define DMZ_IDLE_PERIOD (10UL * HZ)
+
+ /*
+ * Percentage of unmapped (free) random zones below which reclaim starts
+@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
+ set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
+
+ while (block < end_block) {
++ if (dev->flags & DMZ_BDEV_DYING)
++ return -EIO;
++
+ /* Get a valid region from the source zone */
+ ret = dmz_first_valid_block(zmd, src_zone, &block);
+ if (ret <= 0)
+@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+
+ dmz_unlock_flush(zmd);
+
+- return 0;
++ return ret;
+ }
+
+ /*
+@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+
+ dmz_unlock_flush(zmd);
+
+- return 0;
++ return ret;
+ }
+
+ /*
+@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+
+ dmz_unlock_flush(zmd);
+
+- return 0;
++ return ret;
+ }
+
+ /*
+@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+ /*
+ * Find a candidate zone for reclaim and process it.
+ */
+-static void dmz_reclaim(struct dmz_reclaim *zrc)
++static int dmz_do_reclaim(struct dmz_reclaim *zrc)
+ {
+ struct dmz_metadata *zmd = zrc->metadata;
+ struct dm_zone *dzone;
+@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
+
+ /* Get a data zone */
+ dzone = dmz_get_zone_for_reclaim(zmd);
+- if (!dzone)
+- return;
++ if (IS_ERR(dzone))
++ return PTR_ERR(dzone);
+
+ start = jiffies;
+
+@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
+ out:
+ if (ret) {
+ dmz_unlock_zone_reclaim(dzone);
+- return;
++ return ret;
+ }
+
+- (void) dmz_flush_metadata(zrc->metadata);
++ ret = dmz_flush_metadata(zrc->metadata);
++ if (ret) {
++ dmz_dev_debug(zrc->dev,
++ "Metadata flush for zone %u failed, err %d\n",
++ dmz_id(zmd, rzone), ret);
++ return ret;
++ }
+
+ dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
+ dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
++ return 0;
+ }
+
+ /*
+@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_rnd, nr_unmap_rnd;
+ unsigned int p_unmap_rnd;
++ int ret;
++
++ if (dmz_bdev_is_dying(zrc->dev))
++ return;
+
+ if (!dmz_should_reclaim(zrc)) {
+ mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
+@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
+ (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+ p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+
+- dmz_reclaim(zrc);
++ ret = dmz_do_reclaim(zrc);
++ if (ret) {
++ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
++ if (ret == -EIO)
++ /*
++ * LLD might be performing some error handling sequence
++ * at the underlying device. To not interfere, do not
++ * attempt to schedule the next reclaim run immediately.
++ */
++ return;
++ }
+
+ dmz_schedule_reclaim(zrc);
+ }
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index 51d029bbb740..ff3fd011796e 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+
+ refcount_inc(&bioctx->ref);
+ generic_make_request(clone);
++ if (clone->bi_status == BLK_STS_IOERR)
++ return -EIO;
+
+ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
+@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
+
+ /* Get the buffer zone. One will be allocated if needed */
+ bzone = dmz_get_chunk_buffer(zmd, zone);
+- if (!bzone)
+- return -ENOSPC;
++ if (IS_ERR(bzone))
++ return PTR_ERR(bzone);
+
+ if (dmz_is_readonly(bzone))
+ return -EROFS;
+@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
+
+ dmz_lock_metadata(zmd);
+
++ if (dmz->dev->flags & DMZ_BDEV_DYING) {
++ ret = -EIO;
++ goto out;
++ }
++
+ /*
+ * Get the data zone mapping the chunk. There may be no
+ * mapping for read and discard. If a mapping is obtained,
+@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
+
+ /* Flush dirty metadata blocks */
+ ret = dmz_flush_metadata(dmz->metadata);
++ if (ret)
++ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
+
+ /* Process queued flush requests */
+ while (1) {
+@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
+ * Get a chunk work and start it to process a new BIO.
+ * If the BIO chunk has no work yet, create one.
+ */
+-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
++static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ {
+ unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ struct dm_chunk_work *cw;
++ int ret = 0;
+
+ mutex_lock(&dmz->chunk_lock);
+
+ /* Get the BIO chunk work. If one is not active yet, create one */
+ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+ if (!cw) {
+- int ret;
+
+ /* Create a new chunk work */
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
+- if (!cw)
++ if (unlikely(!cw)) {
++ ret = -ENOMEM;
+ goto out;
++ }
+
+ INIT_WORK(&cw->work, dmz_chunk_work);
+ refcount_set(&cw->refcount, 0);
+@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
+ if (unlikely(ret)) {
+ kfree(cw);
+- cw = NULL;
+ goto out;
+ }
+ }
+@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+ bio_list_add(&cw->bio_list, bio);
+ dmz_get_chunk_work(cw);
+
++ dmz_reclaim_bio_acc(dmz->reclaim);
+ if (queue_work(dmz->chunk_wq, &cw->work))
+ dmz_get_chunk_work(cw);
+ out:
+ mutex_unlock(&dmz->chunk_lock);
++ return ret;
++}
++
++/*
++ * Check the backing device availability. If it's on the way out,
++ * start failing I/O. Reclaim and metadata components also call this
++ * function to cleanly abort operation in the event of such failure.
++ */
++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
++{
++ struct gendisk *disk;
++
++ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
++ disk = dmz_dev->bdev->bd_disk;
++ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
++ dmz_dev_warn(dmz_dev, "Backing device queue dying");
++ dmz_dev->flags |= DMZ_BDEV_DYING;
++ } else if (disk->fops->check_events) {
++ if (disk->fops->check_events(disk, 0) &
++ DISK_EVENT_MEDIA_CHANGE) {
++ dmz_dev_warn(dmz_dev, "Backing device offline");
++ dmz_dev->flags |= DMZ_BDEV_DYING;
++ }
++ }
++ }
++
++ return dmz_dev->flags & DMZ_BDEV_DYING;
+ }
+
+ /*
+@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned int nr_sectors = bio_sectors(bio);
+ sector_t chunk_sector;
++ int ret;
++
++ if (dmz_bdev_is_dying(dmz->dev))
++ return DM_MAPIO_KILL;
+
+ dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+ bio_op(bio), (unsigned long long)sector, nr_sectors,
+@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
+ dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+
+ /* Now ready to handle this BIO */
+- dmz_reclaim_bio_acc(dmz->reclaim);
+- dmz_queue_chunk_work(dmz, bio);
++ ret = dmz_queue_chunk_work(dmz, bio);
++ if (ret) {
++ dmz_dev_debug(dmz->dev,
++ "BIO op %d, can't process chunk %llu, err %i\n",
++ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
++ ret);
++ return DM_MAPIO_REQUEUE;
++ }
+
+ return DM_MAPIO_SUBMITTED;
+ }
+@@ -855,6 +903,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+ {
+ struct dmz_target *dmz = ti->private;
+
++ if (dmz_bdev_is_dying(dmz->dev))
++ return -ENODEV;
++
+ *bdev = dmz->dev->bdev;
+
+ return 0;
+diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
+index ed8de49c9a08..93a64529f219 100644
+--- a/drivers/md/dm-zoned.h
++++ b/drivers/md/dm-zoned.h
+@@ -56,6 +56,8 @@ struct dmz_dev {
+
+ unsigned int nr_zones;
+
++ unsigned int flags;
++
+ sector_t zone_nr_sectors;
+ unsigned int zone_nr_sectors_shift;
+
+@@ -67,6 +69,9 @@ struct dmz_dev {
+ (dev)->zone_nr_sectors_shift)
+ #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+
++/* Device flags. */
++#define DMZ_BDEV_DYING (1 << 0)
++
+ /*
+ * Zone descriptor.
+ */
+@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
+ void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
+ void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+
++/*
++ * Functions defined in dm-zoned-target.c
++ */
++bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
++
+ #endif /* DM_ZONED_H */
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 58b319757b1e..8aae0624a297 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+
+ new_parent = shadow_current(s);
+
++ pn = dm_block_data(new_parent);
++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
++ sizeof(__le64) : s->info->value_type.size;
++
++ /* create & init the left block */
+ r = new_block(s->info, &left);
+ if (r < 0)
+ return r;
+
++ ln = dm_block_data(left);
++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
++
++ ln->header.flags = pn->header.flags;
++ ln->header.nr_entries = cpu_to_le32(nr_left);
++ ln->header.max_entries = pn->header.max_entries;
++ ln->header.value_size = pn->header.value_size;
++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
++
++ /* create & init the right block */
+ r = new_block(s->info, &right);
+ if (r < 0) {
+ unlock_block(s->info, left);
+ return r;
+ }
+
+- pn = dm_block_data(new_parent);
+- ln = dm_block_data(left);
+ rn = dm_block_data(right);
+-
+- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+ nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
+
+- ln->header.flags = pn->header.flags;
+- ln->header.nr_entries = cpu_to_le32(nr_left);
+- ln->header.max_entries = pn->header.max_entries;
+- ln->header.value_size = pn->header.value_size;
+-
+ rn->header.flags = pn->header.flags;
+ rn->header.nr_entries = cpu_to_le32(nr_right);
+ rn->header.max_entries = pn->header.max_entries;
+ rn->header.value_size = pn->header.value_size;
+-
+- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
+-
+- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+- sizeof(__le64) : s->info->value_type.size;
+- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+ memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
+ nr_right * size);
+
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index aec449243966..25328582cc48 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
+ }
+
+ if (smm->recursion_count == 1)
+- apply_bops(smm);
++ r = apply_bops(smm);
+
+ smm->recursion_count--;
+
+diff --git a/drivers/misc/habanalabs/firmware_if.c b/drivers/misc/habanalabs/firmware_if.c
+index eda5d7fcb79f..fe9e57a81b6f 100644
+--- a/drivers/misc/habanalabs/firmware_if.c
++++ b/drivers/misc/habanalabs/firmware_if.c
+@@ -24,7 +24,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+ {
+ const struct firmware *fw;
+ const u64 *fw_data;
+- size_t fw_size, i;
++ size_t fw_size;
+ int rc;
+
+ rc = request_firmware(&fw, fw_name, hdev->dev);
+@@ -45,22 +45,7 @@ int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
+
+ fw_data = (const u64 *) fw->data;
+
+- if ((fw->size % 8) != 0)
+- fw_size -= 8;
+-
+- for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
+- if (!(i & (0x80000 - 1))) {
+- dev_dbg(hdev->dev,
+- "copied so far %zu out of %zu for %s firmware",
+- i, fw_size, fw_name);
+- usleep_range(20, 100);
+- }
+-
+- writeq(*fw_data, dst);
+- }
+-
+- if ((fw->size % 8) != 0)
+- writel(*(const u32 *) fw_data, dst);
++ memcpy_toio(dst, fw_data, fw_size);
+
+ out:
+ release_firmware(fw);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index f183cadd14e3..e8f48f3cdf94 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2205,6 +2205,15 @@ static void bond_miimon_commit(struct bonding *bond)
+ bond_for_each_slave(bond, slave, iter) {
+ switch (slave->new_link) {
+ case BOND_LINK_NOCHANGE:
++ /* For 802.3ad mode, check current slave speed and
++ * duplex again in case its port was disabled after
++ * invalid speed/duplex reporting but recovered before
++ * link monitoring could make a decision on the actual
++ * link status
++ */
++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
++ slave->link == BOND_LINK_UP)
++ bond_3ad_adapter_speed_duplex_changed(slave);
+ continue;
+
+ case BOND_LINK_UP:
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index b6b93a2d93a5..483d270664cc 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev)
+ return -EINVAL;
+
+ dev->rtnl_link_ops = &can_link_ops;
++ netif_carrier_off(dev);
++
+ return register_netdev(dev);
+ }
+ EXPORT_SYMBOL_GPL(register_candev);
+diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
+index 185c7f7d38a4..5e0d5e8101c8 100644
+--- a/drivers/net/can/sja1000/peak_pcmcia.c
++++ b/drivers/net/can/sja1000/peak_pcmcia.c
+@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
+ if (!netdev)
+ continue;
+
+- strncpy(name, netdev->name, IFNAMSIZ);
++ strlcpy(name, netdev->name, IFNAMSIZ);
+
+ unregister_sja1000dev(netdev);
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 44e99e3d7134..2aec934fab0c 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
+ return regulator_disable(reg);
+ }
+
+-static void mcp251x_open_clean(struct net_device *net)
+-{
+- struct mcp251x_priv *priv = netdev_priv(net);
+- struct spi_device *spi = priv->spi;
+-
+- free_irq(spi->irq, priv);
+- mcp251x_hw_sleep(spi);
+- mcp251x_power_enable(priv->transceiver, 0);
+- close_candev(net);
+-}
+-
+ static int mcp251x_stop(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+@@ -940,37 +929,43 @@ static int mcp251x_open(struct net_device *net)
+ flags | IRQF_ONESHOT, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+- mcp251x_power_enable(priv->transceiver, 0);
+- close_candev(net);
+- goto open_unlock;
++ goto out_close;
+ }
+
+ priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0);
++ if (!priv->wq) {
++ ret = -ENOMEM;
++ goto out_clean;
++ }
+ INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+ INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+ ret = mcp251x_hw_reset(spi);
+- if (ret) {
+- mcp251x_open_clean(net);
+- goto open_unlock;
+- }
++ if (ret)
++ goto out_free_wq;
+ ret = mcp251x_setup(net, spi);
+- if (ret) {
+- mcp251x_open_clean(net);
+- goto open_unlock;
+- }
++ if (ret)
++ goto out_free_wq;
+ ret = mcp251x_set_normal_mode(spi);
+- if (ret) {
+- mcp251x_open_clean(net);
+- goto open_unlock;
+- }
++ if (ret)
++ goto out_free_wq;
+
+ can_led_event(net, CAN_LED_EVENT_OPEN);
+
+ netif_wake_queue(net);
++ mutex_unlock(&priv->mcp_lock);
+
+-open_unlock:
++ return 0;
++
++out_free_wq:
++ destroy_workqueue(priv->wq);
++out_clean:
++ free_irq(spi->irq, priv);
++ mcp251x_hw_sleep(spi);
++out_close:
++ mcp251x_power_enable(priv->transceiver, 0);
++ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
+ return ret;
+ }
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 22b9c8e6d040..65dce642b86b 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
+
+ dev_prev_siblings = dev->prev_siblings;
+ dev->state &= ~PCAN_USB_STATE_CONNECTED;
+- strncpy(name, netdev->name, IFNAMSIZ);
++ strlcpy(name, netdev->name, IFNAMSIZ);
+
+ unregister_netdev(netdev);
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+index 1e82b9efe447..58f89f6a040f 100644
+--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (!adapter->regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -ENOMEM;
+- goto out_free_adapter;
++ goto out_free_adapter_nofail;
+ }
+
+ adapter->pdev = pdev;
+@@ -3397,6 +3397,9 @@ out_free_dev:
+ if (adapter->port[i])
+ free_netdev(adapter->port[i]);
+
++out_free_adapter_nofail:
++ kfree_skb(adapter->nofail_skb);
++
+ out_free_adapter:
+ kfree(adapter);
+
+diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
+index 8429f5c1d810..a268e74b1834 100644
+--- a/drivers/net/ethernet/freescale/enetc/Kconfig
++++ b/drivers/net/ethernet/freescale/enetc/Kconfig
+@@ -2,6 +2,7 @@
+ config FSL_ENETC
+ tristate "ENETC PF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
++ select PHYLIB
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ physical function (PF) devices, managing ENETC Ports at a privileged
+@@ -12,6 +13,7 @@ config FSL_ENETC
+ config FSL_ENETC_VF
+ tristate "ENETC VF driver"
+ depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
++ select PHYLIB
+ help
+ This driver supports NXP ENETC gigabit ethernet controller PCIe
+ virtual function (VF) devices enabled by the ENETC PF driver.
+diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
+index e1f2978506fd..51cf6b0db904 100644
+--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
+@@ -153,6 +153,7 @@ struct hip04_priv {
+ unsigned int reg_inten;
+
+ struct napi_struct napi;
++ struct device *dev;
+ struct net_device *ndev;
+
+ struct tx_desc *tx_desc;
+@@ -181,7 +182,7 @@ struct hip04_priv {
+
+ static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+ {
+- return (head - tail) % (TX_DESC_NUM - 1);
++ return (head - tail) % TX_DESC_NUM;
+ }
+
+ static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+@@ -383,7 +384,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+ }
+
+ if (priv->tx_phys[tx_tail]) {
+- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
+ priv->tx_skb[tx_tail]->len,
+ DMA_TO_DEVICE);
+ priv->tx_phys[tx_tail] = 0;
+@@ -434,8 +435,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ return NETDEV_TX_BUSY;
+ }
+
+- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+- if (dma_mapping_error(&ndev->dev, phys)) {
++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
++ if (dma_mapping_error(priv->dev, phys)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+@@ -494,6 +495,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+ u16 len;
+ u32 err;
+
++ /* clean up tx descriptors */
++ tx_remaining = hip04_tx_reclaim(ndev, false);
++
+ while (cnt && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+@@ -502,7 +506,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
+ goto refill;
+ }
+
+- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+@@ -531,9 +535,9 @@ refill:
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+- phys = dma_map_single(&ndev->dev, buf,
++ phys = dma_map_single(priv->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&ndev->dev, phys))
++ if (dma_mapping_error(priv->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+@@ -554,8 +558,7 @@ refill:
+ }
+ napi_complete_done(napi, rx);
+ done:
+- /* clean up tx descriptors and start a new timer if necessary */
+- tx_remaining = hip04_tx_reclaim(ndev, false);
++ /* start a new timer if necessary */
+ if (rx < budget && tx_remaining)
+ hip04_start_tx_timer(priv);
+
+@@ -637,9 +640,9 @@ static int hip04_mac_open(struct net_device *ndev)
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(&ndev->dev, phys))
++ if (dma_mapping_error(priv->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+@@ -673,7 +676,7 @@ static int hip04_mac_stop(struct net_device *ndev)
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
++ dma_unmap_single(priv->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+@@ -817,6 +820,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
++ priv->dev = d;
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index 50ed1bdb632d..885529701de9 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4580,9 +4580,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
+ else
+ ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
+- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
+- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
++ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
++ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
++ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
+
+ if (old_ctrl0 != ctrl0)
+ writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
+index fdfedbc8e431..70a771cd8788 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
+@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ snprintf(bit_name, 30,
+ p_aeu->bit_name, num);
+ else
+- strncpy(bit_name,
++ strlcpy(bit_name,
+ p_aeu->bit_name, 30);
+
+ /* We now need to pass bitmask in its
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 13802b825d65..909422d93903 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+- dev->hw_ver = 0;
++ dev->hw_ver = cdev->chip_rev;
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index e3850938cf2f..d7bf0ad954b8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+index 64b8cb88ea45..d4bd99770f5d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
++ if (queue >= 4)
++ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index 0f0f4b31eb7e..9b5218a8c15b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -385,6 +385,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
+ return ERR_PTR(-ENOMEM);
+
+ *mac = of_get_mac_address(np);
++ if (IS_ERR(*mac)) {
++ if (PTR_ERR(*mac) == -EPROBE_DEFER)
++ return ERR_CAST(*mac);
++
++ *mac = NULL;
++ }
++
+ plat->interface = of_get_phy_mode(np);
+
+ /* Get max speed of operation from device tree */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+index 58ea18af9813..37c0bc699cd9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
+ entry = &priv->tc_entries[i];
+ if (!entry->in_use && !first && free)
+ first = entry;
+- if (entry->handle == loc && !free)
++ if ((entry->handle == loc) && !free && !entry->is_frag)
+ dup = entry;
+ }
+
+diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
+index b86a4b2116f8..59a94e07e7c5 100644
+--- a/drivers/net/phy/phy_led_triggers.c
++++ b/drivers/net/phy/phy_led_triggers.c
+@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
+ if (!phy->last_triggered)
+ led_trigger_event(&phy->led_link_trigger->trigger,
+ LED_FULL);
++ else
++ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+
+- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
+ led_trigger_event(&plt->trigger, LED_FULL);
+ phy->last_triggered = plt;
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 8b4ad10cf940..26c5207466af 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1294,6 +1294,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
+ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+index fba242284507..fa81ad67539f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1627,6 +1627,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ init_completion(&drv->request_firmware_complete);
+ INIT_LIST_HEAD(&drv->list);
+
++ iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
++
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* Create the device debugfs entries. */
+ drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+@@ -1647,8 +1649,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+ err_fw:
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ debugfs_remove_recursive(drv->dbgfs_drv);
+- iwl_fw_dbg_free(drv->trans);
+ #endif
++ iwl_fw_dbg_free(drv->trans);
+ kfree(drv);
+ err:
+ return ERR_PTR(ret);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index 964c7baabede..a6183281ee1e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
+ },
+ };
+
+-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+- enum set_key_cmd cmd,
+- struct ieee80211_vif *vif,
+- struct ieee80211_sta *sta,
+- struct ieee80211_key_conf *key);
++static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
++ enum set_key_cmd cmd,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ struct ieee80211_key_conf *key);
+
+ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+ {
+@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+- ieee80211_hw_set(hw, TX_AMSDU);
++ /*
++ * On older devices, enabling TX A-MSDU occasionally leads to
++ * something getting messed up, the command read from the FIFO
++ * gets out of sync and isn't a TX command, so that we have an
++ * assert EDC.
++ *
++ * It's not clear where the bug is, but since we didn't use to
++ * support A-MSDU until moving the mac80211 iTXQs, just leave it
++ * for older devices. We also don't see this issue on any newer
++ * devices.
++ */
++ if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
++ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+
+ if (iwl_mvm_has_tlc_offload(mvm)) {
+@@ -2725,7 +2737,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+
+ mvmvif->ap_early_keys[i] = NULL;
+
+- ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
++ ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ if (ret)
+ goto out_quota_failed;
+ }
+@@ -3493,11 +3505,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+ return ret;
+ }
+
+-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+- enum set_key_cmd cmd,
+- struct ieee80211_vif *vif,
+- struct ieee80211_sta *sta,
+- struct ieee80211_key_conf *key)
++static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
++ enum set_key_cmd cmd,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ struct ieee80211_key_conf *key)
+ {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+@@ -3552,8 +3564,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ return -EOPNOTSUPP;
+ }
+
+- mutex_lock(&mvm->mutex);
+-
+ switch (cmd) {
+ case SET_KEY:
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+@@ -3699,7 +3709,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ ret = -EINVAL;
+ }
+
++ return ret;
++}
++
++static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
++ enum set_key_cmd cmd,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ struct ieee80211_key_conf *key)
++{
++ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
++ int ret;
++
++ mutex_lock(&mvm->mutex);
++ ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
+ mutex_unlock(&mvm->mutex);
++
+ return ret;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index ed8fc9a9204c..0c11a219e347 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1807,7 +1807,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+ #endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+ /* rate scaling */
+-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
+ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
+ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+ void rs_update_last_rssi(struct iwl_mvm *mvm,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+index 63fdb4e68e9d..01b032f18bc8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+@@ -1197,6 +1197,27 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
+ return tid;
+ }
+
++void iwl_mvm_rs_init_wk(struct work_struct *wk)
++{
++ struct iwl_mvm_sta *mvmsta = container_of(wk, struct iwl_mvm_sta,
++ rs_init_wk);
++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
++ struct ieee80211_sta *sta;
++
++ rcu_read_lock();
++
++ sta = rcu_dereference(mvmvif->mvm->fw_id_to_mac_id[mvmsta->sta_id]);
++ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
++ rcu_read_unlock();
++ return;
++ }
++
++ iwl_mvm_rs_rate_init(mvmvif->mvm, sta, mvmvif->phy_ctxt->channel->band,
++ true);
++
++ rcu_read_unlock();
++}
++
+ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info, bool ndp)
+ {
+@@ -1269,7 +1290,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ (unsigned long)(lq_sta->last_tx +
+ (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+ IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+- iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
++ schedule_work(&mvmsta->rs_init_wk);
+ return;
+ }
+ lq_sta->last_tx = jiffies;
+@@ -1305,7 +1326,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+@@ -1367,7 +1388,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
+- rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
++ rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
++ tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+@@ -1442,16 +1464,24 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
+ struct iwl_op_mode *op_mode = mvm_r;
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+- if (!iwl_mvm_sta_from_mac80211(sta)->vif)
++ if (!mvmsta->vif)
+ return;
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return;
+
++ /* If it's locked, we are in the middle of the init flow;
++ * just wait for the next tx status to update the lq_sta data
++ */
++ if (!mutex_trylock(&mvmsta->lq_sta.rs_drv.mutex))
++ return;
++
+ iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
+ ieee80211_is_qos_nullfunc(hdr->frame_control));
++ mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
+ }
+
+ /*
+@@ -1794,7 +1824,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+ struct iwl_scale_tbl_info *tbl)
+ {
+ rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+ }
+
+ static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+@@ -2896,7 +2926,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
+ static void rs_initialize_lq(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct iwl_lq_sta *lq_sta,
+- enum nl80211_band band, bool update)
++ enum nl80211_band band)
+ {
+ struct iwl_scale_tbl_info *tbl;
+ struct rs_rate *rate;
+@@ -2926,7 +2956,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
+ rs_set_expected_tpt_table(lq_sta, tbl);
+ rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+ /* TODO restore station should remember the lq cmd */
+- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
++ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+ }
+
+ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+@@ -3179,7 +3209,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
+ * Called after adding a new station to initialize rate scaling
+ */
+ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+- enum nl80211_band band, bool update)
++ enum nl80211_band band)
+ {
+ int i, j;
+ struct ieee80211_hw *hw = mvm->hw;
+@@ -3259,7 +3289,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_mvm_reset_frame_stats(mvm);
+ #endif
+- rs_initialize_lq(mvm, sta, lq_sta, band, update);
++ rs_initialize_lq(mvm, sta, lq_sta, band);
+ }
+
+ static void rs_drv_rate_update(void *mvm_r,
+@@ -3573,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
+
+ bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
+ bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
+- iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
++ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
+
+ ss_params |= LQ_SS_BFER_ALLOWED;
+ IWL_DEBUG_RATE(mvm,
+@@ -3739,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
+
+ if (lq_sta->pers.dbg_fixed_rate) {
+ rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
+- iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
++ iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
+ }
+ }
+
+@@ -4136,10 +4166,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
+ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ enum nl80211_band band, bool update)
+ {
+- if (iwl_mvm_has_tlc_offload(mvm))
++ if (iwl_mvm_has_tlc_offload(mvm)) {
+ rs_fw_rate_init(mvm, sta, band, update);
+- else
+- rs_drv_rate_init(mvm, sta, band, update);
++ } else {
++ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
++
++ mutex_lock(&mvmsta->lq_sta.rs_drv.mutex);
++ rs_drv_rate_init(mvm, sta, band);
++ mutex_unlock(&mvmsta->lq_sta.rs_drv.mutex);
++ }
+ }
+
+ int iwl_mvm_rate_control_register(void)
+@@ -4169,7 +4204,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
+ }
+
+- return iwl_mvm_send_lq_cmd(mvm, lq, false);
++ return iwl_mvm_send_lq_cmd(mvm, lq);
+ }
+
+ /**
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+index f7eb60dbaf20..086f47e2a4f0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+@@ -4,7 +4,7 @@
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 Intel Corporation
++ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+@@ -376,6 +376,9 @@ struct iwl_lq_sta {
+ /* tx power reduce for this sta */
+ int tpc_reduce;
+
++ /* avoid races of reinit and update table from rx_tx */
++ struct mutex mutex;
++
+ /* persistent fields - initialized only once - keep last! */
+ struct lq_sta_pers {
+ #ifdef CONFIG_MAC80211_DEBUGFS
+@@ -440,6 +443,8 @@ struct iwl_mvm_sta;
+ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
+
++void iwl_mvm_rs_init_wk(struct work_struct *wk);
++
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+ #endif
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index f545a737a92d..22715cdb8317 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -1684,6 +1684,10 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+ */
+ if (iwl_mvm_has_tlc_offload(mvm))
+ iwl_mvm_rs_add_sta(mvm, mvm_sta);
++ else
++ mutex_init(&mvm_sta->lq_sta.rs_drv.mutex);
++
++ INIT_WORK(&mvm_sta->rs_init_wk, iwl_mvm_rs_init_wk);
+
+ iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+
+@@ -1846,6 +1850,8 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+ if (ret)
+ return ret;
+
++ cancel_work_sync(&mvm_sta->rs_init_wk);
++
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ if (ret)
+@@ -2972,7 +2978,7 @@ out:
+ IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
++ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
+ }
+
+ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+index b4d4071b865d..6e93c30492b7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+@@ -421,6 +421,7 @@ struct iwl_mvm_sta {
+ struct iwl_lq_sta_rs_fw rs_fw;
+ struct iwl_lq_sta rs_drv;
+ } lq_sta;
++ struct work_struct rs_init_wk;
+ struct ieee80211_vif *vif;
+ struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+ struct iwl_mvm_rxq_dup_data *dup_data;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+index cc56ab88fb43..a71277de2e0e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+@@ -641,12 +641,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
++int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
+ {
+ struct iwl_host_cmd cmd = {
+ .id = LQ_CMD,
+ .len = { sizeof(struct iwl_lq_cmd), },
+- .flags = sync ? 0 : CMD_ASYNC,
++ .flags = CMD_ASYNC,
+ .data = { lq, },
+ };
+
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 1c699a9fa866..faec05ab4275 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -3615,10 +3615,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &hwsim_genl_family,
+ NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
+- if (!hdr)
++ if (hdr) {
++ genl_dump_check_consistent(cb, hdr);
++ genlmsg_end(skb, hdr);
++ } else {
+ res = -EMSGSIZE;
+- genl_dump_check_consistent(cb, hdr);
+- genlmsg_end(skb, hdr);
++ }
+ }
+
+ done:
+diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
+index c3e10b6ab3a4..f25f1ec5f9e9 100644
+--- a/drivers/nfc/st-nci/se.c
++++ b/drivers/nfc/st-nci/se.c
+@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
++ if (!transaction)
++ return -ENOMEM;
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
+diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
+index 06fc542fd198..6586378cacb0 100644
+--- a/drivers/nfc/st21nfca/se.c
++++ b/drivers/nfc/st21nfca/se.c
+@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
+
+ transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
+ skb->len - 2, GFP_KERNEL);
++ if (!transaction)
++ return -ENOMEM;
+
+ transaction->aid_len = skb->data[1];
+ memcpy(transaction->aid, &skb->data[2],
+diff --git a/drivers/nvmem/nvmem-sysfs.c b/drivers/nvmem/nvmem-sysfs.c
+index 6f303b91f6e7..9e0c429cd08a 100644
+--- a/drivers/nvmem/nvmem-sysfs.c
++++ b/drivers/nvmem/nvmem-sysfs.c
+@@ -224,10 +224,17 @@ int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
+ if (!config->base_dev)
+ return -EINVAL;
+
+- if (nvmem->read_only)
+- nvmem->eeprom = bin_attr_ro_root_nvmem;
+- else
+- nvmem->eeprom = bin_attr_rw_root_nvmem;
++ if (nvmem->read_only) {
++ if (config->root_only)
++ nvmem->eeprom = bin_attr_ro_root_nvmem;
++ else
++ nvmem->eeprom = bin_attr_ro_nvmem;
++ } else {
++ if (config->root_only)
++ nvmem->eeprom = bin_attr_rw_root_nvmem;
++ else
++ nvmem->eeprom = bin_attr_rw_nvmem;
++ }
+ nvmem->eeprom.attr.name = "eeprom";
+ nvmem->eeprom.size = nvmem->size;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index 152053361862..989506bd90b1 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -174,14 +174,14 @@
+ #define AXP803_DCDC5_1140mV_STEPS 35
+ #define AXP803_DCDC5_1140mV_END \
+ (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
+-#define AXP803_DCDC5_NUM_VOLTAGES 68
++#define AXP803_DCDC5_NUM_VOLTAGES 69
+
+ #define AXP803_DCDC6_600mV_START 0x00
+ #define AXP803_DCDC6_600mV_STEPS 50
+ #define AXP803_DCDC6_600mV_END \
+ (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
+ #define AXP803_DCDC6_1120mV_START 0x33
+-#define AXP803_DCDC6_1120mV_STEPS 14
++#define AXP803_DCDC6_1120mV_STEPS 20
+ #define AXP803_DCDC6_1120mV_END \
+ (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
+ #define AXP803_DCDC6_NUM_VOLTAGES 72
+@@ -240,7 +240,7 @@
+ #define AXP806_DCDCA_600mV_END \
+ (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
+ #define AXP806_DCDCA_1120mV_START 0x33
+-#define AXP806_DCDCA_1120mV_STEPS 14
++#define AXP806_DCDCA_1120mV_STEPS 20
+ #define AXP806_DCDCA_1120mV_END \
+ (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
+ #define AXP806_DCDCA_NUM_VOLTAGES 72
+@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
+ AXP806_DCDCD_600mV_END,
+ 20000),
+ REGULATOR_LINEAR_RANGE(1600000,
+- AXP806_DCDCD_600mV_START,
+- AXP806_DCDCD_600mV_END,
++ AXP806_DCDCD_1600mV_START,
++ AXP806_DCDCD_1600mV_END,
+ 100000),
+ };
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 3fe3029617a8..aa3bfc20b737 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -7032,6 +7032,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+ static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg)
+ {
++ if (!vreg)
++ return 0;
++
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+ }
+
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index af3f37ba82c8..1f32c9e3ca65 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1453,6 +1453,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
+ { PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
++ /* TGL-LP */
++ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
++ { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
+ { },
+ };
+
+@@ -1817,14 +1825,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+ status = devm_spi_register_controller(&pdev->dev, controller);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem registering spi controller\n");
+- goto out_error_clock_enabled;
++ goto out_error_pm_runtime_enabled;
+ }
+
+ return status;
+
+-out_error_clock_enabled:
++out_error_pm_runtime_enabled:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
++
++out_error_clock_enabled:
+ clk_disable_unprepare(ssp->clk);
+
+ out_error_dma_irq_alloc:
+diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c
+index b6c6d66e4eb1..e2c7646588f8 100644
+--- a/drivers/staging/fbtft/fb_bd663474.c
++++ b/drivers/staging/fbtft/fb_bd663474.c
+@@ -24,7 +24,7 @@
+
+ static int init_display(struct fbtft_par *par)
+ {
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ par->fbtftops.reset(par);
+diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c
+index d609a2b67db9..fd32376700e2 100644
+--- a/drivers/staging/fbtft/fb_ili9163.c
++++ b/drivers/staging/fbtft/fb_ili9163.c
+@@ -77,7 +77,7 @@ static int init_display(struct fbtft_par *par)
+ {
+ par->fbtftops.reset(par);
+
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ write_reg(par, MIPI_DCS_SOFT_RESET); /* software reset */
+diff --git a/drivers/staging/fbtft/fb_ili9325.c b/drivers/staging/fbtft/fb_ili9325.c
+index b090e7ab6fdd..85e54a10ed72 100644
+--- a/drivers/staging/fbtft/fb_ili9325.c
++++ b/drivers/staging/fbtft/fb_ili9325.c
+@@ -85,7 +85,7 @@ static int init_display(struct fbtft_par *par)
+ {
+ par->fbtftops.reset(par);
+
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ bt &= 0x07;
+diff --git a/drivers/staging/fbtft/fb_s6d1121.c b/drivers/staging/fbtft/fb_s6d1121.c
+index b3d0701880fe..5a129b1352cc 100644
+--- a/drivers/staging/fbtft/fb_s6d1121.c
++++ b/drivers/staging/fbtft/fb_s6d1121.c
+@@ -29,7 +29,7 @@ static int init_display(struct fbtft_par *par)
+ {
+ par->fbtftops.reset(par);
+
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ /* Initialization sequence from Lib_UTFT */
+diff --git a/drivers/staging/fbtft/fb_ssd1289.c b/drivers/staging/fbtft/fb_ssd1289.c
+index bbf75f795234..88a5b6925901 100644
+--- a/drivers/staging/fbtft/fb_ssd1289.c
++++ b/drivers/staging/fbtft/fb_ssd1289.c
+@@ -28,7 +28,7 @@ static int init_display(struct fbtft_par *par)
+ {
+ par->fbtftops.reset(par);
+
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ write_reg(par, 0x00, 0x0001);
+diff --git a/drivers/staging/fbtft/fb_ssd1331.c b/drivers/staging/fbtft/fb_ssd1331.c
+index 4cfe9f8535d0..37622c9462aa 100644
+--- a/drivers/staging/fbtft/fb_ssd1331.c
++++ b/drivers/staging/fbtft/fb_ssd1331.c
+@@ -81,7 +81,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+ va_start(args, len);
+
+ *buf = (u8)va_arg(args, unsigned int);
+- if (!par->gpio.dc)
++ if (par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 0);
+ ret = par->fbtftops.write(par, par->buf, sizeof(u8));
+ if (ret < 0) {
+@@ -104,7 +104,7 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
+ return;
+ }
+ }
+- if (!par->gpio.dc)
++ if (par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 1);
+ va_end(args);
+ }
+diff --git a/drivers/staging/fbtft/fb_upd161704.c b/drivers/staging/fbtft/fb_upd161704.c
+index 564a38e34440..c77832ae5e5b 100644
+--- a/drivers/staging/fbtft/fb_upd161704.c
++++ b/drivers/staging/fbtft/fb_upd161704.c
+@@ -26,7 +26,7 @@ static int init_display(struct fbtft_par *par)
+ {
+ par->fbtftops.reset(par);
+
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ /* Initialization sequence from Lib_UTFT */
+diff --git a/drivers/staging/fbtft/fbtft-bus.c b/drivers/staging/fbtft/fbtft-bus.c
+index 2ea814d0dca5..63c65dd67b17 100644
+--- a/drivers/staging/fbtft/fbtft-bus.c
++++ b/drivers/staging/fbtft/fbtft-bus.c
+@@ -135,7 +135,7 @@ int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len)
+ remain = len / 2;
+ vmem16 = (u16 *)(par->info->screen_buffer + offset);
+
+- if (!par->gpio.dc)
++ if (par->gpio.dc)
+ gpiod_set_value(par->gpio.dc, 1);
+
+ /* non buffered write */
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index bc750250ccd6..5127de922f6a 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -916,7 +916,7 @@ static int fbtft_init_display_dt(struct fbtft_par *par)
+ return -EINVAL;
+
+ par->fbtftops.reset(par);
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ while (p) {
+@@ -1007,7 +1007,7 @@ int fbtft_init_display(struct fbtft_par *par)
+ }
+
+ par->fbtftops.reset(par);
+- if (!par->gpio.cs)
++ if (par->gpio.cs)
+ gpiod_set_value(par->gpio.cs, 0); /* Activate chip */
+
+ i = 0;
+diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
+index a47c541f8006..ba30f4f33e7c 100644
+--- a/fs/ceph/addr.c
++++ b/fs/ceph/addr.c
+@@ -912,8 +912,9 @@ get_more_pages:
+ if (page_offset(page) >= ceph_wbc.i_size) {
+ dout("%p page eof %llu\n",
+ page, ceph_wbc.i_size);
+- if (ceph_wbc.size_stable ||
+- page_offset(page) >= i_size_read(inode))
++ if ((ceph_wbc.size_stable ||
++ page_offset(page) >= i_size_read(inode)) &&
++ clear_page_dirty_for_io(page))
+ mapping->a_ops->invalidatepage(page,
+ 0, PAGE_SIZE);
+ unlock_page(page);
+diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
+index ac9b53b89365..5083e238ad15 100644
+--- a/fs/ceph/locks.c
++++ b/fs/ceph/locks.c
+@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
+ req->r_wait_for_completion = ceph_lock_wait_for_completion;
+
+ err = ceph_mdsc_do_request(mdsc, inode, req);
+-
+- if (operation == CEPH_MDS_OP_GETFILELOCK) {
++ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
+ fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
+ if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
+ fl->fl_type = F_RDLCK;
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 2ec37dc589a7..42de31d20616 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -3439,7 +3439,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
+ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
+ unsigned int buflen)
+ {
+- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
++ void *addr;
++ /*
++ * VMAP_STACK (at least) puts stack into the vmalloc address space
++ */
++ if (is_vmalloc_addr(buf))
++ addr = vmalloc_to_page(buf);
++ else
++ addr = virt_to_page(buf);
++ sg_set_page(sg, addr, buflen, offset_in_page(buf));
+ }
+
+ /* Assumes the first rqst has a transform header as the first iov.
+@@ -4015,7 +4023,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ {
+ int ret, length;
+ char *buf = server->smallbuf;
+- char *tmpbuf;
+ struct smb2_sync_hdr *shdr;
+ unsigned int pdu_length = server->pdu_size;
+ unsigned int buf_size;
+@@ -4045,18 +4052,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ return length;
+
+ next_is_large = server->large_buf;
+- one_more:
++one_more:
+ shdr = (struct smb2_sync_hdr *)buf;
+ if (shdr->NextCommand) {
+- if (next_is_large) {
+- tmpbuf = server->bigbuf;
++ if (next_is_large)
+ next_buffer = (char *)cifs_buf_get();
+- } else {
+- tmpbuf = server->smallbuf;
++ else
+ next_buffer = (char *)cifs_small_buf_get();
+- }
+ memcpy(next_buffer,
+- tmpbuf + le32_to_cpu(shdr->NextCommand),
++ buf + le32_to_cpu(shdr->NextCommand),
+ pdu_length - le32_to_cpu(shdr->NextCommand));
+ }
+
+@@ -4085,12 +4089,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
+ pdu_length -= le32_to_cpu(shdr->NextCommand);
+ server->large_buf = next_is_large;
+ if (next_is_large)
+- server->bigbuf = next_buffer;
++ server->bigbuf = buf = next_buffer;
+ else
+- server->smallbuf = next_buffer;
+-
+- buf += le32_to_cpu(shdr->NextCommand);
++ server->smallbuf = buf = next_buffer;
+ goto one_more;
++ } else if (ret != 0) {
++ /*
++ * ret != 0 here means that we didn't get to handle_mid() thus
++ * server->smallbuf and server->bigbuf are still valid. We need
++ * to free next_buffer because it is not going to be used
++ * anywhere.
++ */
++ if (next_is_large)
++ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
++ else
++ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
+ }
+
+ return ret;
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 61018559e8fe..03cd8f5bba85 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -618,6 +618,13 @@ static void io_put_req(struct io_kiocb *req)
+ io_free_req(req);
+ }
+
++static unsigned io_cqring_events(struct io_cq_ring *ring)
++{
++ /* See comment at the top of this file */
++ smp_rmb();
++ return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
++}
++
+ /*
+ * Find and free completed poll iocbs
+ */
+@@ -709,7 +716,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
+ static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
+ long min)
+ {
+- while (!list_empty(&ctx->poll_list)) {
++ while (!list_empty(&ctx->poll_list) && !need_resched()) {
+ int ret;
+
+ ret = io_do_iopoll(ctx, nr_events, min);
+@@ -736,6 +743,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+ unsigned int nr_events = 0;
+
+ io_iopoll_getevents(ctx, &nr_events, 1);
++
++ /*
++ * Ensure we allow local-to-the-cpu processing to take place,
++ * in this case we need to ensure that we reap all events.
++ */
++ cond_resched();
+ }
+ mutex_unlock(&ctx->uring_lock);
+ }
+@@ -743,11 +756,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
+ {
+- int ret = 0;
++ int iters, ret = 0;
+
++ /*
++ * We disallow the app entering submit/complete with polling, but we
++ * still need to lock the ring to prevent racing with polled issue
++ * that got punted to a workqueue.
++ */
++ mutex_lock(&ctx->uring_lock);
++
++ iters = 0;
+ do {
+ int tmin = 0;
+
++ /*
++ * Don't enter poll loop if we already have events pending.
++ * If we do, we can potentially be spinning for commands that
++ * already triggered a CQE (eg in error).
++ */
++ if (io_cqring_events(ctx->cq_ring))
++ break;
++
++ /*
++ * If a submit got punted to a workqueue, we can have the
++ * application entering polling for a command before it gets
++ * issued. That app will hold the uring_lock for the duration
++ * of the poll right here, so we need to take a breather every
++ * now and then to ensure that the issue has a chance to add
++ * the poll to the issued list. Otherwise we can spin here
++ * forever, while the workqueue is stuck trying to acquire the
++ * very same mutex.
++ */
++ if (!(++iters & 7)) {
++ mutex_unlock(&ctx->uring_lock);
++ mutex_lock(&ctx->uring_lock);
++ }
++
+ if (*nr_events < min)
+ tmin = min - *nr_events;
+
+@@ -757,6 +801,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ ret = 0;
+ } while (min && !*nr_events && !need_resched());
+
++ mutex_unlock(&ctx->uring_lock);
+ return ret;
+ }
+
+@@ -2073,15 +2118,7 @@ static int io_sq_thread(void *data)
+ unsigned nr_events = 0;
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+- /*
+- * We disallow the app entering submit/complete
+- * with polling, but we still need to lock the
+- * ring to prevent racing with polled issue
+- * that got punted to a workqueue.
+- */
+- mutex_lock(&ctx->uring_lock);
+ io_iopoll_check(ctx, &nr_events, 0);
+- mutex_unlock(&ctx->uring_lock);
+ } else {
+ /*
+ * Normal IO, just pretend everything completed.
+@@ -2216,13 +2253,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
+ return submit;
+ }
+
+-static unsigned io_cqring_events(struct io_cq_ring *ring)
+-{
+- /* See comment at the top of this file */
+- smp_rmb();
+- return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
+-}
+-
+ /*
+ * Wait until events become available, if we don't already have some. The
+ * application must reap them itself, as they reside on the shared cq ring.
+@@ -2978,9 +3008,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
+ min_complete = min(min_complete, ctx->cq_entries);
+
+ if (ctx->flags & IORING_SETUP_IOPOLL) {
+- mutex_lock(&ctx->uring_lock);
+ ret = io_iopoll_check(ctx, &nr_events, min_complete);
+- mutex_unlock(&ctx->uring_lock);
+ } else {
+ ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
+ }
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 0af854cce8ff..071b90a45933 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
+ nfs4_schedule_state_manager(clp);
+ }
+
++static void
++nfs_delegation_test_free_expired(struct inode *inode,
++ nfs4_stateid *stateid,
++ const struct cred *cred)
++{
++ struct nfs_server *server = NFS_SERVER(inode);
++ const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
++ int status;
++
++ if (!cred)
++ return;
++ status = ops->test_and_free_expired(server, stateid, cred);
++ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
++ nfs_remove_bad_delegation(inode, stateid);
++}
++
+ /**
+ * nfs_reap_expired_delegations - reap expired delegations
+ * @clp: nfs_client to process
+@@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
+ */
+ void nfs_reap_expired_delegations(struct nfs_client *clp)
+ {
+- const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
+ struct nfs_delegation *delegation;
+ struct nfs_server *server;
+ struct inode *inode;
+@@ -1088,11 +1103,7 @@ restart:
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+ clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ rcu_read_unlock();
+- if (cred != NULL &&
+- ops->test_and_free_expired(server, &stateid, cred) < 0) {
+- nfs_revoke_delegation(inode, &stateid);
+- nfs_inode_find_state_and_recover(inode, &stateid);
+- }
++ nfs_delegation_test_free_expired(inode, &stateid, cred);
+ put_cred(cred);
+ if (nfs4_server_rebooted(clp)) {
+ nfs_inode_mark_test_expired_delegation(server,inode);
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index 53507aa96b0b..3800ab6f08fa 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
+ struct rb_node **p, *parent;
+ int diff;
+
++ nfss->fscache_key = NULL;
++ nfss->fscache = NULL;
++ if (!(nfss->options & NFS_OPTION_FSCACHE))
++ return;
+ if (!uniq) {
+ uniq = "";
+ ulen = 1;
+@@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
+ void nfs_fscache_init_inode(struct inode *inode)
+ {
+ struct nfs_fscache_inode_auxdata auxdata;
++ struct nfs_server *nfss = NFS_SERVER(inode);
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ nfsi->fscache = NULL;
+- if (!S_ISREG(inode->i_mode))
++ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
+ return;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
+index 25a75e40d91d..ad041cfbf9ec 100644
+--- a/fs/nfs/fscache.h
++++ b/fs/nfs/fscache.h
+@@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
+ */
+ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
+ {
+- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
++ if (server->fscache)
+ return "yes";
+ return "no ";
+ }
+diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
+index 8a38a254f516..235919156edd 100644
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
+
+ extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
+ extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+-extern void nfs4_purge_state_owners(struct nfs_server *);
++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
++extern void nfs4_free_state_owners(struct list_head *head);
+ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
+ extern void nfs4_put_open_state(struct nfs4_state *);
+ extern void nfs4_close_state(struct nfs4_state *, fmode_t);
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 81b9b6d7927a..208a236dc235 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -758,9 +758,12 @@ out:
+
+ static void nfs4_destroy_server(struct nfs_server *server)
+ {
++ LIST_HEAD(freeme);
++
+ nfs_server_return_all_delegations(server);
+ unset_pnfs_layoutdriver(server);
+- nfs4_purge_state_owners(server);
++ nfs4_purge_state_owners(server, &freeme);
++ nfs4_free_state_owners(&freeme);
+ }
+
+ /*
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 63edda145d1b..2023011c7a8f 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1654,6 +1654,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state,
+ write_sequnlock(&state->seqlock);
+ }
+
++static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
++{
++ clear_bit(NFS_O_RDWR_STATE, &state->flags);
++ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
++ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
++ clear_bit(NFS_OPEN_STATE, &state->flags);
++}
++
+ static void nfs_state_set_delegation(struct nfs4_state *state,
+ const nfs4_stateid *deleg_stateid,
+ fmode_t fmode)
+@@ -2049,13 +2057,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
+ {
+ int ret;
+
+- /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
+- clear_bit(NFS_O_RDWR_STATE, &state->flags);
+- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ /* memory barrier prior to reading state->n_* */
+- clear_bit(NFS_DELEGATED_STATE, &state->flags);
+- clear_bit(NFS_OPEN_STATE, &state->flags);
+ smp_rmb();
+ ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+ if (ret != 0)
+@@ -2131,6 +2133,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
+ ctx = nfs4_state_find_open_context(state);
+ if (IS_ERR(ctx))
+ return -EAGAIN;
++ clear_bit(NFS_DELEGATED_STATE, &state->flags);
++ nfs_state_clear_open_state_flags(state);
+ ret = nfs4_do_open_reclaim(ctx, state);
+ put_nfs_open_context(ctx);
+ return ret;
+@@ -2146,6 +2150,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ case -ENOENT:
+ case -EAGAIN:
+ case -ESTALE:
++ case -ETIMEDOUT:
+ break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+@@ -2466,6 +2471,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
+ if (!ctx) {
+ nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
+ data->is_recover = true;
++ task_setup_data.flags |= RPC_TASK_TIMEOUT;
+ } else {
+ nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
+ pnfs_lgopen_prepare(data, ctx);
+@@ -2672,6 +2678,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
+ {
+ /* NFSv4.0 doesn't allow for delegation recovery on open expire */
+ nfs40_clear_delegation_stateid(state);
++ nfs_state_clear_open_state_flags(state);
+ return nfs4_open_expired(sp, state);
+ }
+
+@@ -2714,13 +2721,13 @@ out_free:
+ return -NFS4ERR_EXPIRED;
+ }
+
+-static void nfs41_check_delegation_stateid(struct nfs4_state *state)
++static int nfs41_check_delegation_stateid(struct nfs4_state *state)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ nfs4_stateid stateid;
+ struct nfs_delegation *delegation;
+ const struct cred *cred = NULL;
+- int status;
++ int status, ret = NFS_OK;
+
+ /* Get the delegation credential for use by test/free_stateid */
+ rcu_read_lock();
+@@ -2728,20 +2735,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
+ if (delegation == NULL) {
+ rcu_read_unlock();
+ nfs_state_clear_delegation(state);
+- return;
++ return NFS_OK;
+ }
+
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+- rcu_read_unlock();
+- nfs_state_clear_delegation(state);
+- return;
+- }
+
+ if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags)) {
+ rcu_read_unlock();
+- return;
++ return NFS_OK;
+ }
+
+ if (delegation->cred)
+@@ -2751,9 +2753,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
+ trace_nfs4_test_delegation_stateid(state, NULL, status);
+ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
+ nfs_finish_clear_delegation_stateid(state, &stateid);
++ else
++ ret = status;
+
+- if (delegation->cred)
+- put_cred(cred);
++ put_cred(cred);
++ return ret;
++}
++
++static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
++{
++ nfs4_stateid tmp;
++
++ if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
++ nfs4_copy_delegation_stateid(state->inode, state->state,
++ &tmp, NULL) &&
++ nfs4_stateid_match_other(&state->stateid, &tmp))
++ nfs_state_set_delegation(state, &tmp, state->state);
++ else
++ nfs_state_clear_delegation(state);
+ }
+
+ /**
+@@ -2823,21 +2840,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
+ const struct cred *cred = state->owner->so_cred;
+ int status;
+
+- if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
+- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
+- if (nfs4_have_delegation(state->inode, state->state))
+- return NFS_OK;
+- return -NFS4ERR_OPENMODE;
+- }
++ if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
+ return -NFS4ERR_BAD_STATEID;
+- }
+ status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
+ trace_nfs4_test_open_stateid(state, NULL, status);
+ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
+- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+- clear_bit(NFS_O_RDWR_STATE, &state->flags);
+- clear_bit(NFS_OPEN_STATE, &state->flags);
++ nfs_state_clear_open_state_flags(state);
+ stateid->type = NFS4_INVALID_STATEID_TYPE;
+ return status;
+ }
+@@ -2850,7 +2858,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
+ {
+ int status;
+
+- nfs41_check_delegation_stateid(state);
++ status = nfs41_check_delegation_stateid(state);
++ if (status != NFS_OK)
++ return status;
++ nfs41_delegation_recover_stateid(state);
++
+ status = nfs41_check_expired_locks(state);
+ if (status != NFS_OK)
+ return status;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index e2e3c4f04d3e..0e69cd846afb 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
+ /**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @server: nfs_server with cached state owners to release
++ * @head: resulting list of state owners
+ *
+ * Called at umount time. Remaining state owners will be on
+ * the LRU with ref count of zero.
++ * Note that the state owners are not freed, but are added
++ * to the list @head, which can later be used as an argument
++ * to nfs4_free_state_owners.
+ */
+-void nfs4_purge_state_owners(struct nfs_server *server)
++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
+ {
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *tmp;
+- LIST_HEAD(doomed);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+- list_move(&sp->so_lru, &doomed);
++ list_move(&sp->so_lru, head);
+ nfs4_remove_state_owner_locked(sp);
+ }
+ spin_unlock(&clp->cl_lock);
++}
+
+- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
++/**
++ * nfs4_free_state_owners - Release all cached state owners
++ * @head: resulting list of state owners
++ *
++ * Frees a list of state owners that was generated by
++ * nfs4_purge_state_owners
++ */
++void nfs4_free_state_owners(struct list_head *head)
++{
++ struct nfs4_state_owner *sp, *tmp;
++
++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
+ list_del(&sp->so_lru);
+ nfs4_free_state_owner(sp);
+ }
+@@ -1513,6 +1528,7 @@ restart:
+ switch (status) {
+ case 0:
+ break;
++ case -ETIMEDOUT:
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+@@ -1606,6 +1622,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
+ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
+ {
+ struct nfs4_state *state;
++ unsigned int loop = 0;
+ int status = 0;
+
+ /* Note: we rely on the sp->so_states list being ordered
+@@ -1632,8 +1649,10 @@ restart:
+
+ switch (status) {
+ default:
+- if (status >= 0)
++ if (status >= 0) {
++ loop = 0;
+ break;
++ }
+ printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
+ /* Fall through */
+ case -ENOENT:
+@@ -1647,6 +1666,10 @@ restart:
+ break;
+ case -EAGAIN:
+ ssleep(1);
++ if (loop++ < 10) {
++ set_bit(ops->state_flag_bit, &state->flags);
++ break;
++ }
+ /* Fall through */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+@@ -1659,11 +1682,13 @@ restart:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
++ /* Fall through */
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++ case -ETIMEDOUT:
+ goto out_err;
+ }
+ nfs4_put_open_state(state);
+@@ -1857,12 +1882,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
+ struct nfs4_state_owner *sp;
+ struct nfs_server *server;
+ struct rb_node *pos;
++ LIST_HEAD(freeme);
+ int status = 0;
+
+ restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+- nfs4_purge_state_owners(server);
++ nfs4_purge_state_owners(server, &freeme);
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+@@ -1891,6 +1917,7 @@ restart:
+ spin_unlock(&clp->cl_lock);
+ }
+ rcu_read_unlock();
++ nfs4_free_state_owners(&freeme);
+ return 0;
+ }
+
+@@ -1946,7 +1973,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+ return -EPERM;
+ case -EACCES:
+ case -NFS4ERR_DELAY:
+- case -ETIMEDOUT:
+ case -EAGAIN:
+ ssleep(1);
+ break;
+@@ -2575,7 +2601,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ }
+
+ /* Now recover expired state... */
+- if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
++ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ section = "reclaim nograce";
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->nograce_recovery_ops);
+@@ -2583,6 +2609,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
+ continue;
+ if (status < 0)
+ goto out_error;
++ clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
+ }
+
+ nfs4_end_drain_session(clp);
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index f88ddac2dcdf..4d375b517eda 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
+ data->acdirmin != nfss->acdirmin / HZ ||
+ data->acdirmax != nfss->acdirmax / HZ ||
+ data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
++ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
+ data->nfs_server.port != nfss->port ||
+ data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
+ !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
+diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
+index ccbdbd62f0d8..fe6d804a38dc 100644
+--- a/fs/userfaultfd.c
++++ b/fs/userfaultfd.c
+@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ /* len == 0 means wake all */
+ struct userfaultfd_wake_range range = { .len = 0, };
+ unsigned long new_flags;
++ bool still_valid;
+
+ WRITE_ONCE(ctx->released, true);
+
+@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ * taking the mmap_sem for writing.
+ */
+ down_write(&mm->mmap_sem);
+- if (!mmget_still_valid(mm))
+- goto skip_mm;
++ still_valid = mmget_still_valid(mm);
+ prev = NULL;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ cond_resched();
+@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
+ continue;
+ }
+ new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
+- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+- new_flags, vma->anon_vma,
+- vma->vm_file, vma->vm_pgoff,
+- vma_policy(vma),
+- NULL_VM_UFFD_CTX);
+- if (prev)
+- vma = prev;
+- else
+- prev = vma;
++ if (still_valid) {
++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
++ new_flags, vma->anon_vma,
++ vma->vm_file, vma->vm_pgoff,
++ vma_policy(vma),
++ NULL_VM_UFFD_CTX);
++ if (prev)
++ vma = prev;
++ else
++ prev = vma;
++ }
+ vma->vm_flags = new_flags;
+ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ }
+-skip_mm:
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ wakeup:
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 74047bd0c1ae..e427ad097e2e 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
+
+ out_cancel:
+ xfs_trans_cancel(tp);
++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ out_dqrele:
+ xfs_qm_dqrele(udqp);
+ xfs_qm_dqrele(gdqp);
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index 8fb5be3ca0ca..8b13bd05befa 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -7254,6 +7254,21 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp);
+
++/**
++ * cfg80211_iftype_allowed - check whether the interface can be allowed
++ * @wiphy: the wiphy
++ * @iftype: interface type
++ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
++ * @check_swif: check iftype against software interfaces
++ *
++ * Check whether the interface is allowed to operate; additionally, this API
++ * can be used to check iftype against the software interfaces when
++ * check_swif is '1'.
++ */
++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
++ bool is_4addr, u8 check_swif);
++
++
+ /* Logging, debugging and troubleshooting/diagnostic helpers. */
+
+ /* wiphy_printk helpers, similar to dev_printk */
+diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
+index 3429888347e7..b3609e4c46e0 100644
+--- a/include/sound/simple_card_utils.h
++++ b/include/sound/simple_card_utils.h
+@@ -149,6 +149,10 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+ {
+ struct device *dev = simple_priv_to_dev(priv);
+
++ /* dai might be NULL */
++ if (!dai)
++ return;
++
+ if (dai->name)
+ dev_dbg(dev, "%s dai name = %s\n",
+ name, dai->name);
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index cc1d060cbf13..fa06b528c73c 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -498,10 +498,10 @@ rxrpc_tx_points;
+ #define E_(a, b) { a, b }
+
+ TRACE_EVENT(rxrpc_local,
+- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
++ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
+ int usage, const void *where),
+
+- TP_ARGS(local, op, usage, where),
++ TP_ARGS(local_debug_id, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, local )
+@@ -511,7 +511,7 @@ TRACE_EVENT(rxrpc_local,
+ ),
+
+ TP_fast_assign(
+- __entry->local = local->debug_id;
++ __entry->local = local_debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h
+index 1afca973eb09..e9f697467a86 100644
+--- a/include/uapi/sound/sof/fw.h
++++ b/include/uapi/sound/sof/fw.h
+@@ -13,6 +13,8 @@
+ #ifndef __INCLUDE_UAPI_SOF_FW_H__
+ #define __INCLUDE_UAPI_SOF_FW_H__
+
++#include <linux/types.h>
++
+ #define SND_SOF_FW_SIG_SIZE 4
+ #define SND_SOF_FW_ABI 1
+ #define SND_SOF_FW_SIG "Reef"
+@@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type {
+
+ struct snd_sof_blk_hdr {
+ enum snd_sof_fw_blk_type type;
+- uint32_t size; /* bytes minus this header */
+- uint32_t offset; /* offset from base */
++ __u32 size; /* bytes minus this header */
++ __u32 offset; /* offset from base */
+ } __packed;
+
+ /*
+@@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type {
+
+ struct snd_sof_mod_hdr {
+ enum snd_sof_fw_mod_type type;
+- uint32_t size; /* bytes minus this header */
+- uint32_t num_blocks; /* number of blocks */
++ __u32 size; /* bytes minus this header */
++ __u32 num_blocks; /* number of blocks */
+ } __packed;
+
+ /*
+@@ -70,9 +72,9 @@ struct snd_sof_mod_hdr {
+ */
+ struct snd_sof_fw_header {
+ unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */
+- uint32_t file_size; /* size of file minus this header */
+- uint32_t num_modules; /* number of modules */
+- uint32_t abi; /* version of header format */
++ __u32 file_size; /* size of file minus this header */
++ __u32 num_modules; /* number of modules */
++ __u32 abi; /* version of header format */
+ } __packed;
+
+ #endif
+diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
+index 7868990b0d6f..5f4518e7a972 100644
+--- a/include/uapi/sound/sof/header.h
++++ b/include/uapi/sound/sof/header.h
+@@ -9,6 +9,8 @@
+ #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
+ #define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
+
++#include <linux/types.h>
++
+ /*
+ * Header for all non IPC ABI data.
+ *
+@@ -16,12 +18,12 @@
+ * Used by any bespoke component data structures or binary blobs.
+ */
+ struct sof_abi_hdr {
+- uint32_t magic; /**< 'S', 'O', 'F', '\0' */
+- uint32_t type; /**< component specific type */
+- uint32_t size; /**< size in bytes of data excl. this struct */
+- uint32_t abi; /**< SOF ABI version */
+- uint32_t reserved[4]; /**< reserved for future use */
+- uint32_t data[0]; /**< Component data - opaque to core */
++ __u32 magic; /**< 'S', 'O', 'F', '\0' */
++ __u32 type; /**< component specific type */
++ __u32 size; /**< size in bytes of data excl. this struct */
++ __u32 abi; /**< SOF ABI version */
++ __u32 reserved[4]; /**< reserved for future use */
++ __u32 data[0]; /**< Component data - opaque to core */
+ } __packed;
+
+ #endif
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index 9484e88dabc2..9be995fc3c5a 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
+ }
+ }
+
++static void irq_sysfs_del(struct irq_desc *desc)
++{
++ /*
++ * If irq_sysfs_init() has not yet been invoked (early boot), then
++ * irq_kobj_base is NULL and the descriptor was never added.
++ * kobject_del() complains about a object with no parent, so make
++ * it conditional.
++ */
++ if (irq_kobj_base)
++ kobject_del(&desc->kobj);
++}
++
+ static int __init irq_sysfs_init(void)
+ {
+ struct irq_desc *desc;
+@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
+ };
+
+ static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
++static void irq_sysfs_del(struct irq_desc *desc) {}
+
+ #endif /* CONFIG_SYSFS */
+
+@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
+ * The sysfs entry must be serialized against a concurrent
+ * irq_sysfs_init() as well.
+ */
+- kobject_del(&desc->kobj);
++ irq_sysfs_del(desc);
+ delete_irq_desc(irq);
+
+ /*
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 43901fa3f269..1c66480afda8 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -2088,17 +2088,13 @@ retry:
+ }
+
+ deactivate_task(rq, next_task, 0);
+- sub_running_bw(&next_task->dl, &rq->dl);
+- sub_rq_bw(&next_task->dl, &rq->dl);
+ set_task_cpu(next_task, later_rq->cpu);
+- add_rq_bw(&next_task->dl, &later_rq->dl);
+
+ /*
+ * Update the later_rq clock here, because the clock is used
+ * by the cpufreq_update_util() inside __add_running_bw().
+ */
+ update_rq_clock(later_rq);
+- add_running_bw(&next_task->dl, &later_rq->dl);
+ activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
+ ret = 1;
+
+@@ -2186,11 +2182,7 @@ static void pull_dl_task(struct rq *this_rq)
+ resched = true;
+
+ deactivate_task(src_rq, p, 0);
+- sub_running_bw(&p->dl, &src_rq->dl);
+- sub_rq_bw(&p->dl, &src_rq->dl);
+ set_task_cpu(p, this_cpu);
+- add_rq_bw(&p->dl, &this_rq->dl);
+- add_running_bw(&p->dl, &this_rq->dl);
+ activate_task(this_rq, p, 0);
+ dmin = p->dl.deadline;
+
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 7acc632c3b82..6e52b67b420e 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1051,7 +1051,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
+
+ if (!rcu_access_pointer(group->poll_kworker)) {
+ struct sched_param param = {
+- .sched_priority = MAX_RT_PRIO - 1,
++ .sched_priority = 1,
+ };
+ struct kthread_worker *kworker;
+
+@@ -1061,7 +1061,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ mutex_unlock(&group->trigger_lock);
+ return ERR_CAST(kworker);
+ }
+- sched_setscheduler(kworker->task, SCHED_FIFO, &param);
++ sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
+ kthread_init_delayed_work(&group->poll_work,
+ psi_poll_work);
+ rcu_assign_pointer(group->poll_kworker, kworker);
+@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
+ * deadlock while waiting for psi_poll_work to acquire trigger_lock
+ */
+ if (kworker_to_destroy) {
++ /*
++ * After the RCU grace period has expired, the worker
++ * can no longer be found through group->poll_kworker.
++ * But it might have been already scheduled before
++ * that - deschedule it cleanly before destroying it.
++ */
+ kthread_cancel_delayed_work_sync(&group->poll_work);
++ atomic_set(&group->poll_scheduled, 0);
++
+ kthread_destroy_worker(kworker_to_destroy);
+ }
+ kfree(t);
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 885642c82aaa..83236e22a8a8 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -32,6 +32,7 @@
+ #include <linux/shmem_fs.h>
+ #include <linux/oom.h>
+ #include <linux/numa.h>
++#include <linux/page_owner.h>
+
+ #include <asm/tlb.h>
+ #include <asm/pgalloc.h>
+@@ -2500,6 +2501,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
+ }
+
+ ClearPageCompound(head);
++
++ split_page_owner(head, HPAGE_PMD_ORDER);
++
+ /* See comment in __split_huge_page_tail() */
+ if (PageAnon(head)) {
+ /* Additional pin to swap cache */
+diff --git a/mm/kasan/common.c b/mm/kasan/common.c
+index 242fdc01aaa9..874d75bb65eb 100644
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -409,8 +409,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ return shadow_byte < 0 ||
+ shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+- else
+- return tag != (u8)shadow_byte;
++
++ /* else CONFIG_KASAN_SW_TAGS: */
++ if ((u8)shadow_byte == KASAN_TAG_INVALID)
++ return true;
++ if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
++ return true;
++
++ return false;
+ }
+
+ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 8f5dabfaf94d..bb783c27ba21 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3150,6 +3150,60 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
+ }
+ }
+
++static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
++{
++ unsigned long stat[MEMCG_NR_STAT];
++ struct mem_cgroup *mi;
++ int node, cpu, i;
++
++ for (i = 0; i < MEMCG_NR_STAT; i++)
++ stat[i] = 0;
++
++ for_each_online_cpu(cpu)
++ for (i = 0; i < MEMCG_NR_STAT; i++)
++ stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
++
++ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
++ for (i = 0; i < MEMCG_NR_STAT; i++)
++ atomic_long_add(stat[i], &mi->vmstats[i]);
++
++ for_each_node(node) {
++ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
++ struct mem_cgroup_per_node *pi;
++
++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
++ stat[i] = 0;
++
++ for_each_online_cpu(cpu)
++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
++ stat[i] += raw_cpu_read(
++ pn->lruvec_stat_cpu->count[i]);
++
++ for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
++ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
++ atomic_long_add(stat[i], &pi->lruvec_stat[i]);
++ }
++}
++
++static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
++{
++ unsigned long events[NR_VM_EVENT_ITEMS];
++ struct mem_cgroup *mi;
++ int cpu, i;
++
++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
++ events[i] = 0;
++
++ for_each_online_cpu(cpu)
++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
++ events[i] += raw_cpu_read(
++ memcg->vmstats_percpu->events[i]);
++
++ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
++ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
++ atomic_long_add(events[i], &mi->vmevents[i]);
++}
++
+ #ifdef CONFIG_MEMCG_KMEM
+ static int memcg_online_kmem(struct mem_cgroup *memcg)
+ {
+@@ -4551,6 +4605,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
+ {
+ int node;
+
++ /*
++ * Flush percpu vmstats and vmevents to guarantee the value correctness
++ * on parent's and all ancestor levels.
++ */
++ memcg_flush_percpu_vmstats(memcg);
++ memcg_flush_percpu_vmevents(memcg);
+ for_each_node(node)
+ free_mem_cgroup_per_node_info(memcg, node);
+ free_percpu(memcg->vmstats_percpu);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8e3bc949ebcc..81177ed87a38 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2167,27 +2167,12 @@ static int move_freepages(struct zone *zone,
+ unsigned int order;
+ int pages_moved = 0;
+
+-#ifndef CONFIG_HOLES_IN_ZONE
+- /*
+- * page_zone is not safe to call in this context when
+- * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
+- * anyway as we check zone boundaries in move_freepages_block().
+- * Remove at a later date when no bug reports exist related to
+- * grouping pages by mobility
+- */
+- VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
+- pfn_valid(page_to_pfn(end_page)) &&
+- page_zone(start_page) != page_zone(end_page));
+-#endif
+ for (page = start_page; page <= end_page;) {
+ if (!pfn_valid_within(page_to_pfn(page))) {
+ page++;
+ continue;
+ }
+
+- /* Make sure we are not inadvertently changing nodes */
+- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+-
+ if (!PageBuddy(page)) {
+ /*
+ * We assume that pages that could be isolated for
+@@ -2202,6 +2187,10 @@ static int move_freepages(struct zone *zone,
+ continue;
+ }
+
++ /* Make sure we are not inadvertently changing nodes */
++ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
++ VM_BUG_ON_PAGE(page_zone(page) != zone, page);
++
+ order = page_order(page);
+ move_to_free_area(page, &zone->free_area[order], migratetype);
+ page += 1 << order;
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index c4debbe683eb..46686d0e3df8 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -41,6 +41,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/wait.h>
+ #include <linux/zpool.h>
+
+ /*
+@@ -144,6 +145,8 @@ struct z3fold_header {
+ * @release_wq: workqueue for safe page release
+ * @work: work_struct for safe page release
+ * @inode: inode for z3fold pseudo filesystem
++ * @destroying: bool to stop migration once we start destruction
++ * @isolated: int to count the number of pages currently in isolation
+ *
+ * This structure is allocated at pool creation time and maintains metadata
+ * pertaining to a particular z3fold pool.
+@@ -162,8 +165,11 @@ struct z3fold_pool {
+ const struct zpool_ops *zpool_ops;
+ struct workqueue_struct *compact_wq;
+ struct workqueue_struct *release_wq;
++ struct wait_queue_head isolate_wait;
+ struct work_struct work;
+ struct inode *inode;
++ bool destroying;
++ int isolated;
+ };
+
+ /*
+@@ -771,6 +777,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
+ goto out_c;
+ spin_lock_init(&pool->lock);
+ spin_lock_init(&pool->stale_lock);
++ init_waitqueue_head(&pool->isolate_wait);
+ pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
+ if (!pool->unbuddied)
+ goto out_pool;
+@@ -810,6 +817,15 @@ out:
+ return NULL;
+ }
+
++static bool pool_isolated_are_drained(struct z3fold_pool *pool)
++{
++ bool ret;
++
++ spin_lock(&pool->lock);
++ ret = pool->isolated == 0;
++ spin_unlock(&pool->lock);
++ return ret;
++}
+ /**
+ * z3fold_destroy_pool() - destroys an existing z3fold pool
+ * @pool: the z3fold pool to be destroyed
+@@ -819,6 +835,22 @@ out:
+ static void z3fold_destroy_pool(struct z3fold_pool *pool)
+ {
+ kmem_cache_destroy(pool->c_handle);
++ /*
++ * We set pool->destroying under lock to ensure that
++ * z3fold_page_isolate() sees any changes to destroying. This way we
++ * avoid the need for any memory barriers.
++ */
++
++ spin_lock(&pool->lock);
++ pool->destroying = true;
++ spin_unlock(&pool->lock);
++
++ /*
++ * We need to ensure that no pages are being migrated while we destroy
++ * these workqueues, as migration can queue work on either of the
++ * workqueues.
++ */
++ wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
+
+ /*
+ * We need to destroy pool->compact_wq before pool->release_wq,
+@@ -1309,6 +1341,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
+ return atomic64_read(&pool->pages_nr);
+ }
+
++/*
++ * z3fold_dec_isolated() expects to be called while pool->lock is held.
++ */
++static void z3fold_dec_isolated(struct z3fold_pool *pool)
++{
++ assert_spin_locked(&pool->lock);
++ VM_BUG_ON(pool->isolated <= 0);
++ pool->isolated--;
++
++ /*
++ * If we have no more isolated pages, we have to see if
++ * z3fold_destroy_pool() is waiting for a signal.
++ */
++ if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
++ wake_up_all(&pool->isolate_wait);
++}
++
++static void z3fold_inc_isolated(struct z3fold_pool *pool)
++{
++ pool->isolated++;
++}
++
+ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ {
+ struct z3fold_header *zhdr;
+@@ -1335,6 +1389,33 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ spin_lock(&pool->lock);
+ if (!list_empty(&page->lru))
+ list_del(&page->lru);
++ /*
++ * We need to check for destruction while holding pool->lock, as
++ * otherwise destruction could see 0 isolated pages, and
++ * proceed.
++ */
++ if (unlikely(pool->destroying)) {
++ spin_unlock(&pool->lock);
++ /*
++ * If this page isn't stale, somebody else holds a
++ * reference to it. Let's drop our refcount so that they
++ * can call the release logic.
++ */
++ if (unlikely(kref_put(&zhdr->refcount,
++ release_z3fold_page_locked))) {
++ /*
++ * If we get here we have kref problems, so we
++ * should freak out.
++ */
++ WARN(1, "Z3fold is experiencing kref problems\n");
++ return false;
++ }
++ z3fold_page_unlock(zhdr);
++ return false;
++ }
++
++
++ z3fold_inc_isolated(pool);
+ spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
+ return true;
+@@ -1408,6 +1489,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+
+ queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
+
++ spin_lock(&pool->lock);
++ z3fold_dec_isolated(pool);
++ spin_unlock(&pool->lock);
++
+ page_mapcount_reset(page);
+ unlock_page(page);
+ put_page(page);
+@@ -1428,10 +1513,14 @@ static void z3fold_page_putback(struct page *page)
+ INIT_LIST_HEAD(&page->lru);
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+ atomic64_dec(&pool->pages_nr);
++ spin_lock(&pool->lock);
++ z3fold_dec_isolated(pool);
++ spin_unlock(&pool->lock);
+ return;
+ }
+ spin_lock(&pool->lock);
+ list_add(&page->lru, &pool->lru);
++ z3fold_dec_isolated(pool);
+ spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
+ }
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 0787d33b80d8..515b00801af2 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -53,6 +53,7 @@
+ #include <linux/zpool.h>
+ #include <linux/mount.h>
+ #include <linux/migrate.h>
++#include <linux/wait.h>
+ #include <linux/pagemap.h>
+ #include <linux/fs.h>
+
+@@ -267,6 +268,10 @@ struct zs_pool {
+ #ifdef CONFIG_COMPACTION
+ struct inode *inode;
+ struct work_struct free_work;
++ /* A wait queue for when migration races with async_free_zspage() */
++ struct wait_queue_head migration_wait;
++ atomic_long_t isolated_pages;
++ bool destroying;
+ #endif
+ };
+
+@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
+ zspage->isolated--;
+ }
+
++static void putback_zspage_deferred(struct zs_pool *pool,
++ struct size_class *class,
++ struct zspage *zspage)
++{
++ enum fullness_group fg;
++
++ fg = putback_zspage(class, zspage);
++ if (fg == ZS_EMPTY)
++ schedule_work(&pool->free_work);
++
++}
++
++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
++{
++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
++ atomic_long_dec(&pool->isolated_pages);
++ /*
++ * There's no possibility of racing, since wait_for_isolated_drain()
++ * checks the isolated count under &class->lock after enqueuing
++ * on migration_wait.
++ */
++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
++ wake_up_all(&pool->migration_wait);
++}
++
+ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+ struct page *newpage, struct page *oldpage)
+ {
+@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+ */
+ if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+ get_zspage_mapping(zspage, &class_idx, &fullness);
++ atomic_long_inc(&pool->isolated_pages);
+ remove_zspage(class, zspage, fullness);
+ }
+
+@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
+ * Page migration is done so let's putback isolated zspage to
+ * the list if @page is final isolated subpage in the zspage.
+ */
+- if (!is_zspage_isolated(zspage))
+- putback_zspage(class, zspage);
++ if (!is_zspage_isolated(zspage)) {
++ /*
++ * We cannot race with zs_destroy_pool() here because we wait
++ * for isolation to hit zero before we start destroying.
++ * Also, we ensure that everyone can see pool->destroying before
++ * we start waiting.
++ */
++ putback_zspage_deferred(pool, class, zspage);
++ zs_pool_dec_isolated(pool);
++ }
+
+ reset_page(page);
+ put_page(page);
+@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
+ spin_lock(&class->lock);
+ dec_zspage_isolation(zspage);
+ if (!is_zspage_isolated(zspage)) {
+- fg = putback_zspage(class, zspage);
+ /*
+ * Due to page_lock, we cannot free zspage immediately
+ * so let's defer.
+ */
+- if (fg == ZS_EMPTY)
+- schedule_work(&pool->free_work);
++ putback_zspage_deferred(pool, class, zspage);
++ zs_pool_dec_isolated(pool);
+ }
+ spin_unlock(&class->lock);
+ }
+@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
+ return 0;
+ }
+
++static bool pool_isolated_are_drained(struct zs_pool *pool)
++{
++ return atomic_long_read(&pool->isolated_pages) == 0;
++}
++
++/* Function for resolving migration */
++static void wait_for_isolated_drain(struct zs_pool *pool)
++{
++
++ /*
++ * We're in the process of destroying the pool, so there are no
++ * active allocations. zs_page_isolate() fails for completely free
++ * zspages, so we need only wait for the zs_pool's isolated
++ * count to hit zero.
++ */
++ wait_event(pool->migration_wait,
++ pool_isolated_are_drained(pool));
++}
++
+ static void zs_unregister_migration(struct zs_pool *pool)
+ {
++ pool->destroying = true;
++ /*
++ * We need a memory barrier here to ensure global visibility of
++ * pool->destroying. Thus pool->isolated pages will either be 0 in which
++ * case we don't care, or it will be > 0 and pool->destroying will
++ * ensure that we wake up once isolation hits 0.
++ */
++ smp_mb();
++ wait_for_isolated_drain(pool); /* This can block */
+ flush_work(&pool->free_work);
+ iput(pool->inode);
+ }
+@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
+ if (!pool->name)
+ goto err;
+
++ init_waitqueue_head(&pool->migration_wait);
++
+ if (create_cache(pool))
+ goto err;
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 1fa9ac483173..c8177a89f52c 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2267,8 +2267,10 @@ static int compat_do_replace(struct net *net, void __user *user,
+ state.buf_kern_len = size64;
+
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+- if (WARN_ON(ret < 0))
++ if (WARN_ON(ret < 0)) {
++ vfree(entries_tmp);
+ goto out_unlock;
++ }
+
+ vfree(entries_tmp);
+ tmp.entries_size = size64;
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 5275ddf580bc..72711053ebe6 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
+ pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
+ max_hops);
+
+- register_pernet_subsys(&cangw_pernet_ops);
++ ret = register_pernet_subsys(&cangw_pernet_ops);
++ if (ret)
++ return ret;
++
++ ret = -ENOMEM;
+ cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
+ 0, 0, NULL);
+-
+ if (!cgw_cache)
+- return -ENOMEM;
++ goto out_cache_create;
+
+ /* set notifier */
+ notifier.notifier_call = cgw_notifier;
+- register_netdevice_notifier(&notifier);
++ ret = register_netdevice_notifier(&notifier);
++ if (ret)
++ goto out_register_notifier;
+
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
+ NULL, cgw_dump_jobs, 0);
+- if (ret) {
+- unregister_netdevice_notifier(&notifier);
+- kmem_cache_destroy(cgw_cache);
+- return -ENOBUFS;
+- }
+-
+- /* Only the first call to rtnl_register_module can fail */
+- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+- cgw_create_job, NULL, 0);
+- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+- cgw_remove_job, NULL, 0);
++ if (ret)
++ goto out_rtnl_register1;
++
++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
++ cgw_create_job, NULL, 0);
++ if (ret)
++ goto out_rtnl_register2;
++ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
++ cgw_remove_job, NULL, 0);
++ if (ret)
++ goto out_rtnl_register3;
+
+ return 0;
++
++out_rtnl_register3:
++ rtnl_unregister(PF_CAN, RTM_NEWROUTE);
++out_rtnl_register2:
++ rtnl_unregister(PF_CAN, RTM_GETROUTE);
++out_rtnl_register1:
++ unregister_netdevice_notifier(&notifier);
++out_register_notifier:
++ kmem_cache_destroy(cgw_cache);
++out_cache_create:
++ unregister_pernet_subsys(&cangw_pernet_ops);
++
++ return ret;
+ }
+
+ static __exit void cgw_module_exit(void)
+diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
+index 9a8eca5eda65..565ea889673c 100644
+--- a/net/ceph/osd_client.c
++++ b/net/ceph/osd_client.c
+@@ -1504,7 +1504,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+ struct ceph_osds up, acting;
+ bool force_resend = false;
+ bool unpaused = false;
+- bool legacy_change;
++ bool legacy_change = false;
+ bool split = false;
+ bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
+ bool recovery_deletes = ceph_osdmap_flag(osdc,
+@@ -1592,15 +1592,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+ t->osd = acting.primary;
+ }
+
+- if (unpaused || legacy_change || force_resend ||
+- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
+- RESEND_ON_SPLIT)))
++ if (unpaused || legacy_change || force_resend || split)
+ ct_res = CALC_TARGET_NEED_RESEND;
+ else
+ ct_res = CALC_TARGET_NO_ACTION;
+
+ out:
+- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
++ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
++ legacy_change, force_resend, split, ct_res, t->osd);
+ return ct_res;
+ }
+
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index be6092ac69f8..8a4a45e7c29d 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -252,6 +252,8 @@ static void sock_map_free(struct bpf_map *map)
+ raw_spin_unlock_bh(&stab->lock);
+ rcu_read_unlock();
+
++ synchronize_rcu();
++
+ bpf_map_area_free(stab->sks);
+ kfree(stab);
+ }
+@@ -281,16 +283,20 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
+ struct sock **psk)
+ {
+ struct sock *sk;
++ int err = 0;
+
+ raw_spin_lock_bh(&stab->lock);
+ sk = *psk;
+ if (!sk_test || sk_test == sk)
+- *psk = NULL;
++ sk = xchg(psk, NULL);
++
++ if (likely(sk))
++ sock_map_unref(sk, psk);
++ else
++ err = -EINVAL;
++
+ raw_spin_unlock_bh(&stab->lock);
+- if (unlikely(!sk))
+- return -EINVAL;
+- sock_map_unref(sk, psk);
+- return 0;
++ return err;
+ }
+
+ static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
+@@ -333,6 +339,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
+ struct sock *sk, u64 flags)
+ {
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
++ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct sk_psock_link *link;
+ struct sk_psock *psock;
+ struct sock *osk;
+@@ -343,6 +350,8 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
+ return -EINVAL;
+ if (unlikely(idx >= map->max_entries))
+ return -E2BIG;
++ if (unlikely(icsk->icsk_ulp_data))
++ return -EINVAL;
+
+ link = sk_psock_init_link();
+ if (!link)
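
In the __sock_map_delete() rework above, the slot is claimed with xchg() and both the unref and the error decision now stay inside the spinlock, instead of dropping the lock between reading and clearing *psk. A hedged model of the claim step, using C11 atomic_exchange in place of the kernel's xchg(), with the lock and sock_map_unref() elided:

    /* Model of "claim the slot atomically, then decide"; a sketch,
     * not the kernel code. */
    #include <stdatomic.h>
    #include <stddef.h>

    int delete_slot(_Atomic(void *) *psk, void *expect)
    {
        void *sk = atomic_load(psk);

        if (!expect || expect == sk)
            sk = atomic_exchange(psk, NULL);  /* old value; slot now empty */

        return sk ? 0 : -1;   /* -EINVAL in the kernel when nothing to drop */
    }
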
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 1b224fa27367..ad1e58184c4e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3796,9 +3796,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+ }
+
+ /* Always allow software iftypes */
+- if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
+- (iftype == NL80211_IFTYPE_AP_VLAN &&
+- local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
++ if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
+ if (radar_detect)
+ return -EINVAL;
+ return 0;
+@@ -3833,7 +3831,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+
+ if (sdata_iter == sdata ||
+ !ieee80211_sdata_running(sdata_iter) ||
+- local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
++ cfg80211_iftype_allowed(local->hw.wiphy,
++ wdev_iter->iftype, 0, 1))
+ continue;
+
+ params.iftype_num[wdev_iter->iftype]++;
+diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+index b73c37b3a791..cfe7b556775f 100644
+--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
++++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+@@ -227,7 +227,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+
+ e.id = ip_to_id(map, ip);
+
+- if (opt->flags & IPSET_DIM_ONE_SRC)
++ if (opt->flags & IPSET_DIM_TWO_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 16afa0df4004..e103c875383a 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1161,7 +1161,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
+ return -ENOENT;
+
+ write_lock_bh(&ip_set_ref_lock);
+- if (set->ref != 0) {
++ if (set->ref != 0 || set->ref_netlink != 0) {
+ ret = -IPSET_ERR_REFERENCED;
+ goto out;
+ }
+diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
+index faf59b6a998f..24d8f4df4230 100644
+--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
++++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
+@@ -89,15 +89,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+- /* MAC can be src only */
+- if (!(opt->flags & IPSET_DIM_TWO_SRC))
+- return 0;
+-
+ if (skb_mac_header(skb) < skb->head ||
+ (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+ return -EINVAL;
+
+- if (opt->flags & IPSET_DIM_ONE_SRC)
++ if (opt->flags & IPSET_DIM_TWO_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index d09eaf153544..8c9bd3ae9edf 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
+
+ service_in_use:
+ write_unlock(&local->services_lock);
+- rxrpc_put_local(local);
++ rxrpc_unuse_local(local);
+ ret = -EADDRINUSE;
+ error_unlock:
+ release_sock(&rx->sk);
+@@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk)
+ rxrpc_queue_work(&rxnet->service_conn_reaper);
+ rxrpc_queue_work(&rxnet->client_conn_reaper);
+
+- rxrpc_put_local(rx->local);
++ rxrpc_unuse_local(rx->local);
+ rx->local = NULL;
+ key_put(rx->key);
+ rx->key = NULL;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 80335b4ee4fd..9796c45d2f6a 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -254,7 +254,8 @@ struct rxrpc_security {
+ */
+ struct rxrpc_local {
+ struct rcu_head rcu;
+- atomic_t usage;
++ atomic_t active_users; /* Number of users of the local endpoint */
++ atomic_t usage; /* Number of references to the structure */
+ struct rxrpc_net *rxnet; /* The network ns in which this resides */
+ struct list_head link;
+ struct socket *socket; /* my UDP socket */
+@@ -1002,6 +1003,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
+ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
+ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
+ void rxrpc_put_local(struct rxrpc_local *);
++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
++void rxrpc_unuse_local(struct rxrpc_local *);
+ void rxrpc_queue_local(struct rxrpc_local *);
+ void rxrpc_destroy_all_locals(struct rxrpc_net *);
+
+@@ -1061,6 +1064,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
+ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
+ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
+ void rxrpc_put_peer(struct rxrpc_peer *);
++void rxrpc_put_peer_locked(struct rxrpc_peer *);
+
+ /*
+ * proc.c
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 5bd6f1546e5c..ee95d1cd1cdf 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -1108,8 +1108,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
+ {
+ _enter("%p,%p", local, skb);
+
+- skb_queue_tail(&local->event_queue, skb);
+- rxrpc_queue_local(local);
++ if (rxrpc_get_local_maybe(local)) {
++ skb_queue_tail(&local->event_queue, skb);
++ rxrpc_queue_local(local);
++ } else {
++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
++ }
+ }
+
+ /*
+@@ -1119,8 +1123,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+ {
+ CHECK_SLAB_OKAY(&local->usage);
+
+- skb_queue_tail(&local->reject_queue, skb);
+- rxrpc_queue_local(local);
++ if (rxrpc_get_local_maybe(local)) {
++ skb_queue_tail(&local->reject_queue, skb);
++ rxrpc_queue_local(local);
++ } else {
++ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
++ }
+ }
+
+ /*
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index b1c71bad510b..72a6e12a9304 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+ local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
+ if (local) {
+ atomic_set(&local->usage, 1);
++ atomic_set(&local->active_users, 1);
+ local->rxnet = rxnet;
+ INIT_LIST_HEAD(&local->link);
+ INIT_WORK(&local->processor, rxrpc_local_processor);
+@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+ local->debug_id = atomic_inc_return(&rxrpc_debug_id);
+ memcpy(&local->srx, srx, sizeof(*srx));
+ local->srx.srx_service = 0;
+- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
++ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
+ }
+
+ _leave(" = %p", local);
+@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
+ * bind the transport socket may still fail if we're attempting
+ * to use a local address that the dying object is still using.
+ */
+- if (!rxrpc_get_local_maybe(local)) {
+- cursor = cursor->next;
+- list_del_init(&local->link);
++ if (!rxrpc_use_local(local))
+ break;
+- }
+
+ age = "old";
+ goto found;
+@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
+ if (ret < 0)
+ goto sock_error;
+
+- list_add_tail(&local->link, cursor);
++ if (cursor != &rxnet->local_endpoints)
++ list_replace_init(cursor, &local->link);
++ else
++ list_add_tail(&local->link, cursor);
+ age = "new";
+
+ found:
+@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
+ int n;
+
+ n = atomic_inc_return(&local->usage);
+- trace_rxrpc_local(local, rxrpc_local_got, n, here);
++ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
+ return local;
+ }
+
+@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
+ if (local) {
+ int n = atomic_fetch_add_unless(&local->usage, 1, 0);
+ if (n > 0)
+- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
++ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
++ n + 1, here);
+ else
+ local = NULL;
+ }
+@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
+ }
+
+ /*
+- * Queue a local endpoint.
++ * Queue a local endpoint and pass the caller's reference to the work item.
+ */
+ void rxrpc_queue_local(struct rxrpc_local *local)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = local->debug_id;
++ int n = atomic_read(&local->usage);
+
+ if (rxrpc_queue_work(&local->processor))
+- trace_rxrpc_local(local, rxrpc_local_queued,
+- atomic_read(&local->usage), here);
+-}
+-
+-/*
+- * A local endpoint reached its end of life.
+- */
+-static void __rxrpc_put_local(struct rxrpc_local *local)
+-{
+- _enter("%d", local->debug_id);
+- rxrpc_queue_work(&local->processor);
++ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
++ else
++ rxrpc_put_local(local);
+ }
+
+ /*
+@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
+
+ if (local) {
+ n = atomic_dec_return(&local->usage);
+- trace_rxrpc_local(local, rxrpc_local_put, n, here);
++ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
+
+ if (n == 0)
+- __rxrpc_put_local(local);
++ call_rcu(&local->rcu, rxrpc_local_rcu);
++ }
++}
++
++/*
++ * Start using a local endpoint.
++ */
++struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
++{
++ unsigned int au;
++
++ local = rxrpc_get_local_maybe(local);
++ if (!local)
++ return NULL;
++
++ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
++ if (au == 0) {
++ rxrpc_put_local(local);
++ return NULL;
++ }
++
++ return local;
++}
++
++/*
++ * Cease using a local endpoint. Once the number of active users reaches 0, we
++ * start the closure of the transport in the work processor.
++ */
++void rxrpc_unuse_local(struct rxrpc_local *local)
++{
++ unsigned int au;
++
++ if (local) {
++ au = atomic_dec_return(&local->active_users);
++ if (au == 0)
++ rxrpc_queue_local(local);
++ else
++ rxrpc_put_local(local);
+ }
+ }
+
+@@ -393,16 +426,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
+
+ _enter("%d", local->debug_id);
+
+- /* We can get a race between an incoming call packet queueing the
+- * processor again and the work processor starting the destruction
+- * process which will shut down the UDP socket.
+- */
+- if (local->dead) {
+- _leave(" [already dead]");
+- return;
+- }
+- local->dead = true;
+-
+ mutex_lock(&rxnet->local_mutex);
+ list_del_init(&local->link);
+ mutex_unlock(&rxnet->local_mutex);
+@@ -422,13 +445,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
+ */
+ rxrpc_purge_queue(&local->reject_queue);
+ rxrpc_purge_queue(&local->event_queue);
+-
+- _debug("rcu local %d", local->debug_id);
+- call_rcu(&local->rcu, rxrpc_local_rcu);
+ }
+
+ /*
+- * Process events on an endpoint
++ * Process events on an endpoint. The work item carries a ref which
++ * we must release.
+ */
+ static void rxrpc_local_processor(struct work_struct *work)
+ {
+@@ -436,13 +457,15 @@ static void rxrpc_local_processor(struct work_struct *work)
+ container_of(work, struct rxrpc_local, processor);
+ bool again;
+
+- trace_rxrpc_local(local, rxrpc_local_processing,
++ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
+ atomic_read(&local->usage), NULL);
+
+ do {
+ again = false;
+- if (atomic_read(&local->usage) == 0)
+- return rxrpc_local_destroyer(local);
++ if (atomic_read(&local->active_users) == 0) {
++ rxrpc_local_destroyer(local);
++ break;
++ }
+
+ if (!skb_queue_empty(&local->reject_queue)) {
+ rxrpc_reject_packets(local);
+@@ -454,6 +477,8 @@ static void rxrpc_local_processor(struct work_struct *work)
+ again = true;
+ }
+ } while (again);
++
++ rxrpc_put_local(local);
+ }
+
+ /*
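
The local_object.c changes above split one counter into two: `usage` pins the memory (the last put frees via call_rcu()), while `active_users` keeps the transport open (the last unuse queues the destroyer). Both counters lean on the get-unless-zero idiom of atomic_fetch_add_unless(); a minimal C11 model of that idiom:

    /* The fetch-add-unless-zero idiom behind rxrpc_use_local() and
     * rxrpc_get_local_maybe(), modeled with C11 atomics. */
    #include <stdatomic.h>
    #include <stdbool.h>

    bool get_unless_zero(atomic_int *cnt)
    {
        int old = atomic_load(cnt);

        while (old != 0)
            if (atomic_compare_exchange_weak(cnt, &old, old + 1))
                return true;   /* reference taken */
        return false;          /* object already on its way out */
    }
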
+diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
+index 9f2f45c09e58..7666ec72d37e 100644
+--- a/net/rxrpc/peer_event.c
++++ b/net/rxrpc/peer_event.c
+@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+ spin_lock_bh(&rxnet->peer_hash_lock);
+ list_add_tail(&peer->keepalive_link,
+ &rxnet->peer_keepalive[slot & mask]);
+- rxrpc_put_peer(peer);
++ rxrpc_put_peer_locked(peer);
+ }
+
+ spin_unlock_bh(&rxnet->peer_hash_lock);
+diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
+index 9d3ce81cf8ae..9c3ac96f71cb 100644
+--- a/net/rxrpc/peer_object.c
++++ b/net/rxrpc/peer_object.c
+@@ -436,6 +436,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
+ }
+ }
+
++/*
++ * Drop a ref on a peer record where the caller already holds the
++ * peer_hash_lock.
++ */
++void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
++{
++ const void *here = __builtin_return_address(0);
++ int n;
++
++ n = atomic_dec_return(&peer->usage);
++ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
++ if (n == 0) {
++ hash_del_rcu(&peer->hash_link);
++ list_del_init(&peer->keepalive_link);
++ kfree_rcu(peer, rcu);
++ }
++}
++
+ /*
+ * Make sure all peer records have been discarded.
+ */
+diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
+index 5d3f33ce6d41..bae14438f869 100644
+--- a/net/rxrpc/sendmsg.c
++++ b/net/rxrpc/sendmsg.c
+@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
+ rxrpc_set_call_completion(call,
+ RXRPC_CALL_LOCAL_ERROR,
+ 0, ret);
++ rxrpc_notify_socket(call);
+ goto out;
+ }
+ _debug("need instant resend %d", ret);
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 53ad3dbb76fe..ed24a0b071c3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1397,10 +1397,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ }
+ break;
+ case NETDEV_PRE_UP:
+- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
+- !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
+- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
+- wdev->use_4addr))
++ if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
++ wdev->use_4addr, 0))
+ return notifier_from_errno(-EOPNOTSUPP);
+
+ if (rfkill_blocked(rdev->rfkill))
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 520d437aa8d1..88a1de9def11 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3481,9 +3481,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
+ return err;
+ }
+
+- if (!(rdev->wiphy.interface_modes & (1 << type)) &&
+- !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
+- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
++ if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
+ return -EOPNOTSUPP;
+
+ err = nl80211_parse_mon_options(rdev, type, info, &params);
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 1c39d6a2e850..d0e35b7b9e35 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -1697,7 +1697,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+ num_interfaces += params->iftype_num[iftype];
+ if (params->iftype_num[iftype] > 0 &&
+- !(wiphy->software_iftypes & BIT(iftype)))
++ !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
+ used_iftypes |= BIT(iftype);
+ }
+
+@@ -1719,7 +1719,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
+ return -ENOMEM;
+
+ for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+- if (wiphy->software_iftypes & BIT(iftype))
++ if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
+ continue;
+ for (j = 0; j < c->n_limits; j++) {
+ all_iftypes |= limits[j].types;
+@@ -2072,3 +2072,26 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ return max_vht_nss;
+ }
+ EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
++
++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
++ bool is_4addr, u8 check_swif)
++
++{
++ bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
++
++ switch (check_swif) {
++ case 0:
++ if (is_vlan && is_4addr)
++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
++ return wiphy->interface_modes & BIT(iftype);
++ case 1:
++ if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
++ return wiphy->software_iftypes & BIT(iftype);
++ default:
++ break;
++ }
++
++ return false;
++}
++EXPORT_SYMBOL(cfg80211_iftype_allowed);
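
The new helper centralizes the AP_VLAN-with-4addr special case that the earlier hunks delete from core.c, nl80211.c, util.c and mac80211: check_swif selects between "may this iftype be created" (0) and "is this a software iftype" (1). A self-contained model of the same decision table, with the bitmaps and flags reduced to bools; a reading aid, not a replacement:

    #include <stdbool.h>

    bool iftype_allowed(bool in_modes, bool in_soft, bool wiphy_has_4addr_ap,
                        bool is_vlan, bool is_4addr, int check_swif)
    {
        switch (check_swif) {
        case 0:   /* creation check (NETDEV_PRE_UP, nl80211_new_interface) */
            if (is_vlan && is_4addr)
                return wiphy_has_4addr_ap;
            return in_modes;
        case 1:   /* software-iftype check (combination walking code) */
            if (!in_soft && is_vlan)
                return wiphy_has_4addr_ap;
            return in_soft;
        default:
            return false;
        }
    }
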
+diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
+index 9775bda2a4ca..d8aa6ab3f68b 100644
+--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
++++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
+@@ -367,9 +367,11 @@ static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+
+ static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
+ {
++ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
++ DRV_NAME);
++ struct device *parent = component->dev->parent;
+ snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
+- rtd->pcm->card->dev,
+- MIN_BUFFER, MAX_BUFFER);
++ parent, MIN_BUFFER, MAX_BUFFER);
+ return 0;
+ }
+
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index 70ed28d97d49..6398741ebd0e 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -63,6 +63,7 @@ static int graph_get_dai_id(struct device_node *ep)
+ struct device_node *endpoint;
+ struct of_endpoint info;
+ int i, id;
++ const u32 *reg;
+ int ret;
+
+ /* use driver specified DAI ID if exist */
+@@ -83,8 +84,9 @@ static int graph_get_dai_id(struct device_node *ep)
+ return info.id;
+
+ node = of_get_parent(ep);
++ reg = of_get_property(node, "reg", NULL);
+ of_node_put(node);
+- if (of_get_property(node, "reg", NULL))
++ if (reg)
+ return info.port;
+ }
+ node = of_graph_get_port_parent(ep);
+@@ -222,10 +224,6 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ dev_dbg(dev, "link_of DPCM (%pOF)\n", ep);
+
+- of_node_put(ports);
+- of_node_put(port);
+- of_node_put(node);
+-
+ if (li->cpu) {
+ int is_single_links = 0;
+
+@@ -243,17 +241,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_cpu(ep, dai_link, &is_single_links);
+ if (ret)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_clk_cpu(dev, ep, dai_link, dai);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ /* card->num_links includes Codec */
+ asoc_simple_canonicalize_cpu(dai_link, is_single_links);
+@@ -277,17 +275,17 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_codec(ep, dai_link);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_clk_codec(dev, ep, dai_link, dai);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_set_dailink_name(dev, dai_link,
+ "be.%s",
+ codecs->dai_name);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ /* check "prefix" from top node */
+ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
+@@ -307,19 +305,23 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_tdm(ep, dai);
+ if (ret)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_daifmt(dev, cpu_ep, codec_ep,
+ NULL, &dai_link->dai_fmt);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &graph_ops;
+ dai_link->init = asoc_simple_dai_init;
+
+- return 0;
++out_put_node:
++ of_node_put(ports);
++ of_node_put(port);
++ of_node_put(node);
++ return ret;
+ }
+
+ static int graph_dai_link_of(struct asoc_simple_priv *priv,
+diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
+index 9b568f578bcd..2712a2b20102 100644
+--- a/sound/soc/generic/simple-card.c
++++ b/sound/soc/generic/simple-card.c
+@@ -138,8 +138,6 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ li->link++;
+
+- of_node_put(node);
+-
+ /* For single DAI link & old style of DT node */
+ if (is_top)
+ prefix = PREFIX;
+@@ -161,17 +159,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_cpu(np, dai_link, &is_single_links);
+ if (ret)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_clk_cpu(dev, np, dai_link, dai);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ asoc_simple_canonicalize_cpu(dai_link, is_single_links);
+ } else {
+@@ -194,17 +192,17 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_codec(np, dai_link);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_clk_codec(dev, np, dai_link, dai);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_set_dailink_name(dev, dai_link,
+ "be.%s",
+ codecs->dai_name);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ /* check "prefix" from top node */
+ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
+@@ -222,19 +220,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv,
+
+ ret = asoc_simple_parse_tdm(np, dai);
+ if (ret)
+- return ret;
++ goto out_put_node;
+
+ ret = asoc_simple_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
+ if (ret < 0)
+- return ret;
++ goto out_put_node;
+
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &simple_ops;
+ dai_link->init = asoc_simple_dai_init;
+
+- return 0;
++out_put_node:
++ of_node_put(node);
++ return ret;
+ }
+
+ static int simple_dai_link_of(struct asoc_simple_priv *priv,
+@@ -378,8 +378,6 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
+ goto error;
+ }
+
+- of_node_put(codec);
+-
+ /* get convert-xxx property */
+ memset(&adata, 0, sizeof(adata));
+ for_each_child_of_node(node, np)
+@@ -401,11 +399,13 @@ static int simple_for_each_link(struct asoc_simple_priv *priv,
+ ret = func_noml(priv, np, codec, li, is_top);
+
+ if (ret < 0) {
++ of_node_put(codec);
+ of_node_put(np);
+ goto error;
+ }
+ }
+
++ of_node_put(codec);
+ node = of_get_next_child(top, node);
+ } while (!is_top && node);
+
+diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
+index 2fe1ce879123..c360ebc3ccc7 100644
+--- a/sound/soc/intel/boards/bytcht_es8316.c
++++ b/sound/soc/intel/boards/bytcht_es8316.c
+@@ -436,6 +436,14 @@ static const struct acpi_gpio_mapping byt_cht_es8316_gpios[] = {
+
+ /* Please keep this list alphabetically sorted */
+ static const struct dmi_system_id byt_cht_es8316_quirk_table[] = {
++ { /* Irbis NB41 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "IRBIS"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "NB41"),
++ },
++ .driver_data = (void *)(BYT_CHT_ES8316_INTMIC_IN2_MAP
++ | BYT_CHT_ES8316_JD_INVERTED),
++ },
+ { /* Teclast X98 Plus II */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
+index 0a34d0eb8dba..88ebaf6e1880 100644
+--- a/sound/soc/rockchip/rockchip_i2s.c
++++ b/sound/soc/rockchip/rockchip_i2s.c
+@@ -326,7 +326,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
+ val |= I2S_CHN_4;
+ break;
+ case 2:
+- case 1:
+ val |= I2S_CHN_2;
+ break;
+ default:
+@@ -459,7 +458,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
+ },
+ .capture = {
+ .stream_name = "Capture",
+- .channels_min = 1,
++ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = (SNDRV_PCM_FMTBIT_S8 |
+@@ -659,7 +658,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
+ }
+
+ if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
+- if (val >= 1 && val <= 8)
++ if (val >= 2 && val <= 8)
+ soc_dai->capture.channels_max = val;
+ }
+
+diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
+index e688169ff12a..d606e48fe551 100644
+--- a/sound/soc/samsung/odroid.c
++++ b/sound/soc/samsung/odroid.c
+@@ -275,9 +275,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
+ }
+
+ of_node_put(cpu);
+- of_node_put(codec);
+ if (ret < 0)
+- return ret;
++ goto err_put_node;
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, codec_link);
+ if (ret < 0)
+@@ -300,7 +299,6 @@ static int odroid_audio_probe(struct platform_device *pdev)
+ ret = PTR_ERR(priv->clk_i2s_bus);
+ goto err_put_sclk;
+ }
+- of_node_put(cpu_dai);
+
+ ret = devm_snd_soc_register_card(dev, card);
+ if (ret < 0) {
+@@ -308,6 +306,8 @@ static int odroid_audio_probe(struct platform_device *pdev)
+ goto err_put_clk_i2s;
+ }
+
++ of_node_put(cpu_dai);
++ of_node_put(codec);
+ return 0;
+
+ err_put_clk_i2s:
+@@ -317,6 +317,8 @@ err_put_sclk:
+ err_put_cpu_dai:
+ of_node_put(cpu_dai);
+ snd_soc_of_put_dai_link_codecs(codec_link);
++err_put_node:
++ of_node_put(codec);
+ return ret;
+ }
+
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 6aeba0d66ec5..dd0f43a1c5e1 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1605,8 +1605,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
+ }
+ }
+
+- if (dai_link->dai_fmt)
+- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
++ if (dai_link->dai_fmt) {
++ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
++ if (ret)
++ return ret;
++ }
+
+ ret = soc_post_component_init(rtd, dai_link->name);
+ if (ret)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index c91df5a9c840..f40adb604c25 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1156,8 +1156,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+ list_add_tail(&widget->work_list, list);
+
+ if (custom_stop_condition && custom_stop_condition(widget, dir)) {
+- widget->endpoints[dir] = 1;
+- return widget->endpoints[dir];
++ list = NULL;
++ custom_stop_condition = NULL;
+ }
+
+ if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
+@@ -1194,8 +1194,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+ *
+ * Optionally, can be supplied with a function acting as a stopping condition.
+ * This function takes the dapm widget currently being examined and the walk
+- * direction as an arguments, it should return true if the walk should be
+- * stopped and false otherwise.
++ * direction as an arguments, it should return true if widgets from that point
++ * in the graph onwards should not be added to the widget list.
+ */
+ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
+ struct list_head *list,
+@@ -3705,6 +3705,8 @@ request_failed:
+ dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
+ w->name, ret);
+
++ kfree_const(w->sname);
++ kfree(w);
+ return ERR_PTR(ret);
+ }
+
+diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
+index 5e8e31743a28..56009d147208 100644
+--- a/sound/soc/ti/davinci-mcasp.c
++++ b/sound/soc/ti/davinci-mcasp.c
+@@ -194,7 +194,7 @@ static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable)
+ {
+ u32 bit;
+
+- for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AFSR) {
++ for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) {
+ if (enable)
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
+ else
+@@ -222,6 +222,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
+ if (mcasp_is_synchronous(mcasp)) {
+ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
+ mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
++ mcasp_set_clk_pdir(mcasp, true);
+ }
+
+ /* Activate serializer(s) */
+@@ -1253,6 +1254,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
+ return ret;
+ }
+
++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
++ struct snd_pcm_hw_rule *rule)
++{
++ struct davinci_mcasp_ruledata *rd = rule->private;
++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
++ struct snd_mask nfmt;
++ int i, slot_width;
++
++ snd_mask_none(&nfmt);
++ slot_width = rd->mcasp->slot_width;
++
++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
++ if (snd_mask_test(fmt, i)) {
++ if (snd_pcm_format_width(i) <= slot_width) {
++ snd_mask_set(&nfmt, i);
++ }
++ }
++ }
++
++ return snd_mask_refine(fmt, &nfmt);
++}
++
+ static const unsigned int davinci_mcasp_dai_rates[] = {
+ 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
+ 88200, 96000, 176400, 192000,
+@@ -1360,7 +1383,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ struct davinci_mcasp_ruledata *ruledata =
+ &mcasp->ruledata[substream->stream];
+ u32 max_channels = 0;
+- int i, dir;
++ int i, dir, ret;
+ int tdm_slots = mcasp->tdm_slots;
+
+ /* Do not allow more then one stream per direction */
+@@ -1389,6 +1412,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ max_channels++;
+ }
+ ruledata->serializers = max_channels;
++ ruledata->mcasp = mcasp;
+ max_channels *= tdm_slots;
+ /*
+ * If the already active stream has less channels than the calculated
+@@ -1414,20 +1438,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+ 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &mcasp->chconstr[substream->stream]);
+
+- if (mcasp->slot_width)
+- snd_pcm_hw_constraint_minmax(substream->runtime,
+- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+- 8, mcasp->slot_width);
++ if (mcasp->slot_width) {
++ /* Only allow formats require <= slot_width bits on the bus */
++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
++ SNDRV_PCM_HW_PARAM_FORMAT,
++ davinci_mcasp_hw_rule_slot_width,
++ ruledata,
++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
++ if (ret)
++ return ret;
++ }
+
+ /*
+ * If we rely on implicit BCLK divider setting we should
+ * set constraints based on what we can provide.
+ */
+ if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
+- int ret;
+-
+- ruledata->mcasp = mcasp;
+-
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ davinci_mcasp_hw_rule_rate,
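
The new davinci_mcasp_hw_rule_slot_width() above builds a format mask holding only sample formats whose width fits the configured slot width, then refines the stream's mask against it, replacing the coarser SAMPLE_BITS min/max constraint. A runnable worked example of what such a filter admits for a 16-bit slot (hypothetical table, not the ALSA format enum):

    #include <stdio.h>

    int main(void)
    {
        struct { const char *name; int width; } fmts[] = {
            { "S8", 8 }, { "S16_LE", 16 }, { "S24_LE", 24 }, { "S32_LE", 32 },
        };
        int slot_width = 16;

        for (unsigned int i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
            if (fmts[i].width <= slot_width)
                printf("%s allowed\n", fmts[i].name);  /* S8, S16_LE */
        return 0;
    }
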
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 3865a5d27251..77e14d995479 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -1044,8 +1044,13 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
+ if (!has_datasec && kind == BTF_KIND_VAR) {
+ /* replace VAR with INT */
+ t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
+- t->size = sizeof(int);
+- *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
++ /*
++ * using size = 1 is the safest choice, 4 will be too
++ * big and cause kernel BTF validation failure if
++ * original variable took less than 4 bytes
++ */
++ t->size = 1;
++ *(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
+ } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
+ /* replace DATASEC with STRUCT */
+ struct btf_var_secinfo *v = (void *)(t + 1);
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index ca272c5b67f4..fa948c5445ec 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -327,17 +327,16 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
+
+ static int xsk_get_max_queues(struct xsk_socket *xsk)
+ {
+- struct ethtool_channels channels;
+- struct ifreq ifr;
++ struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
++ struct ifreq ifr = {};
+ int fd, err, ret;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return -errno;
+
+- channels.cmd = ETHTOOL_GCHANNELS;
+ ifr.ifr_data = (void *)&channels;
+- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
++ memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (err && errno != EOPNOTSUPP) {
+@@ -345,7 +344,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
+ goto out;
+ }
+
+- if (channels.max_combined == 0 || errno == EOPNOTSUPP)
++ if (err || channels.max_combined == 0)
+ /* If the device says it has no channels, then all traffic
+ * is sent to a single stream, so max queues = 1.
+ */
+@@ -562,7 +561,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ err = -errno;
+ goto out_socket;
+ }
+- strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
++ memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
+ xsk->ifname[IFNAMSIZ - 1] = '\0';
+
+ err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
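
Both strncpy() calls above already wrote at most IFNAMSIZ - 1 bytes with an explicit terminator forced on the next line; memcpy() keeps that contract (it copies a fixed IFNAMSIZ - 1 bytes regardless of string length, which is why the explicit '\0' still matters) while sidestepping compiler truncation warnings. For contrast, one conventional self-terminating spelling of "bounded copy into a fixed name field" -- an illustration, not part of this patch:

    #include <stdio.h>

    #define NAMESZ 16   /* stands in for IFNAMSIZ */

    void set_name(char dst[NAMESZ], const char *src)
    {
        /* snprintf stops at the source's NUL, so it never reads past
         * a short string; at most NAMESZ-1 chars plus '\0'. */
        snprintf(dst, NAMESZ, "%s", src);
    }
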
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index a7784554a80d..23c27ca48abf 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -379,8 +379,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
+
+ /* Allocate and initialize all memory on CPU#0: */
+ if (init_cpu0) {
+- orig_mask = bind_to_node(0);
+- bind_to_memnode(0);
++ int node = numa_node_of_cpu(0);
++
++ orig_mask = bind_to_node(node);
++ bind_to_memnode(node);
+ }
+
+ bytes = bytes0 + HPSIZE;
+diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
+index 9c228c55e1fb..22386ab35050 100644
+--- a/tools/perf/builtin-ftrace.c
++++ b/tools/perf/builtin-ftrace.c
+@@ -173,7 +173,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
+ int last_cpu;
+
+ last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
+- mask_size = (last_cpu + 3) / 4 + 1;
++ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
+ mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
+
+ cpumask = malloc(mask_size);
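
The sizing fix above can be checked by hand: CPUs 0..last_cpu need last_cpu/4 + 1 hex digits plus a terminating NUL, so last_cpu/4 + 2 bytes, and the old (last_cpu + 3) / 4 + 1 comes up one byte short whenever last_cpu is a multiple of 4. A runnable check (',' separators counted separately, as in the function above):

    #include <stdio.h>

    int main(void)
    {
        for (int last_cpu = 3; last_cpu <= 5; last_cpu++)
            printf("last_cpu=%d old=%d new=%d\n", last_cpu,
                   (last_cpu + 3) / 4 + 1,  /* old: 2 bytes at last_cpu=4 */
                   last_cpu / 4 + 2);       /* new: 3 bytes ("1f" + '\0') */
        return 0;
    }
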
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index 58f77fd0f59f..ed5423d8a95f 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -450,6 +450,7 @@ static struct fixed {
+ { "inst_retired.any_p", "event=0xc0" },
+ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
+ { "cpu_clk_unhalted.thread", "event=0x3c" },
++ { "cpu_clk_unhalted.core", "event=0x3c" },
+ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+ { NULL, NULL},
+ };
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index 0b599229bc7e..0aba5b39c21e 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
+ unsigned char *bitmap;
+ int last_cpu = cpu_map__cpu(map, map->nr - 1);
+
+- bitmap = zalloc((last_cpu + 7) / 8);
++ if (buf == NULL)
++ return 0;
++
++ bitmap = zalloc(last_cpu / 8 + 1);
+ if (bitmap == NULL) {
+ buf[0] = '\0';
+ return 0;
+diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+index 5aeaa284fc47..a68062820410 100644
+--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
++++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
+ }
+
+ /* Rewrite destination. */
+- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
+- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
++ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
+ ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
+ ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
+ ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
+diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
+index b0fda2877119..d438193804b2 100644
+--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
++++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
+@@ -974,6 +974,17 @@
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
++{
++ "read gso_segs from CGROUP_SKB",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
++ offsetof(struct __sk_buff, gso_segs)),
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
++},
+ {
+ "write gso_segs from CGROUP_SKB",
+ .insns = {
+diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
+new file mode 100644
+index 000000000000..63ed533f73d6
+--- /dev/null
++++ b/tools/testing/selftests/kvm/config
+@@ -0,0 +1,3 @@
++CONFIG_KVM=y
++CONFIG_KVM_INTEL=y
++CONFIG_KVM_AMD=y
+diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
+index cca2baa03fb8..a8d8e8b3dc81 100755
+--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
++++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
+@@ -93,18 +93,10 @@ sw1_create()
+ ip route add vrf v$ol1 192.0.2.16/28 \
+ nexthop dev g1a \
+ nexthop dev g1b
+-
+- tc qdisc add dev $ul1 clsact
+- tc filter add dev $ul1 egress pref 111 prot ipv4 \
+- flower dst_ip 192.0.2.66 action pass
+- tc filter add dev $ul1 egress pref 222 prot ipv4 \
+- flower dst_ip 192.0.2.82 action pass
+ }
+
+ sw1_destroy()
+ {
+- tc qdisc del dev $ul1 clsact
+-
+ ip route del vrf v$ol1 192.0.2.16/28
+
+ ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
+@@ -139,10 +131,18 @@ sw2_create()
+ ip route add vrf v$ol2 192.0.2.0/28 \
+ nexthop dev g2a \
+ nexthop dev g2b
++
++ tc qdisc add dev $ul2 clsact
++ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
++ flower vlan_id 111 action pass
++ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
++ flower vlan_id 222 action pass
+ }
+
+ sw2_destroy()
+ {
++ tc qdisc del dev $ul2 clsact
++
+ ip route del vrf v$ol2 192.0.2.0/28
+
+ ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
+@@ -187,12 +187,16 @@ setup_prepare()
+ sw1_create
+ sw2_create
+ h2_create
++
++ forwarding_enable
+ }
+
+ cleanup()
+ {
+ pre_cleanup
+
++ forwarding_restore
++
+ h2_destroy
+ sw2_destroy
+ sw1_destroy
+@@ -211,15 +215,15 @@ multipath4_test()
+ nexthop dev g1a weight $weight1 \
+ nexthop dev g1b weight $weight2
+
+- local t0_111=$(tc_rule_stats_get $ul1 111 egress)
+- local t0_222=$(tc_rule_stats_get $ul1 222 egress)
++ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
++ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ ip vrf exec v$h1 \
+ $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
+ -d 1msec -t udp "sp=1024,dp=0-32768"
+
+- local t1_111=$(tc_rule_stats_get $ul1 111 egress)
+- local t1_222=$(tc_rule_stats_get $ul1 222 egress)
++ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
++ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
+
+ local d111=$((t1_111 - t0_111))
+ local d222=$((t1_222 - t0_222))
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-10-07 17:45 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-10-07 17:45 UTC (permalink / raw
To: gentoo-commits
commit: 0b3da0b3904e0dabb4d3ba04448ba3169b8599d8
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 7 17:45:41 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Oct 7 17:45:41 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0b3da0b3
Linux patch 5.2.20
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1019_linux-5.2.20.patch | 5241 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5245 insertions(+)
diff --git a/0000_README b/0000_README
index 71b680e..428535d 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch: 1018_linux-5.2.19.patch
From: https://www.kernel.org
Desc: Linux 5.2.19
+Patch: 1019_linux-5.2.20.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.20
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1019_linux-5.2.20.patch b/1019_linux-5.2.20.patch
new file mode 100644
index 0000000..62e2397
--- /dev/null
+++ b/1019_linux-5.2.20.patch
@@ -0,0 +1,5241 @@
+diff --git a/Makefile b/Makefile
+index 5c981a5c882f..4bad77aa54c5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 8869742a85df..6029d324911c 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -75,7 +75,7 @@ config ARM
+ select HAVE_EXIT_THREAD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
+- select HAVE_FUNCTION_TRACER if !XIP_KERNEL
++ select HAVE_FUNCTION_TRACER if !XIP_KERNEL && (CC_IS_GCC || CLANG_VERSION >= 100000)
+ select HAVE_GCC_PLUGINS
+ select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_IDE if PCI || ISA || PCMCIA
+@@ -1545,8 +1545,9 @@ config ARM_PATCH_IDIV
+ code to do integer division.
+
+ config AEABI
+- bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
+- default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
++ bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
++ !CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
++ default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
+ help
+ This option allows for the kernel to be compiled using the latest
+ ARM ABI (aka EABI). This is only useful if you are using a user
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index f863c6935d0e..c0b278358301 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -112,6 +112,10 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
+ CFLAGS_ABI +=-funwind-tables
+ endif
+
++ifeq ($(CONFIG_CC_IS_CLANG),y)
++CFLAGS_ABI += -meabi gnu
++endif
++
+ # Accept old syntax despite ".syntax unified"
+ AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 0048eadd0681..e76155d5840b 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -211,7 +211,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+ {
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+- if (fsr & FSR_WRITE)
++ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+@@ -282,7 +282,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+- if (fsr & FSR_WRITE)
++ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+ flags |= FAULT_FLAG_WRITE;
+
+ /*
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index c063708fa503..9ecc2097a87a 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -6,6 +6,7 @@
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+ #define FSR_LNX_PF (1 << 31)
++#define FSR_CM (1 << 13)
+ #define FSR_WRITE (1 << 11)
+ #define FSR_FS4 (1 << 10)
+ #define FSR_FS3_0 (15)
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index f866870db749..0b94b674aa91 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -18,8 +18,9 @@
+ (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP (128*1024*1024UL)
++#define MAX_GAP ((STACK_TOP)/6*5)
++#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
+
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++ /* Values close to RLIM_INFINITY can overflow. */
++ if (gap + pad > gap)
++ gap += pad;
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+- return PAGE_ALIGN(TASK_SIZE - gap - rnd);
++ return PAGE_ALIGN(STACK_TOP - gap - rnd);
+ }
+
+ /*
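
The `if (gap + pad > gap)` test above is an unsigned-overflow guard: an RLIMIT_STACK near RLIM_INFINITY makes gap + pad wrap below gap, and in that case the pad is simply skipped instead of corrupting the mmap base. Standalone illustration (unsigned wraparound is well defined in C, which is what makes the test valid):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long gap = ULONG_MAX - 100;   /* ~RLIM_INFINITY */
        unsigned long pad = 4096;

        if (gap + pad > gap)   /* false here: the sum wrapped around */
            gap += pad;
        printf("gap=%lu (pad skipped on overflow)\n", gap);
        return 0;
    }
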
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 1aa2586fa597..13233c7917fe 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1177,6 +1177,22 @@ void __init adjust_lowmem_bounds(void)
+ */
+ vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+
++ /*
++ * The first usable region must be PMD aligned. Mark its start
++ * as MEMBLOCK_NOMAP if it isn't
++ */
++ for_each_memblock(memory, reg) {
++ if (!memblock_is_nomap(reg)) {
++ if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
++ phys_addr_t len;
++
++ len = round_up(reg->base, PMD_SIZE) - reg->base;
++ memblock_mark_nomap(reg->base, len);
++ }
++ break;
++ }
++ }
++
+ for_each_memblock(memory, reg) {
+ phys_addr_t block_start = reg->base;
+ phys_addr_t block_end = reg->base + reg->size;
+diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
+index 7a299a20f6dc..7a8b8bc69e8d 100644
+--- a/arch/arm64/include/asm/cmpxchg.h
++++ b/arch/arm64/include/asm/cmpxchg.h
+@@ -63,7 +63,7 @@ __XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
+ #undef __XCHG_CASE
+
+ #define __XCHG_GEN(sfx) \
+-static inline unsigned long __xchg##sfx(unsigned long x, \
++static __always_inline unsigned long __xchg##sfx(unsigned long x, \
+ volatile void *ptr, \
+ int size) \
+ { \
+@@ -105,7 +105,7 @@ __XCHG_GEN(_mb)
+ #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__)
+
+ #define __CMPXCHG_GEN(sfx) \
+-static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
++static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new, \
+ int size) \
+@@ -212,7 +212,7 @@ __CMPWAIT_CASE( , , 64);
+ #undef __CMPWAIT_CASE
+
+ #define __CMPWAIT_GEN(sfx) \
+-static inline void __cmpwait##sfx(volatile void *ptr, \
++static __always_inline void __cmpwait##sfx(volatile void *ptr, \
+ unsigned long val, \
+ int size) \
+ { \
+diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
+index b050641b5139..8dac7110f0cb 100644
+--- a/arch/arm64/mm/mmap.c
++++ b/arch/arm64/mm/mmap.c
+@@ -54,7 +54,11 @@ unsigned long arch_mmap_rnd(void)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
+- unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+ /* Values close to RLIM_INFINITY can overflow. */
+ if (gap + pad > gap)
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 94096299fc56..50cc2f0962e5 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -68,7 +68,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
+ "\t" __scbeqz " %0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -98,7 +98,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -132,7 +132,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -193,6 +193,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ if (kernel_uses_llsc) {
+ int temp;
+
++ loongson_llsc_mb();
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+@@ -200,16 +201,16 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ " .set pop \n"
+ " subu %0, %1, %3 \n"
+ " move %1, %0 \n"
+- " bltz %0, 1f \n"
++ " bltz %0, 2f \n"
+ " .set push \n"
+ " .set "MIPS_ISA_LEVEL" \n"
+ " sc %1, %2 \n"
+ "\t" __scbeqz " %1, 1b \n"
+- "1: \n"
++ "2: \n"
+ " .set pop \n"
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF_SMALL_ASM() (v->counter)
+- : "Ir" (i));
++ : "Ir" (i) : __LLSC_CLOBBER);
+ } else {
+ unsigned long flags;
+
+@@ -269,7 +270,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
+ "\t" __scbeqz " %0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -299,7 +300,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+@@ -333,7 +334,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
++ : "Ir" (i) : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long flags; \
+ \
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index b865e317a14f..9228f7386220 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -211,14 +211,22 @@
+ #define __smp_wmb() barrier()
+ #endif
+
++/*
++ * When LL/SC does imply order, it must also be a compiler barrier to avoid the
++ * compiler from reordering where the CPU will not. When it does not imply
++ * order, the compiler is also free to reorder across the LL/SC loop and
++ * ordering will be done by smp_llsc_mb() and friends.
++ */
+ #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+ #define __WEAK_LLSC_MB " sync \n"
++#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define __LLSC_CLOBBER
+ #else
+ #define __WEAK_LLSC_MB " \n"
++#define smp_llsc_mb() do { } while (0)
++#define __LLSC_CLOBBER "memory"
+ #endif
+
+-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+-
+ #ifdef CONFIG_CPU_CAVIUM_OCTEON
+ #define smp_mb__before_llsc() smp_wmb()
+ #define __smp_mb__before_llsc() __smp_wmb()
+@@ -238,36 +246,40 @@
+
+ /*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+- * store or pref) in between an ll & sc can cause the sc instruction to
++ * store or prefetch) in between an LL & SC can cause the SC instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+- * 1) A memory access appearing prior to the ll in program order may actually
+- * be executed after the ll - this is the reordering case.
++ * 1) A memory access appearing prior to the LL in program order may actually
++ * be executed after the LL - this is the reordering case.
+ *
+- * In order to avoid this we need to place a memory barrier (ie. a sync
+- * instruction) prior to every ll instruction, in between it & any earlier
+- * memory access instructions. Many of these cases are already covered by
+- * smp_mb__before_llsc() but for the remaining cases, typically ones in
+- * which multiple CPUs may operate on a memory location but ordering is not
+- * usually guaranteed, we use loongson_llsc_mb() below.
++ * In order to avoid this we need to place a memory barrier (ie. a SYNC
++ * instruction) prior to every LL instruction, in between it and any earlier
++ * memory access instructions.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+- * 2) If a conditional branch exists between an ll & sc with a target outside
+- * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
++ * 2) If a conditional branch exists between an LL & SC with a target outside
++ * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+- * execution of memory accesses from outside of the ll-sc loop.
++ * execution of memory accesses from outside of the LL-SC loop.
+ *
+- * In order to avoid this we need a memory barrier (ie. a sync instruction)
++ * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+ * at each affected branch target, for which we also use loongson_llsc_mb()
+ * defined below.
+ *
+ * This case affects all current Loongson 3 CPUs.
++ *
++ * The above described cases cause an error in the cache coherence protocol;
++ * such that the Invalidate of a competing LL-SC goes 'missing' and SC
++ * erroneously observes its core still has Exclusive state and lets the SC
++ * proceed.
++ *
++ * Therefore the error only occurs on SMP systems.
+ */
+ #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
+-#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
++#define loongson_llsc_mb() __asm__ __volatile__("sync" : : :"memory")
+ #else
+ #define loongson_llsc_mb() do { } while (0)
+ #endif
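
The new __LLSC_CLOBBER macro above makes the "memory" clobber conditional: when LL/SC itself implies ordering, every LL/SC asm block keeps the clobber so the compiler cannot reorder accesses across it; when it does not, the clobber is dropped and smp_llsc_mb() supplies the ordering instead. A host-compilable reminder of what a "memory" clobber buys -- it is purely a compiler barrier, pinning the order the compiler emits accesses in, not what the CPU does (illustrative, not MIPS-specific):

    int data, flag;

    void writer(void)
    {
        data = 1;
        __asm__ __volatile__("" : : : "memory");  /* emits no code; only
                                                   * fences the optimizer */
        flag = 1;   /* compiler may not hoist this above data = 1 */
    }
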
+diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
+index 9a466dde9b96..985d6a02f9ea 100644
+--- a/arch/mips/include/asm/bitops.h
++++ b/arch/mips/include/asm/bitops.h
+@@ -66,7 +66,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
++ : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
+@@ -76,7 +77,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __INS "%0, %3, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (bit), "r" (~0));
++ : "ir" (bit), "r" (~0)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ } else if (kernel_uses_llsc) {
+@@ -90,7 +92,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_set_bit(nr, addr);
+@@ -122,7 +125,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (~(1UL << bit)));
++ : "ir" (~(1UL << bit))
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+ loongson_llsc_mb();
+@@ -132,7 +136,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __INS "%0, $0, %2, 1 \n"
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (bit));
++ : "ir" (bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+ } else if (kernel_uses_llsc) {
+@@ -146,7 +151,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (~(1UL << bit)));
++ : "ir" (~(1UL << bit))
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_clear_bit(nr, addr);
+@@ -192,7 +198,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ " beqzl %0, 1b \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+@@ -207,7 +214,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
+ " " __SC "%0, %1 \n"
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
+- : "ir" (1UL << bit));
++ : "ir" (1UL << bit)
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ } else
+ __mips_change_bit(nr, addr);
+@@ -244,11 +252,12 @@ static inline int test_and_set_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -259,7 +268,7 @@ static inline int test_and_set_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -300,11 +309,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -315,7 +325,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -358,12 +368,13 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " " __LL "%0, %1 # test_and_clear_bit \n"
+@@ -372,13 +383,14 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " " __SC "%0, %1 \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "ir" (bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!temp));
+ #endif
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -390,7 +402,7 @@ static inline int test_and_clear_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+@@ -433,11 +445,12 @@ static inline int test_and_change_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } else if (kernel_uses_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
++ loongson_llsc_mb();
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+@@ -448,7 +461,7 @@ static inline int test_and_change_bit(unsigned long nr,
+ " .set pop \n"
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
+ : "r" (1UL << bit)
+- : "memory");
++ : __LLSC_CLOBBER);
+ } while (unlikely(!res));
+
+ res = temp & (1UL << bit);
+diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
+index f345a873742d..c8a47d18f628 100644
+--- a/arch/mips/include/asm/cmpxchg.h
++++ b/arch/mips/include/asm/cmpxchg.h
+@@ -46,6 +46,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
++ loongson_llsc_mb(); \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+@@ -60,7 +61,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
+ " .set pop \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
+- : "memory"); \
++ : __LLSC_CLOBBER); \
+ } else { \
+ unsigned long __flags; \
+ \
+@@ -117,6 +118,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ __typeof(*(m)) __ret; \
+ \
+ if (kernel_uses_llsc) { \
++ loongson_llsc_mb(); \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+@@ -132,8 +134,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
+ " .set pop \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
+- : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
+- : "memory"); \
++ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
++ : __LLSC_CLOBBER); \
++ loongson_llsc_mb(); \
+ } else { \
+ unsigned long __flags; \
+ \
+@@ -229,6 +232,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ */
+ local_irq_save(flags);
+
++ loongson_llsc_mb();
+ asm volatile(
+ " .set push \n"
+ " .set " MIPS_ISA_ARCH_LEVEL " \n"
+@@ -274,6 +278,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
+ "r" (old),
+ "r" (new)
+ : "memory");
++ loongson_llsc_mb();
+
+ local_irq_restore(flags);
+ return ret;
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index 1e6966e8527e..bdbdc19a2b8f 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -689,6 +689,9 @@
+ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+ #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+
++/* Ingenic Config7 bits */
++#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
++
+ /* Config7 Bits specific to MIPS Technologies. */
+
+ /* Performance counters implemented Per TC */
+@@ -2813,6 +2816,7 @@ __BUILD_SET_C0(status)
+ __BUILD_SET_C0(cause)
+ __BUILD_SET_C0(config)
+ __BUILD_SET_C0(config5)
++__BUILD_SET_C0(config7)
+ __BUILD_SET_C0(intcontrol)
+ __BUILD_SET_C0(intctl)
+ __BUILD_SET_C0(srsmap)
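(Context for the hunk above: __BUILD_SET_C0(config7) generates the
set_c0_config7()/clear_c0_config7()/change_c0_config7() accessors used by
the cpu-probe change below. Roughly, the generated setter reduces to the
following read-modify-write; this is an approximation for illustration,
not the literal macro expansion.)

	static inline unsigned int set_c0_config7(unsigned int set)
	{
		unsigned int res = read_c0_config7();	/* old Config7 */

		write_c0_config7(res | set);	/* set the requested bits */
		return res;			/* return the old value */
	}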
+diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
+index 180ad081afcf..c2d88c1dcc0f 100644
+--- a/arch/mips/kernel/branch.c
++++ b/arch/mips/kernel/branch.c
+@@ -58,6 +58,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+ {
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
++ int __maybe_unused bc_false = 0;
+
+ if (!cpu_has_mmips)
+ return 0;
+@@ -139,7 +140,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ #ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_bc2f_op:
+ case mm_bc1f_op: {
+- int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
+diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
+index 9635c1db3ae6..e654ffc1c8a0 100644
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1964,6 +1964,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+ c->cputype = CPU_JZRISC;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ __cpu_name[cpu] = "Ingenic JZRISC";
++ /*
++ * The XBurst core by default attempts to avoid branch target
++ * buffer lookups by detecting & special-casing loops. This
++ * feature causes BogoMIPS and lpj to be calculated incorrectly.
++ * Set cp0 config7 bit 4 to disable this feature.
++ */
++ set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+ break;
+ default:
+ panic("Unknown Ingenic Processor ID!");
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index b6dc78ad5d8c..b0e25e913bdb 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -132,6 +132,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
++ loongson_llsc_mb();
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index d79f2b432318..f5c778113384 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+ EXPORT_SYMBOL(shm_align_mask);
+
+ /* gap between mmap and stack */
+-#define MIN_GAP (128*1024*1024UL)
+-#define MAX_GAP ((TASK_SIZE)/6*5)
++#define MIN_GAP (128*1024*1024UL)
++#define MAX_GAP ((TASK_SIZE)/6*5)
++#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
+
+ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ {
+@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
+ static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
+ {
+ unsigned long gap = rlim_stack->rlim_cur;
++ unsigned long pad = stack_guard_gap;
++
++ /* Account for stack randomization if necessary */
++ if (current->flags & PF_RANDOMIZE)
++ pad += (STACK_RND_MASK << PAGE_SHIFT);
++
++ /* Values close to RLIM_INFINITY can overflow. */
++ if (gap + pad > gap)
++ gap += pad;
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
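(Worked example of the overflow guard just added: when rlim_cur is close
to RLIM_INFINITY, gap + pad wraps, and an unsigned wrapped sum compares
smaller than either operand, so the pad is skipped rather than corrupting
the gap. Self-contained sketch with made-up values, not kernel code:)

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long gap = ULONG_MAX - 10;	/* ~RLIM_INFINITY */
		unsigned long pad = 4096;	/* guard gap + randomization */

		if (gap + pad > gap)	/* false here: the addition wrapped */
			gap += pad;
		printf("gap unchanged: %lu\n", gap);
		return 0;
	}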
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 144ceb0fba88..bece1264d1c5 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -631,7 +631,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
+ return;
+ }
+
+- if (cpu_has_rixi && _PAGE_NO_EXEC) {
++ if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
+ if (fill_includes_sw_bits) {
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ } else {
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
+index 3a6aa57b9d90..eea28ca679db 100644
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -60,8 +60,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+
+ pagefault_enable();
+
+- if (!ret)
+- *oval = oldval;
++ *oval = oldval;
+
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
+ return ret;
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 89623962c727..fe0c32fb9f96 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -744,6 +744,33 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
+ */
+ #define MAX_WAIT_FOR_RECOVERY 300
+
++
++/* Walks the PE tree after processing an event to remove any stale PEs.
++ *
++ * NB: This needs to be recursive to ensure the leaf PEs get removed
++ * before their parents do. Although this could be done iteratively,
++ * we don't, since the recursive form is easier to read and we need to
++ * guarantee that the leaf nodes are handled first.
++ */
++static void eeh_pe_cleanup(struct eeh_pe *pe)
++{
++ struct eeh_pe *child_pe, *tmp;
++
++ list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
++ eeh_pe_cleanup(child_pe);
++
++ if (pe->state & EEH_PE_KEEP)
++ return;
++
++ if (!(pe->state & EEH_PE_INVALID))
++ return;
++
++ if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
++ list_del(&pe->child);
++ kfree(pe);
++ }
++}
++
+ /**
+ * eeh_handle_normal_event - Handle EEH events on a specific PE
+ * @pe: EEH PE - which should not be used after we return, as it may
+@@ -782,8 +809,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ return;
+ }
+
+- eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+-
+ eeh_pe_update_time_stamp(pe);
+ pe->freeze_count++;
+ if (pe->freeze_count > eeh_max_freezes) {
+@@ -793,6 +818,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
++ eeh_for_each_pe(pe, tmp_pe)
++ eeh_pe_for_each_dev(tmp_pe, edev, tmp)
++ edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ /* Walk the various device drivers attached to this slot through
+ * a reset sequence, giving each an opportunity to do what it needs
+ * to accomplish the reset. Each child gets a report of the
+@@ -969,6 +998,12 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ return;
+ }
+ }
++
++ /*
++ * Clean up any PEs without devices. While a PE is marked as
++ * EEH_PE_RECOVERING we don't want to modify the PE tree structure,
++ * so we do it here.
++ */
++ eeh_pe_cleanup(pe);
+ eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
+ }
+
+@@ -981,7 +1016,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
+ */
+ void eeh_handle_special_event(void)
+ {
+- struct eeh_pe *pe, *phb_pe;
++ struct eeh_pe *pe, *phb_pe, *tmp_pe;
++ struct eeh_dev *edev, *tmp_edev;
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ unsigned long flags;
+@@ -1040,6 +1076,7 @@ void eeh_handle_special_event(void)
+ */
+ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ rc == EEH_NEXT_ERR_FENCED_PHB) {
++ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ eeh_handle_normal_event(pe);
+ } else {
+ pci_lock_rescan_remove();
+@@ -1050,6 +1087,10 @@ void eeh_handle_special_event(void)
+ (phb_pe->state & EEH_PE_RECOVERING))
+ continue;
+
++ eeh_for_each_pe(pe, tmp_pe)
++ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
++ edev->mode &= ~EEH_DEV_NO_HANDLER;
++
+ /* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
+index 64cfbe41174b..e36653e5f76b 100644
+--- a/arch/powerpc/kernel/eeh_event.c
++++ b/arch/powerpc/kernel/eeh_event.c
+@@ -121,6 +121,14 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
+ }
+ event->pe = pe;
+
++ /*
++ * Mark the PE as recovering before inserting it in the queue.
++ * This prevents the PE from being free()ed by a hotplug driver
++ * while the PE is sitting in the event queue.
++ */
++ if (pe)
++ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
++
+ /* We may or may not be called in an interrupt context */
+ spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ list_add(&event->list, &eeh_eventlist);
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 854cef7b18f4..f0813d50e0b1 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -491,6 +491,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
+ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ {
+ struct eeh_pe *pe, *parent, *child;
++ bool keep, recover;
+ int cnt;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
+@@ -516,10 +517,21 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ */
+ while (1) {
+ parent = pe->parent;
++
++ /* PHB PEs should never be removed */
+ if (pe->type & EEH_PE_PHB)
+ break;
+
+- if (!(pe->state & EEH_PE_KEEP)) {
++ /*
++ * XXX: KEEP is set while resetting a PE. I don't think it's
++ * ever set without RECOVERING also being set. I could
++ * be wrong, though, so catch that with a WARN.
++ */
++ keep = !!(pe->state & EEH_PE_KEEP);
++ recover = !!(pe->state & EEH_PE_RECOVERING);
++ WARN_ON(keep && !recover);
++
++ if (!keep && !recover) {
+ if (list_empty(&pe->edevs) &&
+ list_empty(&pe->child_list)) {
+ list_del(&pe->child);
+@@ -528,6 +540,15 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+ break;
+ }
+ } else {
++ /*
++ * Mark the PE as invalid. At the end of the recovery
++ * process any invalid PEs will be garbage collected.
++ *
++ * We need to delay freeing them since edevs can be
++ * removed while traversing the PE tree, which might
++ * trigger the removal of a PE, and we can't deal
++ * with that (yet).
++ */
+ if (list_empty(&pe->edevs)) {
+ cnt = 0;
+ list_for_each_entry(child, &pe->child_list, child) {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 6c51aa845bce..3e564536a237 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -556,6 +556,10 @@ FTR_SECTION_ELSE
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ 9:
+ /* Deliver the machine check to host kernel in V mode. */
++BEGIN_FTR_SECTION
++ ld r10,ORIG_GPR3(r1)
++ mtspr SPRN_CFAR,r10
++END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ MACHINE_CHECK_HANDLER_WINDUP
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_0(PACA_EXMC)
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index fff2eb22427d..65cd96c3b1d6 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -871,15 +871,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
+ return 0;
+
+ for_each_cpu(cpu, cpus) {
++ struct device *dev = get_cpu_device(cpu);
++
+ switch (state) {
+ case DOWN:
+- cpuret = cpu_down(cpu);
++ cpuret = device_offline(dev);
+ break;
+ case UP:
+- cpuret = cpu_up(cpu);
++ cpuret = device_online(dev);
+ break;
+ }
+- if (cpuret) {
++ if (cpuret < 0) {
+ pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
+ __func__,
+ ((state == UP) ? "up" : "down"),
+@@ -968,6 +970,8 @@ int rtas_ibm_suspend_me(u64 handle)
+ data.token = rtas_token("ibm,suspend-me");
+ data.complete = &done;
+
++ lock_device_hotplug();
++
+ /* All present CPUs must be online */
+ cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
+ cpuret = rtas_online_cpus_mask(offline_mask);
+@@ -1007,6 +1011,7 @@ out_hotplug_enable:
+ __func__);
+
+ out:
++ unlock_device_hotplug();
+ free_cpumask_var(offline_mask);
+ return atomic_read(&data.error);
+ }
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 47df30982de1..c8ea3a253b81 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -472,6 +472,7 @@ void system_reset_exception(struct pt_regs *regs)
+ if (debugger(regs))
+ goto out;
+
++ kmsg_dump(KMSG_DUMP_OOPS);
+ /*
+ * A system reset is a request to dump, so we always send
+ * it through the crashdump code (if fadump or kdump are
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 8deb432c2975..2b6cc823046a 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -901,7 +901,7 @@ int __meminit radix__create_section_mapping(unsigned long start, unsigned long e
+ return -1;
+ }
+
+- return create_physical_mapping(start, end, nid);
++ return create_physical_mapping(__pa(start), __pa(end), nid);
+ }
+
+ int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index 3bdfc1e32096..2231959c5633 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -570,6 +570,7 @@ static int core_imc_mem_init(int cpu, int size)
+ {
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
+ struct imc_mem_info *mem_info;
++ struct page *page;
+
+ /*
+ * alloc_pages_node() will allocate memory for core in the
+@@ -580,11 +581,12 @@ static int core_imc_mem_init(int cpu, int size)
+ mem_info->id = core_id;
+
+ /* We need only vbase for core counters */
+- mem_info->vbase = page_address(alloc_pages_node(nid,
+- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!mem_info->vbase)
++ page = alloc_pages_node(nid,
++ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ mem_info->vbase = page_address(page);
+
+ /* Init the mutex */
+ core_imc_refc[core_id].id = core_id;
+@@ -839,15 +841,17 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
+ int nid = cpu_to_node(cpu_id);
+
+ if (!local_mem) {
++ struct page *page;
+ /*
+ * This case could happen only once at start, since we dont
+ * free the memory in cpu offline path.
+ */
+- local_mem = page_address(alloc_pages_node(nid,
++ page = alloc_pages_node(nid,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!local_mem)
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ local_mem = page_address(page);
+
+ per_cpu(thread_imc_mem, cpu_id) = local_mem;
+ }
+@@ -1085,11 +1089,14 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
+ int core_id = (cpu_id / threads_per_core);
+
+ if (!local_mem) {
+- local_mem = page_address(alloc_pages_node(phys_id,
+- GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+- __GFP_NOWARN, get_order(size)));
+- if (!local_mem)
++ struct page *page;
++
++ page = alloc_pages_node(phys_id,
++ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
++ __GFP_NOWARN, get_order(size));
++ if (!page)
+ return -ENOMEM;
++ local_mem = page_address(page);
+ per_cpu(trace_imc_mem, cpu_id) = local_mem;
+
+ /* Initialise the counters for trace mode */
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+index e28f03e1eb5e..c75ec37bf0cd 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
+ struct page *tce_mem = NULL;
+ __be64 *addr;
+
+- tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
++ tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
++ shift - PAGE_SHIFT);
+ if (!tce_mem) {
+ pr_err("Failed to allocate a TCE memory, level shift=%d\n",
+ shift);
+@@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+
+ if (ptce)
+ *ptce = cpu_to_be64(0);
++ else
++ /* Skip the rest of the level */
++ i |= tbl->it_level_size - 1;
+ }
+ }
+
+@@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
+ PAGE_SHIFT);
+ const unsigned long tce_table_size = 1UL << table_shift;
+- unsigned int tmplevels = levels;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ return -EINVAL;
+@@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ if (!is_power_of_2(window_size))
+ return -EINVAL;
+
+- if (alloc_userspace_copy && (window_size > (1ULL << 32)))
+- tmplevels = 1;
+-
+ /* Adjust direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ level_shift = entries_shift + 3;
+@@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+
+ /* Allocate TCE table */
+ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+- tmplevels, tce_table_size, &offset, &total_allocated);
++ 1, tce_table_size, &offset, &total_allocated);
+
+ /* addr==NULL means that the first level allocation failed */
+ if (!addr)
+@@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ * we did not allocate as much as we wanted,
+ * release partially allocated table.
+ */
+- if (tmplevels == levels && offset < tce_table_size)
++ if (levels == 1 && offset < tce_table_size)
+ goto free_tces_exit;
+
+ /* Allocate userspace view of the TCE table */
+ if (alloc_userspace_copy) {
+ offset = 0;
+ uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+- tmplevels, tce_table_size, &offset,
++ 1, tce_table_size, &offset,
+ &total_allocated_uas);
+ if (!uas)
+ goto free_tces_exit;
+- if (tmplevels == levels && (offset < tce_table_size ||
++ if (levels == 1 && (offset < tce_table_size ||
+ total_allocated_uas != total_allocated))
+ goto free_uas_exit;
+ }
+@@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+
+ pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
+ window_size, tce_table_size, bus_offset, tbl->it_base,
+- tbl->it_userspace, tmplevels, levels);
++ tbl->it_userspace, 1, levels);
+
+ return 0;
+
+diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
+index be26ab3d99e0..33a52114267d 100644
+--- a/arch/powerpc/platforms/powernv/pci.h
++++ b/arch/powerpc/platforms/powernv/pci.h
+@@ -225,7 +225,7 @@ extern struct iommu_table_group *pnv_npu_compound_attach(
+ struct pnv_ioda_pe *pe);
+
+ /* pci-ioda-tce.c */
+-#define POWERNV_IOMMU_DEFAULT_LEVELS 1
++#define POWERNV_IOMMU_DEFAULT_LEVELS 2
+ #define POWERNV_IOMMU_MAX_LEVELS 5
+
+ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 50e7aee3c7f3..accb732dcfac 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -9,6 +9,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
++#include <linux/sched.h>
+ #include <linux/smp.h>
+ #include <linux/stat.h>
+ #include <linux/completion.h>
+@@ -206,7 +207,11 @@ static int update_dt_node(__be32 phandle, s32 scope)
+
+ prop_data += vd;
+ }
++
++ cond_resched();
+ }
++
++ cond_resched();
+ } while (rtas_rc == 1);
+
+ of_node_put(dn);
+@@ -309,8 +314,12 @@ int pseries_devicetree_update(s32 scope)
+ add_dt_node(phandle, drc_index);
+ break;
+ }
++
++ cond_resched();
+ }
+ }
++
++ cond_resched();
+ } while (rc == 1);
+
+ kfree(rtas_buf);
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 8fa012a65a71..cc682759feae 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -344,6 +344,9 @@ static void pseries_lpar_idle(void)
+ * low power mode by ceding processor to hypervisor
+ */
+
++ if (!prep_irq_for_idle())
++ return;
++
+ /* Indicate to hypervisor that we are idle. */
+ get_lppaca()->idle = 1;
+
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 4a721fd62406..e15ccf19c153 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -2532,13 +2532,16 @@ static void dump_pacas(void)
+ static void dump_one_xive(int cpu)
+ {
+ unsigned int hwid = get_hard_smp_processor_id(cpu);
+-
+- opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+- opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+- opal_xive_dump(XIVE_DUMP_VP, hwid);
+- opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++ bool hv = cpu_has_feature(CPU_FTR_HVMODE);
++
++ if (hv) {
++ opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
++ opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
++ opal_xive_dump(XIVE_DUMP_VP, hwid);
++ opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
++ }
+
+ if (setjmp(bus_error_jmp) != 0) {
+ catch_memory_errors = 0;
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index ccad1398abd4..b5cfcad953c2 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
+ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ struct inode *root_inode;
+- struct dentry *root_dentry;
++ struct dentry *root_dentry, *update_file;
+ int rc = 0;
+ struct hypfs_sb_info *sbi;
+
+@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
+ rc = hypfs_diag_create_files(root_dentry);
+ if (rc)
+ return rc;
+- sbi->update_file = hypfs_create_update_file(root_dentry);
+- if (IS_ERR(sbi->update_file))
+- return PTR_ERR(sbi->update_file);
++ update_file = hypfs_create_update_file(root_dentry);
++ if (IS_ERR(update_file))
++ return PTR_ERR(update_file);
++ sbi->update_file = update_file;
+ hypfs_update_update(sb);
+ pr_info("Hypervisor filesystem mounted\n");
+ return 0;
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index 742ecf5b6c00..72200998687c 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -645,7 +645,9 @@ static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
+ .vector = stimer->config.apic_vector
+ };
+
+- return !kvm_apic_set_irq(vcpu, &irq, NULL);
++ if (lapic_in_kernel(vcpu))
++ return !kvm_apic_set_irq(vcpu, &irq, NULL);
++ return 0;
+ }
+
+ static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
+@@ -1854,7 +1856,13 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+
+ ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+- ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
++
++ /*
++ * Direct-mode synthetic timers only make sense with an
++ * in-kernel LAPIC.
++ */
++ if (lapic_in_kernel(vcpu))
++ ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+
+ break;
+
+diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
+index 6ad5ef48b61e..8cd2ac650b50 100644
+--- a/drivers/base/regmap/Kconfig
++++ b/drivers/base/regmap/Kconfig
+@@ -44,7 +44,7 @@ config REGMAP_IRQ
+
+ config REGMAP_SOUNDWIRE
+ tristate
+- depends on SOUNDWIRE_BUS
++ depends on SOUNDWIRE
+
+ config REGMAP_SCCB
+ tristate
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 024060165afa..76457003f140 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2594,7 +2594,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+ if (ret)
+ return ret;
+ if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+- WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+ return -EINVAL;
+ }
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index f124a2d2bb9f..92a89c8290aa 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -221,6 +221,9 @@ struct smi_info {
+ */
+ bool irq_enable_broken;
+
++ /* Is the driver in maintenance mode? */
++ bool in_maintenance_mode;
++
+ /*
+ * Did we get an attention that we did not handle?
+ */
+@@ -1007,11 +1010,20 @@ static int ipmi_thread(void *data)
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
+- if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
++ if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ ; /* do nothing */
+- else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
+- schedule();
+- else if (smi_result == SI_SM_IDLE) {
++ } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
++ /*
++ * In maintenance mode we run as fast as
++ * possible so that firmware updates complete
++ * quickly, but otherwise we don't bang on
++ * the scheduler.
++ */
++ if (smi_info->in_maintenance_mode)
++ schedule();
++ else
++ usleep_range(100, 200);
++ } else if (smi_result == SI_SM_IDLE) {
+ if (atomic_read(&smi_info->need_watch)) {
+ schedule_timeout_interruptible(100);
+ } else {
+@@ -1019,8 +1031,9 @@ static int ipmi_thread(void *data)
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+- } else
++ } else {
+ schedule_timeout_interruptible(1);
++ }
+ }
+ return 0;
+ }
+@@ -1198,6 +1211,7 @@ static void set_maintenance_mode(void *send_info, bool enable)
+
+ if (!enable)
+ atomic_set(&smi_info->req_events, 0);
++ smi_info->in_maintenance_mode = enable;
+ }
+
+ static void shutdown_smi(void *send_info);
+diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
+index 32dd29e0a37e..4de97cc7cb54 100644
+--- a/drivers/clk/actions/owl-common.c
++++ b/drivers/clk/actions/owl-common.c
+@@ -68,16 +68,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
+ struct clk_hw *hw;
+
+ for (i = 0; i < hw_clks->num; i++) {
++ const char *name;
+
+ hw = hw_clks->hws[i];
+-
+ if (IS_ERR_OR_NULL(hw))
+ continue;
+
++ name = hw->init->name;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+- i, hw->init->name);
++ i, name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
+index f607ee702c83..311cea0c3ae2 100644
+--- a/drivers/clk/at91/clk-main.c
++++ b/drivers/clk/at91/clk-main.c
+@@ -21,6 +21,10 @@
+
+ #define MOR_KEY_MASK (0xff << 16)
+
++#define clk_main_parent_select(s) (((s) & \
++ (AT91_PMC_MOSCEN | \
++ AT91_PMC_OSCBYPASS)) ? 1 : 0)
++
+ struct clk_main_osc {
+ struct clk_hw hw;
+ struct regmap *regmap;
+@@ -113,7 +117,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
+
+ regmap_read(regmap, AT91_PMC_SR, &status);
+
+- return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
++ return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
+ }
+
+ static const struct clk_ops main_osc_ops = {
+@@ -450,7 +454,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
+
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+
+- return status & AT91_PMC_MOSCEN ? 1 : 0;
++ return clk_main_parent_select(status);
+ }
+
+ static const struct clk_ops sam9x5_main_ops = {
+@@ -492,7 +496,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
+ clkmain->hw.init = &init;
+ clkmain->regmap = regmap;
+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
+- clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
++ clkmain->parent = clk_main_parent_select(status);
+
+ hw = &clkmain->hw;
+ ret = clk_hw_register(NULL, &clkmain->hw);
+diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
+index 06499568cf07..db5096fa9a17 100644
+--- a/drivers/clk/clk-bulk.c
++++ b/drivers/clk/clk-bulk.c
+@@ -18,10 +18,13 @@ static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
+ int ret;
+ int i;
+
+- for (i = 0; i < num_clks; i++)
++ for (i = 0; i < num_clks; i++) {
++ clks[i].id = NULL;
+ clks[i].clk = NULL;
++ }
+
+ for (i = 0; i < num_clks; i++) {
++ of_property_read_string_index(np, "clock-names", i, &clks[i].id);
+ clks[i].clk = of_clk_get(np, i);
+ if (IS_ERR(clks[i].clk)) {
+ ret = PTR_ERR(clks[i].clk);
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index dd93d3acc67d..8724ef6c469a 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -675,7 +675,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p5020_init_periph,
+ .cmux_groups = {
+- &p2041_cmux_grp1, &p2041_cmux_grp2
++ &p5020_cmux_grp1, &p5020_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 1, -1
+diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
+index daf1841b2adb..f29025c99c53 100644
+--- a/drivers/clk/imx/clk-imx8mq.c
++++ b/drivers/clk/imx/clk-imx8mq.c
+@@ -396,7 +396,8 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
+ clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+
+ /* AHB */
+- clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000);
++ /* The AHB clock feeds the AHB bus itself, so it is marked critical */
++ clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite_critical("ahb", imx8mq_ahb_sels, base + 0x9000);
+ clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+
+ /* IPG */
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index b7213023b238..7a815ec76aa5 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -191,6 +191,10 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
++ /* Enable BYPASS */
++ tmp |= BYPASS_MASK;
++ writel(tmp, pll->base);
++
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+@@ -250,6 +254,10 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ tmp &= ~RST_MASK;
+ writel_relaxed(tmp, pll->base);
+
++ /* Enable BYPASS */
++ tmp |= BYPASS_MASK;
++ writel_relaxed(tmp, pll->base);
++
+ div_val = (rate->mdiv << MDIV_SHIFT) | (rate->pdiv << PDIV_SHIFT) |
+ (rate->sdiv << SDIV_SHIFT);
+ writel_relaxed(div_val, pll->base + 0x4);
+@@ -283,16 +291,28 @@ static int clk_pll14xx_prepare(struct clk_hw *hw)
+ {
+ struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+ u32 val;
++ int ret;
+
+ /*
+ * RESETB = 1 from 0, PLL starts its normal
+ * operation after lock time
+ */
+ val = readl_relaxed(pll->base + GNRL_CTL);
++ if (val & RST_MASK)
++ return 0;
++ val |= BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
+ val |= RST_MASK;
+ writel_relaxed(val, pll->base + GNRL_CTL);
+
+- return clk_pll14xx_wait_lock(pll);
++ ret = clk_pll14xx_wait_lock(pll);
++ if (ret)
++ return ret;
++
++ val &= ~BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
++
++ return 0;
+ }
+
+ static int clk_pll14xx_is_prepared(struct clk_hw *hw)
+@@ -348,6 +368,7 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ struct clk_pll14xx *pll;
+ struct clk *clk;
+ struct clk_init_data init;
++ u32 val;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+@@ -379,6 +400,10 @@ struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
+ pll->rate_table = pll_clk->rate_table;
+ pll->rate_count = pll_clk->rate_count;
+
++ val = readl_relaxed(pll->base + GNRL_CTL);
++ val &= ~BYPASS_MASK;
++ writel_relaxed(val, pll->base + GNRL_CTL);
++
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register pll %s %lu\n",
+diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
+index 8028ff6f6610..db0b73d53551 100644
+--- a/drivers/clk/meson/axg-audio.c
++++ b/drivers/clk/meson/axg-audio.c
+@@ -992,15 +992,18 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
+
+ /* Take care to skip the registered input clocks */
+ for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
++ const char *name;
++
+ hw = data->hw_onecell_data->hws[i];
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
++ name = hw->init->name;
++
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+- dev_err(dev, "failed to register clock %s\n",
+- hw->init->name);
++ dev_err(dev, "failed to register clock %s\n", name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
+index 7131dcf9b060..95be125c3bdd 100644
+--- a/drivers/clk/qcom/gcc-sdm845.c
++++ b/drivers/clk/qcom/gcc-sdm845.c
+@@ -685,7 +685,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 5,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+
+@@ -709,7 +709,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+- .ops = &clk_rcg2_ops,
++ .ops = &clk_rcg2_floor_ops,
+ },
+ };
+
+diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
+index 92ece221b0d4..7f156cf1d7a6 100644
+--- a/drivers/clk/renesas/clk-mstp.c
++++ b/drivers/clk/renesas/clk-mstp.c
+@@ -338,7 +338,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
+ return;
+
+ pd->name = np->name;
+- pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++ pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++ GENPD_FLAG_ACTIVE_WAKEUP;
+ pd->attach_dev = cpg_mstp_attach_dev;
+ pd->detach_dev = cpg_mstp_detach_dev;
+ pm_genpd_init(pd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 9dfa28d6fd9f..cbe5fb468b7f 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -555,7 +555,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
+
+ genpd = &pd->genpd;
+ genpd->name = np->name;
+- genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
++ genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
++ GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
+index ad7951b6b285..dcf4e25a0216 100644
+--- a/drivers/clk/sirf/clk-common.c
++++ b/drivers/clk/sirf/clk-common.c
+@@ -297,9 +297,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
+ {
+ struct clk_dmn *clk = to_dmnclk(hw);
+ u32 cfg = clkc_readl(clk->regofs);
++ const char *name = clk_hw_get_name(hw);
+
+ /* parent of io domain can only be pll3 */
+- if (strcmp(hw->init->name, "io") == 0)
++ if (strcmp(name, "io") == 0)
+ return 4;
+
+ WARN_ON((cfg & (BIT(3) - 1)) > 4);
+@@ -311,9 +312,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
+ {
+ struct clk_dmn *clk = to_dmnclk(hw);
+ u32 cfg = clkc_readl(clk->regofs);
++ const char *name = clk_hw_get_name(hw);
+
+ /* parent of io domain can only be pll3 */
+- if (strcmp(hw->init->name, "io") == 0)
++ if (strcmp(name, "io") == 0)
+ return -EINVAL;
+
+ cfg &= ~(BIT(3) - 1);
+@@ -353,7 +355,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ unsigned long fin;
+ unsigned ratio, wait, hold;
+- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++ const char *name = clk_hw_get_name(hw);
++ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+
+ fin = *parent_rate;
+ ratio = fin / rate;
+@@ -375,7 +378,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ struct clk_dmn *clk = to_dmnclk(hw);
+ unsigned long fin;
+ unsigned ratio, wait, hold, reg;
+- unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
++ const char *name = clk_hw_get_name(hw);
++ unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;
+
+ fin = parent_rate;
+ ratio = fin / rate;
+diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c
+index e038b0447206..8bdab1c3013b 100644
+--- a/drivers/clk/sprd/common.c
++++ b/drivers/clk/sprd/common.c
+@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
+ struct clk_hw *hw;
+
+ for (i = 0; i < clkhw->num; i++) {
++ const char *name;
+
+ hw = clkhw->hws[i];
+-
+ if (!hw)
+ continue;
+
++ name = hw->init->name;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "Couldn't register clock %d - %s\n",
+- i, hw->init->name);
++ i, name);
+ return ret;
+ }
+ }
+diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
+index 36b4402bf09e..640270f51aa5 100644
+--- a/drivers/clk/sprd/pll.c
++++ b/drivers/clk/sprd/pll.c
+@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
+ k2 + refin * nint * CLK_PLL_1M;
+ }
+
++ kfree(cfg);
+ return rate;
+ }
+
+@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
+ if (!ret)
+ udelay(pll->udelay);
+
++ kfree(cfg);
+ return ret;
+ }
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index cbbf06d42c2c..408a6750ddda 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -493,6 +493,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
++ [CLK_MMC2] = &mmc2_clk.common.hw,
++ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
++ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
+index fd6c347bec6a..dd7045bc48c1 100644
+--- a/drivers/clk/zte/clk-zx296718.c
++++ b/drivers/clk/zte/clk-zx296718.c
+@@ -564,6 +564,7 @@ static int __init top_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -573,11 +574,10 @@ static int __init top_clocks_init(struct device_node *np)
+
+ for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
+ zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
++ name = zx296718_pll_clk[i].hw.init->name;
+ ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- zx296718_pll_clk[i].hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
+@@ -585,11 +585,10 @@ static int __init top_clocks_init(struct device_node *np)
+ top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
+ &top_ffactor_clk[i].factor.hw;
+
++ name = top_ffactor_clk[i].factor.hw.init->name;
+ ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_ffactor_clk[i].factor.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
+@@ -598,11 +597,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_mux_clk[i].mux.hw;
+
+ top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = top_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
+@@ -611,11 +609,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_gate_clk[i].gate.hw;
+
+ top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = top_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
+@@ -624,11 +621,10 @@ static int __init top_clocks_init(struct device_node *np)
+ &top_div_clk[i].div.hw;
+
+ top_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = top_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("top clk %s init error!\n",
+- top_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("top clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -754,6 +750,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -767,11 +764,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_mux_clk[i].mux.hw;
+
+ lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = lsp0_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
+@@ -780,11 +776,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_gate_clk[i].gate.hw;
+
+ lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = lsp0_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
+@@ -793,11 +788,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
+ &lsp0_div_clk[i].div.hw;
+
+ lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = lsp0_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("lsp0 clk %s init error!\n",
+- lsp0_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp0 clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -862,6 +856,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -875,11 +870,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp0_mux_clk[i].mux.hw;
+
+ lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = lsp1_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
+@@ -888,11 +882,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp1_gate_clk[i].gate.hw;
+
+ lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = lsp1_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
+@@ -901,11 +894,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
+ &lsp1_div_clk[i].div.hw;
+
+ lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = lsp1_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("lsp1 clk %s init error!\n",
+- lsp1_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("lsp1 clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+@@ -979,6 +971,7 @@ static int __init audio_clocks_init(struct device_node *np)
+ {
+ void __iomem *reg_base;
+ int i, ret;
++ const char *name;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+@@ -992,11 +985,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_mux_clk[i].mux.hw;
+
+ audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
++ name = audio_mux_clk[i].mux.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_mux_clk[i].mux.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
+@@ -1005,11 +997,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_adiv_clk[i].hw;
+
+ audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
++ name = audio_adiv_clk[i].hw.init->name;
+ ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_adiv_clk[i].hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
+@@ -1018,11 +1009,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_div_clk[i].div.hw;
+
+ audio_div_clk[i].div.reg += (uintptr_t)reg_base;
++ name = audio_div_clk[i].div.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_div_clk[i].div.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
+@@ -1031,11 +1021,10 @@ static int __init audio_clocks_init(struct device_node *np)
+ &audio_gate_clk[i].gate.hw;
+
+ audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
++ name = audio_gate_clk[i].gate.hw.init->name;
+ ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
+- if (ret) {
+- pr_warn("audio clk %s init error!\n",
+- audio_gate_clk[i].gate.hw.init->name);
+- }
++ if (ret)
++ pr_warn("audio clk %s init error!\n", name);
+ }
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
+diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
+index 02768af0dccd..8c789b8671fc 100644
+--- a/drivers/crypto/hisilicon/sec/sec_algs.c
++++ b/drivers/crypto/hisilicon/sec/sec_algs.c
+@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+ dma_addr_t psec_sgl, struct sec_dev_info *info)
+ {
+ struct sec_hw_sgl *sgl_current, *sgl_next;
++ dma_addr_t sgl_next_dma;
+
+- if (!hw_sgl)
+- return;
+ sgl_current = hw_sgl;
+- while (sgl_current->next) {
++ while (sgl_current) {
+ sgl_next = sgl_current->next;
+- dma_pool_free(info->hw_sgl_pool, sgl_current,
+- sgl_current->next_sgl);
++ sgl_next_dma = sgl_current->next_sgl;
++
++ dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
++
+ sgl_current = sgl_next;
++ psec_sgl = sgl_next_dma;
+ }
+- dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
+ }
+
+ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
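(The hunk above fixes a linked-list free bug: each element was freed with
the next element's DMA handle, the head could be freed twice, and the tail
of longer chains leaked. The corrected shape -- a generic sketch with a
hypothetical node type, kernel-context includes assumed -- saves the next
pointer and handle before freeing, so every node is freed with its own
handle:)

	struct sgl_node {
		struct sgl_node *next;	/* CPU pointer to next node */
		dma_addr_t next_sgl;	/* DMA handle of next node */
	};

	static void free_sgl_chain(struct dma_pool *pool,
				   struct sgl_node *cur, dma_addr_t cur_dma)
	{
		while (cur) {
			struct sgl_node *next = cur->next;
			dma_addr_t next_dma = cur->next_sgl;

			dma_pool_free(pool, cur, cur_dma);	/* own handle */
			cur = next;
			cur_dma = next_dma;
		}
	}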
+diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
+index 051f6c2873c7..6713cfb1995c 100644
+--- a/drivers/dma-buf/sw_sync.c
++++ b/drivers/dma-buf/sw_sync.c
+@@ -132,17 +132,14 @@ static void timeline_fence_release(struct dma_fence *fence)
+ {
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
++ unsigned long flags;
+
++ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+- unsigned long flags;
+-
+- spin_lock_irqsave(fence->lock, flags);
+- if (!list_empty(&pt->link)) {
+- list_del(&pt->link);
+- rb_erase(&pt->node, &parent->pt_tree);
+- }
+- spin_unlock_irqrestore(fence->lock, flags);
++ list_del(&pt->link);
++ rb_erase(&pt->node, &parent->pt_tree);
+ }
++ spin_unlock_irqrestore(fence->lock, flags);
+
+ sync_timeline_put(parent);
+ dma_fence_free(fence);
+@@ -265,7 +262,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+ p = &parent->rb_left;
+ } else {
+ if (dma_fence_get_rcu(&other->base)) {
+- dma_fence_put(&pt->base);
++ sync_timeline_put(obj);
++ kfree(pt);
+ pt = other;
+ goto unlock;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index e47609218839..bf0c61baa05c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -137,14 +137,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
+ fb_tiled);
+ domain = amdgpu_display_supported_domains(adev);
+-
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitches[0] * height;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+ ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+- AMDGPU_GEM_CREATE_VRAM_CLEARED,
++ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
++ AMDGPU_GEM_CREATE_VRAM_CLEARED |
++ AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ ttm_bo_type_kernel, NULL, &gobj);
+ if (ret) {
+ pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
+@@ -166,7 +166,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ dev_err(adev->dev, "FB failed to set tiling flags\n");
+ }
+
+-
+ ret = amdgpu_bo_pin(abo, domain);
+ if (ret) {
+ amdgpu_bo_unreserve(abo);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index d4fcf5475464..6fc77ac814d8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -746,7 +746,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+- u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
++ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
++ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ u32 domain;
+ int r;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
+index 9d8df68893b9..1e34dfc14355 100644
+--- a/drivers/gpu/drm/amd/amdgpu/si.c
++++ b/drivers/gpu/drm/amd/amdgpu/si.c
+@@ -1867,7 +1867,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
+ if (orig != data)
+ si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);
+
+- if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
++ if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
+ orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
+ data &= ~PLL_RAMP_UP_TIME_0_MASK;
+ if (orig != data)
+@@ -1916,14 +1916,14 @@ static void si_program_aspm(struct amdgpu_device *adev)
+
+ orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);
+
+ orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
+ data &= ~LS2_EXIT_TIME_MASK;
+- if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
++ if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
+ data |= LS2_EXIT_TIME(5);
+ if (orig != data)
+ si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 0a7adc2925e3..191f5757ded1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -2016,6 +2016,14 @@ void dc_set_power_state(
+ dc_resource_state_construct(dc, dc->current_state);
+
+ dc->hwss.init_hw(dc);
++
++#ifdef CONFIG_DRM_AMD_DC_DCN2_0
++ if (dc->hwss.init_sys_ctx != NULL &&
++ dc->vm_pa_config.valid) {
++ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
++ }
++#endif
++
+ break;
+ default:
+ ASSERT(dc->current_state->stream_count == 0);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+index b0dea759cd86..8aecf044e2ae 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+@@ -154,6 +154,10 @@ bool edp_receiver_ready_T7(struct dc_link *link)
+ break;
+ udelay(25); //MAx T7 is 50ms
+ } while (++tries < 300);
++
++ if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
++ udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
++
+ return result;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index b459ce056b60..c404b5e930f0 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -261,12 +261,10 @@ bool resource_construct(
+ DC_ERR("DC: failed to create audio!\n");
+ return false;
+ }
+-
+ if (!aud->funcs->endpoint_valid(aud)) {
+ aud->funcs->destroy(&aud);
+ break;
+ }
+-
+ pool->audios[i] = aud;
+ pool->audio_count++;
+ }
+@@ -1692,24 +1690,25 @@ static struct audio *find_first_free_audio(
+ const struct resource_pool *pool,
+ enum engine_id id)
+ {
+- int i;
+- for (i = 0; i < pool->audio_count; i++) {
++ int i, available_audio_count;
++
++ available_audio_count = pool->audio_count;
++
++ for (i = 0; i < available_audio_count; i++) {
+ if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ /*we have enough audio endpoint, find the matching inst*/
+ if (id != i)
+ continue;
+-
+ return pool->audios[i];
+ }
+ }
+
+- /* use engine id to find free audio */
+- if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
++ /* use engine id to find free audio */
++ if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+ return pool->audios[id];
+ }
+-
+ /*not found the matching one, first come first serve*/
+- for (i = 0; i < pool->audio_count; i++) {
++ for (i = 0; i < available_audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+ return pool->audios[i];
+ }
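
[Editorial aside: the functional change in find_first_free_audio() is small — the pool count is read once into available_audio_count — but the selection logic it touches is easier to follow in one piece. A userspace sketch of the three-pass pick, with illustrative arrays standing in for the resource context:

#include <stdbool.h>
#include <stdio.h>

#define AUDIO_COUNT 6

static bool audio_acquired[AUDIO_COUNT];
static bool stream_enc_acquired[AUDIO_COUNT];

static int find_first_free_audio(int id)
{
        int i;

        /* Pass 1: exact engine-id match among active stream encoders. */
        for (i = 0; i < AUDIO_COUNT; i++)
                if (!audio_acquired[i] && stream_enc_acquired[i] && i == id)
                        return i;

        /* Pass 2: the id-indexed endpoint, if still free. */
        if (id < AUDIO_COUNT && !audio_acquired[id])
                return id;

        /* Pass 3: first come, first served. */
        for (i = 0; i < AUDIO_COUNT; i++)
                if (!audio_acquired[i])
                        return i;

        return -1;      /* pool exhausted */
}

int main(void)
{
        audio_acquired[2] = true;       /* endpoint 2 already taken */
        printf("engine 2 -> endpoint %d\n", find_first_free_audio(2));
        return 0;
}
]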
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index 6c2a3d9a4c2e..283082666be5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -202,6 +202,7 @@ struct dc_panel_patch {
+ unsigned int dppowerup_delay;
+ unsigned int extra_t12_ms;
+ unsigned int extra_delay_backlight_off;
++ unsigned int extra_t7_ms;
+ };
+
+ struct dc_edid_caps {
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+index 7f6d724686f1..abb559ce6408 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+@@ -611,6 +611,8 @@ void dce_aud_az_configure(
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ value);
++ DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
++ audio->inst, value, audio_info->display_name);
+
+ /*
+ *write the port ID:
+@@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+ };
+-
+ void dce_aud_destroy(struct audio **audio)
+ {
+ struct dce_audio *aud = DCE_AUD(*audio);
+@@ -953,7 +954,6 @@ struct audio *dce_audio_create(
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+-
+ return &audio->base;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+index 7469333a2c8a..8166fdbacd73 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+@@ -357,9 +357,10 @@ bool cm_helper_translate_curve_to_hw_format(
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
++ seg_distr[10] = 1;
+
+ region_start = -10;
+- region_end = 0;
++ region_end = 1;
+ }
+
+ for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index 19b1eaebe484..000a9db9dad8 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -433,6 +433,12 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
+ /* Either we've calculated the number of frames to insert,
+ * or we need to insert min duration frames
+ */
++ if (last_render_time_in_us / frames_to_insert <
++ in_out_vrr->min_duration_in_us){
++ frames_to_insert -= (frames_to_insert > 1) ?
++ 1 : 0;
++ }
++
+ if (frames_to_insert > 0)
+ inserted_frame_duration_in_us = last_render_time_in_us /
+ frames_to_insert;
+@@ -885,8 +891,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ struct core_freesync *core_freesync = NULL;
+ unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned int refresh_range = 0;
+- unsigned int min_refresh_in_uhz = 0;
+- unsigned int max_refresh_in_uhz = 0;
++ unsigned long long min_refresh_in_uhz = 0;
++ unsigned long long max_refresh_in_uhz = 0;
+
+ if (mod_freesync == NULL)
+ return;
+@@ -913,7 +919,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ min_refresh_in_uhz = nominal_field_rate_in_uhz;
+
+ if (!vrr_settings_require_update(core_freesync,
+- in_config, min_refresh_in_uhz, max_refresh_in_uhz,
++ in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
+ in_out_vrr))
+ return;
+
+@@ -929,15 +935,15 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ return;
+
+ } else {
+- in_out_vrr->min_refresh_in_uhz = min_refresh_in_uhz;
++ in_out_vrr->min_refresh_in_uhz = (unsigned int)min_refresh_in_uhz;
+ in_out_vrr->max_duration_in_us =
+ calc_duration_in_us_from_refresh_in_uhz(
+- min_refresh_in_uhz);
++ (unsigned int)min_refresh_in_uhz);
+
+- in_out_vrr->max_refresh_in_uhz = max_refresh_in_uhz;
++ in_out_vrr->max_refresh_in_uhz = (unsigned int)max_refresh_in_uhz;
+ in_out_vrr->min_duration_in_us =
+ calc_duration_in_us_from_refresh_in_uhz(
+- max_refresh_in_uhz);
++ (unsigned int)max_refresh_in_uhz);
+
+ refresh_range = in_out_vrr->max_refresh_in_uhz -
+ in_out_vrr->min_refresh_in_uhz;
+@@ -948,17 +954,18 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
+ in_out_vrr->fixed.ramping_active = in_config->ramping;
+
+ in_out_vrr->btr.btr_enabled = in_config->btr;
++
+ if (in_out_vrr->max_refresh_in_uhz <
+ 2 * in_out_vrr->min_refresh_in_uhz)
+ in_out_vrr->btr.btr_enabled = false;
++
+ in_out_vrr->btr.btr_active = false;
+ in_out_vrr->btr.inserted_duration_in_us = 0;
+ in_out_vrr->btr.frames_to_insert = 0;
+ in_out_vrr->btr.frame_counter = 0;
+ in_out_vrr->btr.mid_point_in_us =
+- in_out_vrr->min_duration_in_us +
+- (in_out_vrr->max_duration_in_us -
+- in_out_vrr->min_duration_in_us) / 2;
++ (in_out_vrr->min_duration_in_us +
++ in_out_vrr->max_duration_in_us) / 2;
+
+ if (in_out_vrr->state == VRR_STATE_UNSUPPORTED) {
+ in_out_vrr->adjust.v_total_min = stream->timing.v_total;
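
[Editorial aside: the new guard in apply_below_the_range() drops one inserted frame whenever the division would produce frames shorter than the panel's minimum duration. A worked example with illustrative numbers, not taken from the driver:

#include <stdio.h>

int main(void)
{
        unsigned int last_render_time_in_us = 25000;
        unsigned int min_duration_in_us = 6944;     /* ~144 Hz upper cap */
        unsigned int frames_to_insert = 4;

        /* 25000 / 4 = 6250 us per frame, below the 6944 us minimum,
         * so one frame is dropped. */
        if (last_render_time_in_us / frames_to_insert < min_duration_in_us)
                frames_to_insert -= (frames_to_insert > 1) ? 1 : 0;

        printf("%u frames of %u us each\n", frames_to_insert,
               last_render_time_in_us / frames_to_insert);
        /* prints: 3 frames of 8333 us each -- back inside the panel range */
        return 0;
}
]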
+diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+index 3666c308c34a..53676b5fec68 100644
+--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
++++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+@@ -1036,16 +1036,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
+ if (ret)
+ return ret;
+
++ /* Check whether panel supports fast training */
++ ret = analogix_dp_fast_link_train_detection(dp);
++ if (ret)
++ dp->psr_enable = false;
++
+ if (dp->psr_enable) {
+ ret = analogix_dp_enable_sink_psr(dp);
+ if (ret)
+ return ret;
+ }
+
+- /* Check whether panel supports fast training */
+- ret = analogix_dp_fast_link_train_detection(dp);
+- if (ret)
+- dp->psr_enable = false;
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index f59a51e19dab..d515c7cebb9c 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -293,7 +293,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+ {
+ struct tc_data *tc = aux_to_tc(aux);
+- size_t size = min_t(size_t, 8, msg->size);
++ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
+ u8 request = msg->request & ~DP_AUX_I2C_MOT;
+ u8 *buf = msg->buffer;
+ u32 tmp = 0;
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+index 283ff690350e..50303ec194bb 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+@@ -320,7 +320,9 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
+ asyh->wndw.olut &= ~BIT(wndw->id);
+ }
+
+- if (!ilut && wndw->func->ilut_identity) {
++ if (!ilut && wndw->func->ilut_identity &&
++ asyw->state.fb->format->format != DRM_FORMAT_XBGR16161616F &&
++ asyw->state.fb->format->format != DRM_FORMAT_ABGR16161616F) {
+ static struct drm_property_blob dummy = {};
+ ilut = &dummy;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+index 7143ea4611aa..33a9fb5ac558 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/volt.c
+@@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+ info->min = min(info->base,
+ info->base + info->step * info->vidmask);
+ info->max = nvbios_rd32(bios, volt + 0x0e);
++ if (!info->max)
++ info->max = max(info->base, info->base + info->step * info->vidmask);
+ break;
+ case 0x50:
+ info->min = nvbios_rd32(bios, volt + 0x0a);
+diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+index 2c9c9722734f..9a2cb8aeab3a 100644
+--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
++++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+@@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+
+ /* Look up the DSI host. It needs to probe before we do. */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
++ if (!endpoint)
++ return -ENODEV;
++
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
++ if (!dsi_host_node)
++ goto error;
++
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host) {
+@@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ }
+
+ info.node = of_graph_get_remote_port(endpoint);
++ if (!info.node)
++ goto error;
++
+ of_node_put(endpoint);
+
+ ts->dsi = mipi_dsi_device_register_full(host, &info);
+@@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
+ return ret;
+
+ return 0;
++
++error:
++ of_node_put(endpoint);
++ return -ENODEV;
+ }
+
+ static int rpi_touchscreen_remove(struct i2c_client *i2c)
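
[Editorial aside: each of_graph lookup in the probe path now gets a NULL check, and every failure funnels through one error label that releases the endpoint reference. A self-contained sketch of that single-exit cleanup idiom; get_ref()/put_ref() stand in for the of_node refcounting and are not kernel API:

#include <errno.h>
#include <stdio.h>

struct node { int refs; };

static void get_ref(struct node *n) { if (n) n->refs++; }
static void put_ref(struct node *n) { if (n) n->refs--; }

static int probe(struct node *endpoint, struct node *host, struct node *port)
{
        if (!endpoint)
                return -ENODEV;
        get_ref(endpoint);

        if (!host)
                goto error;             /* must not leak the endpoint ref */
        if (!port)
                goto error;

        put_ref(endpoint);              /* success path drops it too */
        return 0;

error:
        put_ref(endpoint);
        return -ENODEV;
}

int main(void)
{
        struct node ep = { 1 };         /* one ref held by the caller */

        probe(&ep, NULL, NULL);         /* a lookup fails mid-probe */
        printf("refs after probe: %d\n", ep.refs);  /* still 1: balanced */
        return 0;
}
]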
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 397a3086eac8..95e430f9fea4 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -723,9 +723,9 @@ static const struct panel_desc auo_g133han01 = {
+ static const struct display_timing auo_g185han01_timings = {
+ .pixelclock = { 120000000, 144000000, 175000000 },
+ .hactive = { 1920, 1920, 1920 },
+- .hfront_porch = { 18, 60, 74 },
+- .hback_porch = { 12, 44, 54 },
+- .hsync_len = { 10, 24, 32 },
++ .hfront_porch = { 36, 120, 148 },
++ .hback_porch = { 24, 88, 108 },
++ .hsync_len = { 20, 48, 64 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 6, 10, 40 },
+ .vback_porch = { 2, 5, 20 },
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index de1745adcccc..c7f2e073a82f 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -752,7 +752,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
+
+ radeon_encoder->output_csc = val;
+
+- if (connector->encoder->crtc) {
++ if (connector->encoder && connector->encoder->crtc) {
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 2e96c886392b..60ee51edd782 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -344,11 +344,19 @@ radeon_pci_remove(struct pci_dev *pdev)
+ static void
+ radeon_pci_shutdown(struct pci_dev *pdev)
+ {
++ struct drm_device *ddev = pci_get_drvdata(pdev);
++
+ /* if we are running in a VM, make sure the device
+ * torn down properly on reboot/shutdown
+ */
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
++
++ /* Some adapters need to be suspended before a
++ * shutdown occurs in order to prevent an error
++ * during kexec.
++ */
++ radeon_suspend_kms(ddev, true, true, false);
+ }
+
+ static int radeon_pmops_suspend(struct device *dev)
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 32fd6a3b37fb..6f1fef76671c 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -25,6 +25,7 @@
+ #include <drm/drm_fb_cma_helper.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_gem_cma_helper.h>
++#include <drm/drm_gem_framebuffer_helper.h>
+ #include <drm/drm_of.h>
+ #include <drm/drm_plane_helper.h>
+ #include <drm/drm_probe_helper.h>
+@@ -875,6 +876,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
+ };
+
+ static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
++ .prepare_fb = drm_gem_fb_prepare_fb,
+ .atomic_check = ltdc_plane_atomic_check,
+ .atomic_update = ltdc_plane_atomic_update,
+ .atomic_disable = ltdc_plane_atomic_disable,
+diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
+index 87819c82bcce..f2f0739d1035 100644
+--- a/drivers/gpu/drm/tinydrm/Kconfig
++++ b/drivers/gpu/drm/tinydrm/Kconfig
+@@ -14,8 +14,8 @@ config TINYDRM_MIPI_DBI
+ config TINYDRM_HX8357D
+ tristate "DRM support for HX8357D display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following HX8357D panels:
+ * YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
+@@ -35,8 +35,8 @@ config TINYDRM_ILI9225
+ config TINYDRM_ILI9341
+ tristate "DRM support for ILI9341 display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9341 panels:
+ * YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
+@@ -46,8 +46,8 @@ config TINYDRM_ILI9341
+ config TINYDRM_MI0283QT
+ tristate "DRM support for MI0283QT"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the Multi-Inno MI0283QT display panel
+ If M is selected the module will be called mi0283qt.
+@@ -78,8 +78,8 @@ config TINYDRM_ST7586
+ config TINYDRM_ST7735R
+ tristate "DRM support for Sitronix ST7735R display panels"
+ depends on DRM_TINYDRM && SPI
+- depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
++ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver Sitronix ST7735R with one of the following LCDs:
+ * JD-T18003-T01 1.8" 128x160 TFT
+diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
+index d7b409a3c0f8..66603da634fe 100644
+--- a/drivers/gpu/drm/vkms/vkms_crc.c
++++ b/drivers/gpu/drm/vkms/vkms_crc.c
+@@ -166,16 +166,24 @@ void vkms_crc_work_handle(struct work_struct *work)
+ struct drm_plane *plane;
+ u32 crc32 = 0;
+ u64 frame_start, frame_end;
++ bool crc_pending;
+ unsigned long flags;
+
+ spin_lock_irqsave(&out->state_lock, flags);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
++ crc_pending = crtc_state->crc_pending;
++ crtc_state->frame_start = 0;
++ crtc_state->frame_end = 0;
++ crtc_state->crc_pending = false;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+
+- /* _vblank_handle() hasn't updated frame_start yet */
+- if (!frame_start || frame_start == frame_end)
+- goto out;
++ /*
++ * We raced with the vblank hrtimer and previous work already computed
++ * the crc, nothing to do.
++ */
++ if (!crc_pending)
++ return;
+
+ drm_for_each_plane(plane, &vdev->drm) {
+ struct vkms_plane_state *vplane_state;
+@@ -196,20 +204,11 @@ void vkms_crc_work_handle(struct work_struct *work)
+ if (primary_crc)
+ crc32 = _vkms_get_crc(primary_crc, cursor_crc);
+
+- frame_end = drm_crtc_accurate_vblank_count(crtc);
+-
+- /* queue_work can fail to schedule crc_work; add crc for
+- * missing frames
++ /*
++ * The worker can fall behind the vblank hrtimer, make sure we catch up.
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+-
+-out:
+- /* to avoid using the same value for frame number again */
+- spin_lock_irqsave(&out->state_lock, flags);
+- crtc_state->frame_end = frame_end;
+- crtc_state->frame_start = 0;
+- spin_unlock_irqrestore(&out->state_lock, flags);
+ }
+
+ static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index e447b7588d06..77a1f5fa5d5c 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -30,13 +30,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+ * has read the data
+ */
+ spin_lock(&output->state_lock);
+- if (!state->frame_start)
++ if (!state->crc_pending)
+ state->frame_start = frame;
++ else
++ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
++ state->frame_start, frame);
++ state->frame_end = frame;
++ state->crc_pending = true;
+ spin_unlock(&output->state_lock);
+
+ ret = queue_work(output->crc_workq, &state->crc_work);
+ if (!ret)
+- DRM_WARN("failed to queue vkms_crc_work_handle");
++ DRM_DEBUG_DRIVER("vkms_crc_work_handle already queued\n");
+ }
+
+ spin_unlock(&output->lock);
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
+index 738dd6206d85..92296bd8f623 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.c
++++ b/drivers/gpu/drm/vkms/vkms_drv.c
+@@ -92,7 +92,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
+ dev->mode_config.max_height = YRES_MAX;
+ dev->mode_config.preferred_depth = 24;
+
+- return vkms_output_init(vkmsdev);
++ return vkms_output_init(vkmsdev, 0);
+ }
+
+ static int __init vkms_init(void)
+diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
+index 81f1cfbeb936..a0adcc86079f 100644
+--- a/drivers/gpu/drm/vkms/vkms_drv.h
++++ b/drivers/gpu/drm/vkms/vkms_drv.h
+@@ -56,6 +56,8 @@ struct vkms_plane_state {
+ struct vkms_crtc_state {
+ struct drm_crtc_state base;
+ struct work_struct crc_work;
++
++ bool crc_pending;
+ u64 frame_start;
+ u64 frame_end;
+ };
+@@ -113,10 +115,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+-int vkms_output_init(struct vkms_device *vkmsdev);
++int vkms_output_init(struct vkms_device *vkmsdev, int index);
+
+ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+- enum drm_plane_type type);
++ enum drm_plane_type type, int index);
+
+ /* Gem stuff */
+ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
+index 3b162b25312e..1442b447c707 100644
+--- a/drivers/gpu/drm/vkms/vkms_output.c
++++ b/drivers/gpu/drm/vkms/vkms_output.c
+@@ -36,7 +36,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
+ .get_modes = vkms_conn_get_modes,
+ };
+
+-int vkms_output_init(struct vkms_device *vkmsdev)
++int vkms_output_init(struct vkms_device *vkmsdev, int index)
+ {
+ struct vkms_output *output = &vkmsdev->output;
+ struct drm_device *dev = &vkmsdev->drm;
+@@ -46,12 +46,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
+ struct drm_plane *primary, *cursor = NULL;
+ int ret;
+
+- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
++ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ if (enable_cursor) {
+- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
++ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto err_cursor;
+diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
+index 0e67d2d42f0c..20ffc52f9194 100644
+--- a/drivers/gpu/drm/vkms/vkms_plane.c
++++ b/drivers/gpu/drm/vkms/vkms_plane.c
+@@ -168,7 +168,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
+ };
+
+ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+- enum drm_plane_type type)
++ enum drm_plane_type type, int index)
+ {
+ struct drm_device *dev = &vkmsdev->drm;
+ const struct drm_plane_helper_funcs *funcs;
+@@ -190,7 +190,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ funcs = &vkms_primary_helper_funcs;
+ }
+
+- ret = drm_universal_plane_init(dev, plane, 0,
++ ret = drm_universal_plane_init(dev, plane, 1 << index,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, type, NULL);
+diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
+index 81df62f48c4c..6ac8becc2372 100644
+--- a/drivers/hid/hid-apple.c
++++ b/drivers/hid/hid-apple.c
+@@ -54,7 +54,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
+ struct apple_sc {
+ unsigned long quirks;
+ unsigned int fn_on;
+- DECLARE_BITMAP(pressed_fn, KEY_CNT);
+ DECLARE_BITMAP(pressed_numlock, KEY_CNT);
+ };
+
+@@ -181,6 +180,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ {
+ struct apple_sc *asc = hid_get_drvdata(hid);
+ const struct apple_key_translation *trans, *table;
++ bool do_translate;
++ u16 code = 0;
+
+ if (usage->code == KEY_FN) {
+ asc->fn_on = !!value;
+@@ -189,8 +190,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ }
+
+ if (fnmode) {
+- int do_translate;
+-
+ if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
+ hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
+ table = macbookair_fn_keys;
+@@ -202,25 +201,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
+ trans = apple_find_translation (table, usage->code);
+
+ if (trans) {
+- if (test_bit(usage->code, asc->pressed_fn))
+- do_translate = 1;
+- else if (trans->flags & APPLE_FLAG_FKEY)
+- do_translate = (fnmode == 2 && asc->fn_on) ||
+- (fnmode == 1 && !asc->fn_on);
+- else
+- do_translate = asc->fn_on;
+-
+- if (do_translate) {
+- if (value)
+- set_bit(usage->code, asc->pressed_fn);
+- else
+- clear_bit(usage->code, asc->pressed_fn);
+-
+- input_event(input, usage->type, trans->to,
+- value);
+-
+- return 1;
++ if (test_bit(trans->from, input->key))
++ code = trans->from;
++ else if (test_bit(trans->to, input->key))
++ code = trans->to;
++
++ if (!code) {
++ if (trans->flags & APPLE_FLAG_FKEY) {
++ switch (fnmode) {
++ case 1:
++ do_translate = !asc->fn_on;
++ break;
++ case 2:
++ do_translate = asc->fn_on;
++ break;
++ default:
++ /* should never happen */
++ do_translate = false;
++ }
++ } else {
++ do_translate = asc->fn_on;
++ }
++
++ code = do_translate ? trans->to : trans->from;
+ }
++
++ input_event(input, usage->type, code, value);
++ return 1;
+ }
+
+ if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 53bddb50aeba..602219a8710d 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -88,7 +88,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
+ }
+
+ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+- struct hid_report *report, u8 *raw_data, int size)
++ struct hid_report *report, u8 *raw_data, int report_size)
+ {
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+@@ -149,7 +149,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+- wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
++ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
++ raw_data, report_size);
+
+ return insert && !flush;
+ }
+@@ -2176,7 +2177,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
+ {
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+- char name[WACOM_NAME_MAX];
++ char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
+
+ /* Generic devices name unspecified */
+ if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 58719461850d..6be98851edca 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -251,7 +251,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
+
+ static int wacom_dtus_irq(struct wacom_wac *wacom)
+ {
+- char *data = wacom->data;
++ unsigned char *data = wacom->data;
+ struct input_dev *input = wacom->pen_input;
+ unsigned short prox, pressure = 0;
+
+@@ -572,7 +572,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
+ strip2 = ((data[3] & 0x1f) << 8) | data[4];
+ }
+
+- prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
++ prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
+ (ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
+
+ wacom_report_numbered_buttons(input, nbuttons, buttons);
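
[Editorial aside on the prox fix above: it replaces ~0 with ~0U because left-shifting the all-ones signed value is undefined behavior in C, while the unsigned form is well defined for shift counts below the type width. A small demo of a safe mask helper:

#include <assert.h>

static unsigned int low_bits_mask(unsigned int n)
{
        /* n == 32 needs its own case: shifting a 32-bit value by 32 is
         * undefined as well. */
        return (n >= 32) ? ~0U : ~(~0U << n);
}

int main(void)
{
        assert(low_bits_mask(0) == 0x0U);
        assert(low_bits_mask(3) == 0x7U);   /* e.g. buttons & mask keeps 3 bits */
        assert(low_bits_mask(32) == 0xFFFFFFFFU);
        return 0;
}
]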
+diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
+index 66af44bfa67d..f6546de66fbc 100644
+--- a/drivers/i2c/busses/i2c-cht-wc.c
++++ b/drivers/i2c/busses/i2c-cht-wc.c
+@@ -178,6 +178,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
+ .smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
+ };
+
++/*
++ * We are an i2c-adapter which itself is part of an i2c-client. This means that
++ * transfers done through us take adapter->bus_lock twice, once for our parent
++ * i2c-adapter and once to take our own bus_lock. Lockdep does not like this
++ * nested locking, to make lockdep happy in the case of busses with muxes, the
++ * i2c-core's i2c_adapter_lock_bus function calls:
++ * rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
++ *
++ * But i2c_adapter_depth only works when the direct parent of the adapter is
++ * another adapter, as it is only meant for muxes. In our case there is an
++ * i2c-client and MFD instantiated platform_device in the parent->child chain
++ * between the 2 devices.
++ *
++ * So we override the default i2c_lock_operations and pass a hardcoded
++ * depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
++ *
++ * Note that if there were to be a mux attached to our adapter, this would
++ * break things again since the i2c-mux code expects the root-adapter to have
++ * a locking depth of 0. But we always have only 1 client directly attached
++ * in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
++ */
++static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ rt_mutex_lock_nested(&adapter->bus_lock, 1);
++}
++
++static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ return rt_mutex_trylock(&adapter->bus_lock);
++}
++
++static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
++ unsigned int flags)
++{
++ rt_mutex_unlock(&adapter->bus_lock);
++}
++
++static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
++ .lock_bus = cht_wc_i2c_adap_lock_bus,
++ .trylock_bus = cht_wc_i2c_adap_trylock_bus,
++ .unlock_bus = cht_wc_i2c_adap_unlock_bus,
++};
++
+ /**** irqchip for the client connected to the extchgr i2c adapter ****/
+ static void cht_wc_i2c_irq_lock(struct irq_data *data)
+ {
+@@ -286,6 +331,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
+ adap->adapter.owner = THIS_MODULE;
+ adap->adapter.class = I2C_CLASS_HWMON;
+ adap->adapter.algo = &cht_wc_i2c_adap_algo;
++ adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
+ strlcpy(adap->adapter.name, "PMIC I2C Adapter",
+ sizeof(adap->adapter.name));
+ adap->adapter.dev.parent = &pdev->dev;
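
[Editorial aside: the block comment in the hunk explains the why; the how is the ops-override pattern, where a const table of function pointers replaces the adapter's default locking strategy wholesale. A userspace model of that shape — rt_mutex_lock_nested() is the real kernel call being wrapped; everything below is illustrative:

#include <pthread.h>
#include <stdio.h>

struct bus;

struct lock_ops {
        void (*lock)(struct bus *b);
        void (*unlock)(struct bus *b);
};

struct bus {
        pthread_mutex_t bus_lock;
        const struct lock_ops *lock_ops;
};

static void default_lock(struct bus *b)   { pthread_mutex_lock(&b->bus_lock); }
static void default_unlock(struct bus *b) { pthread_mutex_unlock(&b->bus_lock); }

static void nested_lock(struct bus *b)
{
        /* Kernel analogue: rt_mutex_lock_nested(&adapter->bus_lock, 1),
         * i.e. the same lock taken with an explicit lockdep subclass. */
        printf("taking bus_lock at nesting depth 1\n");
        pthread_mutex_lock(&b->bus_lock);
}

static const struct lock_ops default_ops = { default_lock, default_unlock };
static const struct lock_ops nested_ops  = { nested_lock,  default_unlock };

int main(void)
{
        struct bus b = { PTHREAD_MUTEX_INITIALIZER, &default_ops };

        b.lock_ops = &nested_ops;       /* the override done in probe() */
        b.lock_ops->lock(&b);
        b.lock_ops->unlock(&b);
        return 0;
}
]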
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index 00d5219094e5..48bba4913952 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -22,6 +22,7 @@
+ #define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
+
+ #define CMDQ_CURR_IRQ_STATUS 0x10
++#define CMDQ_SYNC_TOKEN_UPDATE 0x68
+ #define CMDQ_THR_SLOT_CYCLES 0x30
+ #define CMDQ_THR_BASE 0x100
+ #define CMDQ_THR_SIZE 0x80
+@@ -104,8 +105,12 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
+
+ static void cmdq_init(struct cmdq *cmdq)
+ {
++ int i;
++
+ WARN_ON(clk_enable(cmdq->clock) < 0);
+ writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
++ for (i = 0; i <= CMDQ_MAX_EVENT; i++)
++ writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
+ clk_disable(cmdq->clock);
+ }
+
+diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+index 705e17a5479c..d3676fd3cf94 100644
+--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+@@ -47,7 +47,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+
+ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ {
+- struct device_node *np = pdev->dev.of_node;
+ struct qcom_apcs_ipc *apcs;
+ struct regmap *regmap;
+ struct resource *res;
+@@ -55,6 +54,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ void __iomem *base;
+ unsigned long i;
+ int ret;
++ const struct of_device_id apcs_clk_match_table[] = {
++ { .compatible = "qcom,msm8916-apcs-kpss-global", },
++ { .compatible = "qcom,qcs404-apcs-apps-global", },
++ {}
++ };
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+@@ -89,7 +93,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
++ if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
+ apcs->clk = platform_device_register_data(&pdev->dev,
+ "qcom-apcs-msm8916-clk",
+ -1, NULL, 0);
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 01aaac2c15be..8efd15e40a28 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3738,18 +3738,18 @@ static int raid_iterate_devices(struct dm_target *ti,
+ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ {
+ struct raid_set *rs = ti->private;
+- unsigned int chunk_size = to_bytes(rs->md.chunk_sectors);
++ unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
+
+- blk_limits_io_min(limits, chunk_size);
+- blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
++ blk_limits_io_min(limits, chunk_size_bytes);
++ blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
+
+ /*
+ * RAID1 and RAID10 personalities require bio splitting,
+ * RAID0/4/5/6 don't and process large discard bios properly.
+ */
+ if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+- limits->discard_granularity = chunk_size;
+- limits->max_discard_sectors = chunk_size;
++ limits->discard_granularity = chunk_size_bytes;
++ limits->max_discard_sectors = rs->md.chunk_sectors;
+ }
+ }
+
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index aed2c0447966..3c271b14e7c6 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -35,6 +35,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
+ info->mem = &pdev->resource[0];
+ info->irq = pdev->irq;
+
++ pdev->d3cold_delay = 0;
++
+ /* Probably it is enough to set this for iDMA capable devices only */
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
+index ca3d17e43ed8..ac88caca5ad4 100644
+--- a/drivers/net/dsa/rtl8366.c
++++ b/drivers/net/dsa/rtl8366.c
+@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+ {
+ struct realtek_smi *smi = ds->priv;
++ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return -EINVAL;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return -EINVAL;
+
+ dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
+ vlan->vid_begin, vlan->vid_end);
+@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
+ u16 vid;
+ int ret;
+
+- if (!smi->ops->is_vlan_valid(smi, port))
+- return;
++ for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
++ if (!smi->ops->is_vlan_valid(smi, vid))
++ return;
+
+ dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ port,
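
[Editorial aside: the fix validates every VLAN ID in the requested range instead of mistakenly passing the port number to is_vlan_valid(). Note that the loops as written use an exclusive upper bound (vid < vlan->vid_end), so vid_end itself goes unchecked; the inclusive form is shown below for contrast, with an illustrative validity rule:

#include <stdio.h>

static int is_vlan_valid(unsigned int vid)
{
        return vid >= 1 && vid <= 4094;     /* 0 and 4095 are reserved */
}

static int vlan_prepare(unsigned int vid_begin, unsigned int vid_end)
{
        unsigned int vid;

        for (vid = vid_begin; vid <= vid_end; vid++)    /* inclusive bound */
                if (!is_vlan_valid(vid))
                        return -1;
        return 0;
}

int main(void)
{
        printf("range 1..100: %d\n", vlan_prepare(1, 100)); /* 0: all valid */
        printf("range 0..10:  %d\n", vlan_prepare(0, 10));  /* -1: VID 0 bad */
        return 0;
}
]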
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+index 6c685b920713..bf17cf3ef613 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+ static int alloc_uld_rxqs(struct adapter *adap,
+ struct sge_uld_rxq_info *rxq_info, bool lro)
+ {
+- struct sge *s = &adap->sge;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
++ int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
+- unsigned int bmap_idx = 0;
++ struct sge *s = &adap->sge;
+ unsigned int per_chan;
+- int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
+
+@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
+
+ if (msi_idx >= 0) {
+ bmap_idx = get_msix_idx_from_bmap(adap);
++ if (bmap_idx < 0) {
++ err = -ENOSPC;
++ goto freeout;
++ }
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
+ }
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false,
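
[Editorial aside: part of this hunk is easy to miss — bmap_idx moves from unsigned int into the int declaration list, which is what lets the new bmap_idx < 0 test fire at all; with the old type the comparison is always false. A compact demo:

#include <errno.h>
#include <stdio.h>

static int get_idx_from_bmap(int exhausted)
{
        return exhausted ? -ENOSPC : 3;     /* negative errno or a slot */
}

int main(void)
{
        unsigned int u = get_idx_from_bmap(1);  /* -ENOSPC wraps to a huge value */
        int s = get_idx_from_bmap(1);

        if (u < 0)      /* always false; most compilers warn about this */
                printf("never printed\n");
        if (s < 0)
                printf("error detected: %d\n", s);
        return 0;
}
]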
+diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
+index 457444894d80..b4b8ba00ee01 100644
+--- a/drivers/net/ethernet/qlogic/qla3xxx.c
++++ b/drivers/net/ethernet/qlogic/qla3xxx.c
+@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
++ dev_kfree_skb_irq(skb);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
+index ce78714f536f..a505b2ab88b8 100644
+--- a/drivers/net/usb/hso.c
++++ b/drivers/net/usb/hso.c
+@@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bulk_serial_device(
+ */
+ if (serial->tiocmget) {
+ tiocmget = serial->tiocmget;
++ tiocmget->endp = hso_get_ep(interface,
++ USB_ENDPOINT_XFER_INT,
++ USB_DIR_IN);
++ if (!tiocmget->endp) {
++ dev_err(&interface->dev, "Failed to find INT IN ep\n");
++ goto exit;
++ }
++
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (tiocmget->urb) {
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+- tiocmget->endp = hso_get_ep(
+- interface,
+- USB_ENDPOINT_XFER_INT,
+- USB_DIR_IN);
+ } else
+ hso_free_tiomget(serial);
+ }
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 26c5207466af..54390b77ae21 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1349,6 +1349,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
+ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
++ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 5f5722bf6762..7370e06a0e4b 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
+ return 0;
+ }
+
+-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+- struct sk_buff *skb,
+- struct sk_buff_head *list)
++static int xennet_fill_frags(struct netfront_queue *queue,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
+ {
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *nskb;
+@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ kfree_skb(nskb);
+- return ~0U;
++ return -ENOENT;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ kfree_skb(nskb);
+ }
+
+- return cons;
++ queue->rx.rsp_cons = cons;
++
++ return 0;
+ }
+
+ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
+@@ -1045,8 +1047,7 @@ err:
+ skb->data_len = rx->status;
+ skb->len += rx->status;
+
+- i = xennet_fill_frags(queue, skb, &tmpq);
+- if (unlikely(i == ~0U))
++ if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
+ goto err;
+
+ if (rx->flags & XEN_NETRXF_csum_blank)
+@@ -1056,7 +1057,7 @@ err:
+
+ __skb_queue_tail(&rxq, skb);
+
+- queue->rx.rsp_cons = ++i;
++ i = ++queue->rx.rsp_cons;
+ work_done++;
+ }
+
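
[Editorial aside: this refactor retires the "index or ~0U sentinel" return in favor of a plain 0/-errno result, with the helper advancing the ring consumer itself. Folding failure into a value the success path could in principle also produce is fragile; a sketch of the shape after the change, with illustrative names:

#include <errno.h>
#include <stdio.h>

static int fill_frags(unsigned int cons_in, int too_many_frags,
                      unsigned int *cons_out)
{
        if (too_many_frags)
                return -ENOENT;         /* unambiguous failure */

        *cons_out = cons_in + 2;        /* consumed two ring responses */
        return 0;
}

int main(void)
{
        unsigned int rsp_cons = 0;

        if (fill_frags(rsp_cons, 0, &rsp_cons))
                return 1;
        printf("rsp_cons advanced to %u\n", rsp_cons);
        return 0;
}
]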
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 2ab92409210a..297bf928d652 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -181,7 +181,7 @@ config PCI_LABEL
+
+ config PCI_HYPERV
+ tristate "Hyper-V PCI Frontend"
+- depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
++ depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
+index cee5f2f590e2..14a6ba4067fb 100644
+--- a/drivers/pci/controller/dwc/pci-exynos.c
++++ b/drivers/pci/controller/dwc/pci-exynos.c
+@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
+
+ ep->phy = devm_of_phy_get(dev, np, NULL);
+ if (IS_ERR(ep->phy)) {
+- if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
++ if (PTR_ERR(ep->phy) != -ENODEV)
+ return PTR_ERR(ep->phy);
+
+ ep->phy = NULL;
+diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
+index 9b5cb5b70389..aabf22eaa6b9 100644
+--- a/drivers/pci/controller/dwc/pci-imx6.c
++++ b/drivers/pci/controller/dwc/pci-imx6.c
+@@ -1173,8 +1173,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
+
+ imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx6_pcie->vpcie)) {
+- if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
++ return PTR_ERR(imx6_pcie->vpcie);
+ imx6_pcie->vpcie = NULL;
+ }
+
+diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+index be61d96cc95e..ca9aa4501e7e 100644
+--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
++++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
+@@ -44,6 +44,7 @@ static const struct pci_epc_features ls_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
++ .bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4),
+ };
+
+ static const struct pci_epc_features*
+diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
+index 954bc2b74bbc..811b5c6d62ea 100644
+--- a/drivers/pci/controller/dwc/pcie-histb.c
++++ b/drivers/pci/controller/dwc/pcie-histb.c
+@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)
+
+ hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
+ if (IS_ERR(hipcie->vpcie)) {
+- if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
++ return PTR_ERR(hipcie->vpcie);
+ hipcie->vpcie = NULL;
+ }
+
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 464ba2538d52..03c42e8684f6 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -1994,14 +1994,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ err = of_pci_get_devfn(port);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+- return err;
++ goto err_node_put;
+ }
+
+ index = PCI_SLOT(err);
+
+ if (index < 1 || index > soc->num_ports) {
+ dev_err(dev, "invalid port number: %d\n", index);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_node_put;
+ }
+
+ index--;
+@@ -2010,12 +2011,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ if (err < 0) {
+ dev_err(dev, "failed to parse # of lanes: %d\n",
+ err);
+- return err;
++ goto err_node_put;
+ }
+
+ if (value > 16) {
+ dev_err(dev, "invalid # of lanes: %u\n", value);
+- return -EINVAL;
++ err = -EINVAL;
++ goto err_node_put;
+ }
+
+ lanes |= value << (index << 3);
+@@ -2029,13 +2031,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ lane += value;
+
+ rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
+- if (!rp)
+- return -ENOMEM;
++ if (!rp) {
++ err = -ENOMEM;
++ goto err_node_put;
++ }
+
+ err = of_address_to_resource(port, 0, &rp->regs);
+ if (err < 0) {
+ dev_err(dev, "failed to parse address: %d\n", err);
+- return err;
++ goto err_node_put;
+ }
+
+ INIT_LIST_HEAD(&rp->list);
+@@ -2062,6 +2066,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+ return err;
+
+ return 0;
++
++err_node_put:
++ of_node_put(port);
++ return err;
+ }
+
+ /*
+diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
+index 8d20f1793a61..ef8e677ce9d1 100644
+--- a/drivers/pci/controller/pcie-rockchip-host.c
++++ b/drivers/pci/controller/pcie-rockchip-host.c
+@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
+
+ rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
+ if (IS_ERR(rockchip->vpcie12v)) {
+- if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie12v);
+ dev_info(dev, "no vpcie12v regulator found\n");
+ }
+
+ rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
+ if (IS_ERR(rockchip->vpcie3v3)) {
+- if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie3v3);
+ dev_info(dev, "no vpcie3v3 regulator found\n");
+ }
+
+ rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
+ if (IS_ERR(rockchip->vpcie1v8)) {
+- if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie1v8);
+ dev_info(dev, "no vpcie1v8 regulator found\n");
+ }
+
+ rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
+ if (IS_ERR(rockchip->vpcie0v9)) {
+- if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
+- return -EPROBE_DEFER;
++ if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
++ return PTR_ERR(rockchip->vpcie0v9);
+ dev_info(dev, "no vpcie0v9 regulator found\n");
+ }
+
+diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
+index bcd5d357ca23..c3899ee1db99 100644
+--- a/drivers/pci/hotplug/rpaphp_core.c
++++ b/drivers/pci/hotplug/rpaphp_core.c
+@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
+ struct of_drc_info drc;
+ const __be32 *value;
+ char cell_drc_name[MAX_DRC_NAME_LEN];
+- int j, fndit;
++ int j;
+
+ info = of_find_property(dn->parent, "ibm,drc-info", NULL);
+ if (info == NULL)
+@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
+
+ /* Should now know end of current entry */
+
+- if (my_index > drc.last_drc_index)
+- continue;
+-
+- fndit = 1;
+- break;
++ /* Found it */
++ if (my_index <= drc.last_drc_index) {
++ sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
++ my_index);
++ break;
++ }
+ }
+- /* Found it */
+-
+- if (fndit)
+- sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
+- my_index);
+
+ if (((drc_name == NULL) ||
+ (drc_name && !strcmp(drc_name, cell_drc_name))) &&
+diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
+index 83fb077d0b41..702966e4fcaa 100644
+--- a/drivers/pci/pci-bridge-emul.c
++++ b/drivers/pci/pci-bridge-emul.c
+@@ -38,7 +38,7 @@ struct pci_bridge_reg_behavior {
+ u32 rsvd;
+ };
+
+-const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
++static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+@@ -173,7 +173,7 @@ const static struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ },
+ };
+
+-const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
++static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 088fcdc8d2b4..f2ab112c0a71 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -884,8 +884,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+- if (dev->current_state != state && printk_ratelimit())
+- pci_info(dev, "Refused to change power state, currently in D%d\n",
++ if (dev->current_state != state)
++ pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
+ dev->current_state);
+
+ /*
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+index 6c640837073e..5bfa56f3847e 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+@@ -192,8 +192,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };
+
+ static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
+ static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
+-static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
+-static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
++static const unsigned int uart_cts_c_pins[] = { GPIOY_11 };
++static const unsigned int uart_rts_c_pins[] = { GPIOY_12 };
+
+ static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
+ static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };
+@@ -439,10 +439,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+ GROUP(pwm_f_x, 3, 18),
+
+ /* Bank Y */
+- GROUP(uart_cts_c, 1, 19),
+- GROUP(uart_rts_c, 1, 18),
+- GROUP(uart_tx_c, 1, 17),
+- GROUP(uart_rx_c, 1, 16),
++ GROUP(uart_cts_c, 1, 17),
++ GROUP(uart_rts_c, 1, 16),
++ GROUP(uart_tx_c, 1, 19),
++ GROUP(uart_rx_c, 1, 18),
+ GROUP(pwm_a_y, 1, 21),
+ GROUP(pwm_f_y, 1, 20),
+ GROUP(i2s_out_ch23_y, 1, 5),
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 9b9c61e3f065..977792654e01 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -565,15 +565,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+ !(regval & BIT(INTERRUPT_MASK_OFF)))
+ continue;
+ irq = irq_find_mapping(gc->irq.domain, irqnr + i);
+- generic_handle_irq(irq);
++ if (irq != 0)
++ generic_handle_irq(irq);
+
+ /* Clear interrupt.
+ * We must read the pin register again, in case the
+ * value was changed while executing
+ * generic_handle_irq() above.
++ * If we didn't find a mapping for the interrupt,
++ * disable it in order to avoid a system hang caused
++ * by an interrupt storm.
+ */
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ regval = readl(regs + i);
++ if (irq == 0) {
++ regval &= ~BIT(INTERRUPT_ENABLE_OFF);
++ dev_dbg(&gpio_dev->pdev->dev,
++ "Disabling spurious GPIO IRQ %d\n",
++ irqnr + i);
++ }
+ writel(regval, regs + i);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ ret = IRQ_HANDLED;
+diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
+index eba872ce4a7c..c82ad4b629e3 100644
+--- a/drivers/pinctrl/pinctrl-stmfx.c
++++ b/drivers/pinctrl/pinctrl-stmfx.c
+@@ -296,29 +296,29 @@ static int stmfx_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ case PIN_CONFIG_BIAS_DISABLE:
++ case PIN_CONFIG_DRIVE_PUSH_PULL:
++ ret = stmfx_pinconf_set_type(pctl, pin, 0);
++ if (ret)
++ return ret;
++ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
++ if (ret)
++ return ret;
+ ret = stmfx_pinconf_set_pupd(pctl, pin, 0);
+ if (ret)
+ return ret;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+- ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+- break;
+- case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+- if (!dir)
+- ret = stmfx_pinconf_set_type(pctl, pin, 1);
+- else
+- ret = stmfx_pinconf_set_type(pctl, pin, 0);
++ ret = stmfx_pinconf_set_pupd(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+- case PIN_CONFIG_DRIVE_PUSH_PULL:
+- if (!dir)
+- ret = stmfx_pinconf_set_type(pctl, pin, 0);
+- else
+- ret = stmfx_pinconf_set_type(pctl, pin, 1);
++ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
++ ret = stmfx_pinconf_set_type(pctl, pin, 1);
+ if (ret)
+ return ret;
+ break;
+diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
+index abcfbad94f00..849c3b34e887 100644
+--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
++++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
+@@ -32,7 +32,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)
+
+ static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
+ {
+- writel(val, pmx->regs[bank] + reg);
++ writel_relaxed(val, pmx->regs[bank] + reg);
++ /* make sure pinmux register write completed */
++ pmx_readl(pmx, bank, reg);
+ }
+
+ static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
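
[Editorial aside: pinmux writes now go out with writel_relaxed() and are read back, so the function does not return until the posted write has actually reached the device, while avoiding the full barrier in plain writel(). A kernel-context sketch of the pattern — it uses the real <linux/io.h> accessors but is not buildable standalone:

#include <linux/io.h>

static void pmx_write_flushed(void __iomem *reg, u32 val)
{
        writel_relaxed(val, reg);       /* no costly barrier on the write */
        (void)readl(reg);               /* read-back completes the posted write */
}
]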
+diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
+index c61f00b72e15..a577218d1ab7 100644
+--- a/drivers/ptp/ptp_qoriq.c
++++ b/drivers/ptp/ptp_qoriq.c
+@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
+ }
+
++ spin_lock_init(&ptp_qoriq->lock);
++
+ ktime_get_real_ts64(&now);
+ ptp_qoriq_settime(&ptp_qoriq->caps, &now);
+
+@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
+ (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
+ (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
+
+- spin_lock_init(&ptp_qoriq->lock);
+ spin_lock_irqsave(&ptp_qoriq->lock, flags);
+
+ regs = &ptp_qoriq->regs;
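
[Editorial aside: this fix is pure ordering — ptp_qoriq_settime() takes ptp_qoriq->lock internally, so spin_lock_init() must run before that first call rather than after it. A userspace analogue with pthreads; in the kernel the failure mode would be a lockdep splat rather than an error return:

#include <pthread.h>
#include <stdio.h>

struct clock_dev {
        pthread_mutex_t lock;
        long time;
};

static void settime(struct clock_dev *c, long t)
{
        pthread_mutex_lock(&c->lock);       /* the lock's first user */
        c->time = t;
        pthread_mutex_unlock(&c->lock);
}

static void init(struct clock_dev *c)
{
        pthread_mutex_init(&c->lock, NULL); /* must happen before ... */
        settime(c, 42);                     /* ... anything that takes it */
}

int main(void)
{
        struct clock_dev c;

        init(&c);
        printf("time = %ld\n", c.time);
        return 0;
}
]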
+diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
+index a075e77617dc..3450d615974d 100644
+--- a/drivers/rtc/rtc-pcf85363.c
++++ b/drivers/rtc/rtc-pcf85363.c
+@@ -166,7 +166,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);
+
+ ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
+- tmp, sizeof(tmp));
++ tmp, 2);
++ if (ret)
++ return ret;
++
++ ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
++ buf, sizeof(tmp) - 2);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 7ee673a25fd0..4f9a107a0427 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -279,6 +279,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
++ data->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(data->rtc))
++ return PTR_ERR(data->rtc);
++
+ data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
+
+ if (IS_ERR(data->regmap)) {
+@@ -343,10 +347,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
+ goto error_rtc_device_register;
+ }
+
+- data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+- &snvs_rtc_ops, THIS_MODULE);
+- if (IS_ERR(data->rtc)) {
+- ret = PTR_ERR(data->rtc);
++ data->rtc->ops = &snvs_rtc_ops;
++ ret = rtc_register_device(data->rtc);
++ if (ret) {
+ dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
+ goto error_rtc_device_register;
+ }
+diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
+index 39b8cc4574b4..c6ed0b12e807 100644
+--- a/drivers/scsi/scsi_logging.c
++++ b/drivers/scsi/scsi_logging.c
+@@ -15,57 +15,15 @@
+ #include <scsi/scsi_eh.h>
+ #include <scsi/scsi_dbg.h>
+
+-#define SCSI_LOG_SPOOLSIZE 4096
+-
+-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
+-#warning SCSI logging bitmask too large
+-#endif
+-
+-struct scsi_log_buf {
+- char buffer[SCSI_LOG_SPOOLSIZE];
+- unsigned long map;
+-};
+-
+-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
+-
+ static char *scsi_log_reserve_buffer(size_t *len)
+ {
+- struct scsi_log_buf *buf;
+- unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
+- unsigned long idx = 0;
+-
+- preempt_disable();
+- buf = this_cpu_ptr(&scsi_format_log);
+- idx = find_first_zero_bit(&buf->map, map_bits);
+- if (likely(idx < map_bits)) {
+- while (test_and_set_bit(idx, &buf->map)) {
+- idx = find_next_zero_bit(&buf->map, map_bits, idx);
+- if (idx >= map_bits)
+- break;
+- }
+- }
+- if (WARN_ON(idx >= map_bits)) {
+- preempt_enable();
+- return NULL;
+- }
+- *len = SCSI_LOG_BUFSIZE;
+- return buf->buffer + idx * SCSI_LOG_BUFSIZE;
++ *len = 128;
++ return kmalloc(*len, GFP_ATOMIC);
+ }
+
+ static void scsi_log_release_buffer(char *bufptr)
+ {
+- struct scsi_log_buf *buf;
+- unsigned long idx;
+- int ret;
+-
+- buf = this_cpu_ptr(&scsi_format_log);
+- if (bufptr >= buf->buffer &&
+- bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
+- idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
+- ret = test_and_clear_bit(idx, &buf->map);
+- WARN_ON(!ret);
+- }
+- preempt_enable();
++ kfree(bufptr);
+ }
+
+ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
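
[Editorial aside: the removed code reserved fixed-size slots out of a per-CPU spool via a bitmap; the replacement is a plain kmalloc(GFP_ATOMIC), on the reasoning that the slab allocator already maintains fast per-CPU caches. For reference, the claim/release discipline being retired looks roughly like this C11 model (simplified: it sets bits directly rather than scanning first):

#include <stdatomic.h>
#include <stdio.h>

#define MAP_BITS 32

static _Atomic unsigned long map;

static int claim_slot(void)
{
        for (unsigned int idx = 0; idx < MAP_BITS; idx++) {
                unsigned long old = atomic_fetch_or(&map, 1UL << idx);

                if (!(old & (1UL << idx)))
                        return idx;     /* we flipped this bit 0 -> 1 */
        }
        return -1;                      /* spool exhausted */
}

static void release_slot(int idx)
{
        atomic_fetch_and(&map, ~(1UL << idx));
}

int main(void)
{
        int a = claim_slot(), b = claim_slot();

        printf("claimed slots %d and %d\n", a, b);  /* 0 and 1 */
        release_slot(a);
        release_slot(b);
        return 0;
}
]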
+diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
+index 3a01cfd70fdc..f518273cfbe3 100644
+--- a/drivers/soundwire/Kconfig
++++ b/drivers/soundwire/Kconfig
+@@ -4,7 +4,7 @@
+ #
+
+ menuconfig SOUNDWIRE
+- bool "SoundWire support"
++ tristate "SoundWire support"
+ help
+ SoundWire is a 2-Pin interface with data and clock line ratified
+ by the MIPI Alliance. SoundWire is used for transporting data
+@@ -17,17 +17,12 @@ if SOUNDWIRE
+
+ comment "SoundWire Devices"
+
+-config SOUNDWIRE_BUS
+- tristate
+- select REGMAP_SOUNDWIRE
+-
+ config SOUNDWIRE_CADENCE
+ tristate
+
+ config SOUNDWIRE_INTEL
+ tristate "Intel SoundWire Master driver"
+ select SOUNDWIRE_CADENCE
+- select SOUNDWIRE_BUS
+ depends on X86 && ACPI && SND_SOC
+ help
+ SoundWire Intel Master driver.
+diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
+index fd99a831b92a..45b7e5001653 100644
+--- a/drivers/soundwire/Makefile
++++ b/drivers/soundwire/Makefile
+@@ -5,7 +5,7 @@
+
+ #Bus Objs
+ soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
+-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
++obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
+
+ #Cadence Objs
+ soundwire-cadence-objs := cadence_master.o
+diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
+index 60293a00a14e..8a670bc86c0c 100644
+--- a/drivers/soundwire/intel.c
++++ b/drivers/soundwire/intel.c
+@@ -283,6 +283,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
+
+ if (pcm) {
+ count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
++
++ /*
++ * WORKAROUND: on all existing Intel controllers, pdi
++ * number 2 reports channel count as 1 even though it
++ * supports 8 channels. Performing hardcoding for pdi
++ * number 2.
++ */
++ if (pdi_num == 2)
++ count = 7;
++
+ } else {
+ count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
+ count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 703948c9fbe1..02206162eaa9 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -438,11 +438,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ /*
+- * Try to reset the device. The success of this is dependent on
+- * being able to lock the device, which is not always possible.
++ * Try to get the locks ourselves to prevent a deadlock. The
++ * success of this is dependent on being able to lock the device,
++ * which is not always possible.
++ * We cannot use the "try" reset interface here, because it would
++ * overwrite the previously restored configuration information.
+ */
+- if (vdev->reset_works && !pci_try_reset_function(pdev))
+- vdev->needs_reset = false;
++ if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
++ if (device_trylock(&pdev->dev)) {
++ if (!__pci_reset_function_locked(pdev))
++ vdev->needs_reset = false;
++ device_unlock(&pdev->dev);
++ }
++ pci_cfg_access_unlock(pdev);
++ }
+
+ pci_restore_state(pdev);
+ out:
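
For context on the comment above: pci_try_reset_function() saves and restores config space around the reset itself, which would clobber the state vfio has just restored by hand, so the code takes the two locks directly. The bare shape of that trylock sequence, where reset_done stands in for the vdev->needs_reset bookkeeping:

	if (pci_cfg_access_trylock(pdev)) {
		if (device_trylock(&pdev->dev)) {
			if (!__pci_reset_function_locked(pdev))
				reset_done = true;
			device_unlock(&pdev->dev);
		}
		pci_cfg_access_unlock(pdev);
	}
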
+diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
+index 021b727e8b5c..6afd0d3ae569 100644
+--- a/drivers/video/fbdev/ssd1307fb.c
++++ b/drivers/video/fbdev/ssd1307fb.c
+@@ -432,7 +432,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
+ if (ret < 0)
+ return ret;
+
+- ret = ssd1307fb_write_cmd(par->client, 0x0);
++ ret = ssd1307fb_write_cmd(par->client, par->page_offset);
+ if (ret < 0)
+ return ret;
+
+diff --git a/fs/9p/cache.c b/fs/9p/cache.c
+index 995e332eee5c..eb2151fb6049 100644
+--- a/fs/9p/cache.c
++++ b/fs/9p/cache.c
+@@ -51,6 +51,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
+ if (!v9ses->cachetag) {
+ if (v9fs_random_cachetag(v9ses) < 0) {
+ v9ses->fscache = NULL;
++ kfree(v9ses->cachetag);
++ v9ses->cachetag = NULL;
+ return;
+ }
+ }
+diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
+index 8e83741b02e0..d4d4fdfac1a6 100644
+--- a/fs/ext4/block_validity.c
++++ b/fs/ext4/block_validity.c
+@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)
+
+ void ext4_exit_system_zone(void)
+ {
++ rcu_barrier();
+ kmem_cache_destroy(ext4_system_zone_cachep);
+ }
+
+@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
+ return 0;
+ }
+
++static void release_system_zone(struct ext4_system_blocks *system_blks)
++{
++ struct ext4_system_zone *entry, *n;
++
++ rbtree_postorder_for_each_entry_safe(entry, n,
++ &system_blks->root, node)
++ kmem_cache_free(ext4_system_zone_cachep, entry);
++}
++
+ /*
+ * Mark a range of blocks as belonging to the "system zone" --- that
+ * is, filesystem metadata blocks which should never be used by
+ * inodes.
+ */
+-static int add_system_zone(struct ext4_sb_info *sbi,
++static int add_system_zone(struct ext4_system_blocks *system_blks,
+ ext4_fsblk_t start_blk,
+ unsigned int count)
+ {
+ struct ext4_system_zone *new_entry = NULL, *entry;
+- struct rb_node **n = &sbi->system_blks.rb_node, *node;
++ struct rb_node **n = &system_blks->root.rb_node, *node;
+ struct rb_node *parent = NULL, *new_node = NULL;
+
+ while (*n) {
+@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ new_node = &new_entry->node;
+
+ rb_link_node(new_node, parent, n);
+- rb_insert_color(new_node, &sbi->system_blks);
++ rb_insert_color(new_node, &system_blks->root);
+ }
+
+ /* Can we merge to the left? */
+@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ if (can_merge(entry, new_entry)) {
+ new_entry->start_blk = entry->start_blk;
+ new_entry->count += entry->count;
+- rb_erase(node, &sbi->system_blks);
++ rb_erase(node, &system_blks->root);
+ kmem_cache_free(ext4_system_zone_cachep, entry);
+ }
+ }
+@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
+ entry = rb_entry(node, struct ext4_system_zone, node);
+ if (can_merge(new_entry, entry)) {
+ new_entry->count += entry->count;
+- rb_erase(node, &sbi->system_blks);
++ rb_erase(node, &system_blks->root);
+ kmem_cache_free(ext4_system_zone_cachep, entry);
+ }
+ }
+@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
+ int first = 1;
+
+ printk(KERN_INFO "System zones: ");
+- node = rb_first(&sbi->system_blks);
++ node = rb_first(&sbi->system_blks->root);
+ while (node) {
+ entry = rb_entry(node, struct ext4_system_zone, node);
+ printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
+@@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
+ printk(KERN_CONT "\n");
+ }
+
+-static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
++/*
++ * Returns 1 if the passed-in block region (start_blk,
++ * start_blk+count) is valid; 0 if some part of the block region
++ * overlaps with filesystem metadata blocks.
++ */
++static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
++ struct ext4_system_blocks *system_blks,
++ ext4_fsblk_t start_blk,
++ unsigned int count)
++{
++ struct ext4_system_zone *entry;
++ struct rb_node *n;
++
++ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
++ (start_blk + count < start_blk) ||
++ (start_blk + count > ext4_blocks_count(sbi->s_es))) {
++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
++ return 0;
++ }
++
++ if (system_blks == NULL)
++ return 1;
++
++ n = system_blks->root.rb_node;
++ while (n) {
++ entry = rb_entry(n, struct ext4_system_zone, node);
++ if (start_blk + count - 1 < entry->start_blk)
++ n = n->rb_left;
++ else if (start_blk >= (entry->start_blk + entry->count))
++ n = n->rb_right;
++ else {
++ sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
++ return 0;
++ }
++ }
++ return 1;
++}
++
++static int ext4_protect_reserved_inode(struct super_block *sb,
++ struct ext4_system_blocks *system_blks,
++ u32 ino)
+ {
+ struct inode *inode;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+ if (n == 0) {
+ i++;
+ } else {
+- if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
++ if (!ext4_data_block_valid_rcu(sbi, system_blks,
++ map.m_pblk, n)) {
+ ext4_error(sb, "blocks %llu-%llu from inode %u "
+ "overlap system zone", map.m_pblk,
+ map.m_pblk + map.m_len - 1, ino);
+ err = -EFSCORRUPTED;
+ break;
+ }
+- err = add_system_zone(sbi, map.m_pblk, n);
++ err = add_system_zone(system_blks, map.m_pblk, n);
+ if (err < 0)
+ break;
+ i += n;
+@@ -180,94 +231,130 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+ return err;
+ }
+
++static void ext4_destroy_system_zone(struct rcu_head *rcu)
++{
++ struct ext4_system_blocks *system_blks;
++
++ system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
++ release_system_zone(system_blks);
++ kfree(system_blks);
++}
++
++/*
++ * Build system zone rbtree which is used for block validity checking.
++ *
++ * The update of system_blks pointer in this function is protected by
++ * sb->s_umount semaphore. However we have to be careful as we can be
++ * racing with ext4_data_block_valid() calls reading system_blks rbtree
++ * protected only by RCU. That's why we first build the rbtree and then
++ * swap it in place.
++ */
+ int ext4_setup_system_zone(struct super_block *sb)
+ {
+ ext4_group_t ngroups = ext4_get_groups_count(sb);
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_system_blocks *system_blks;
+ struct ext4_group_desc *gdp;
+ ext4_group_t i;
+ int flex_size = ext4_flex_bg_size(sbi);
+ int ret;
+
+ if (!test_opt(sb, BLOCK_VALIDITY)) {
+- if (sbi->system_blks.rb_node)
++ if (sbi->system_blks)
+ ext4_release_system_zone(sb);
+ return 0;
+ }
+- if (sbi->system_blks.rb_node)
++ if (sbi->system_blks)
+ return 0;
+
++ system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
++ if (!system_blks)
++ return -ENOMEM;
++
+ for (i=0; i < ngroups; i++) {
+ cond_resched();
+ if (ext4_bg_has_super(sb, i) &&
+ ((i < 5) || ((i % flex_size) == 0)))
+- add_system_zone(sbi, ext4_group_first_block_no(sb, i),
++ add_system_zone(system_blks,
++ ext4_group_first_block_no(sb, i),
+ ext4_bg_num_gdb(sb, i) + 1);
+ gdp = ext4_get_group_desc(sb, i, NULL);
+- ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
++ ret = add_system_zone(system_blks,
++ ext4_block_bitmap(sb, gdp), 1);
+ if (ret)
+- return ret;
+- ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
++ goto err;
++ ret = add_system_zone(system_blks,
++ ext4_inode_bitmap(sb, gdp), 1);
+ if (ret)
+- return ret;
+- ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
++ goto err;
++ ret = add_system_zone(system_blks,
++ ext4_inode_table(sb, gdp),
+ sbi->s_itb_per_group);
+ if (ret)
+- return ret;
++ goto err;
+ }
+ if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
+- ret = ext4_protect_reserved_inode(sb,
++ ret = ext4_protect_reserved_inode(sb, system_blks,
+ le32_to_cpu(sbi->s_es->s_journal_inum));
+ if (ret)
+- return ret;
++ goto err;
+ }
+
++ /*
++ * System blks rbtree complete, announce it once to prevent racing
++ * with ext4_data_block_valid() accessing the rbtree at the same
++ * time.
++ */
++ rcu_assign_pointer(sbi->system_blks, system_blks);
++
+ if (test_opt(sb, DEBUG))
+ debug_print_tree(sbi);
+ return 0;
++err:
++ release_system_zone(system_blks);
++ kfree(system_blks);
++ return ret;
+ }
+
+-/* Called when the filesystem is unmounted */
++/*
++ * Called when the filesystem is unmounted or when remounting it with
++ * noblock_validity specified.
++ *
++ * The update of system_blks pointer in this function is protected by
++ * sb->s_umount semaphore. However we have to be careful as we can be
++ * racing with ext4_data_block_valid() calls reading system_blks rbtree
++ * protected only by RCU. So we first clear the system_blks pointer and
++ * then free the rbtree only after RCU grace period expires.
++ */
+ void ext4_release_system_zone(struct super_block *sb)
+ {
+- struct ext4_system_zone *entry, *n;
++ struct ext4_system_blocks *system_blks;
+
+- rbtree_postorder_for_each_entry_safe(entry, n,
+- &EXT4_SB(sb)->system_blks, node)
+- kmem_cache_free(ext4_system_zone_cachep, entry);
++ system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
++ lockdep_is_held(&sb->s_umount));
++ rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
+
+- EXT4_SB(sb)->system_blks = RB_ROOT;
++ if (system_blks)
++ call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
+ }
+
+-/*
+- * Returns 1 if the passed-in block region (start_blk,
+- * start_blk+count) is valid; 0 if some part of the block region
+- * overlaps with filesystem metadata blocks.
+- */
+ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
+ unsigned int count)
+ {
+- struct ext4_system_zone *entry;
+- struct rb_node *n = sbi->system_blks.rb_node;
++ struct ext4_system_blocks *system_blks;
++ int ret;
+
+- if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+- (start_blk + count < start_blk) ||
+- (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+- return 0;
+- }
+- while (n) {
+- entry = rb_entry(n, struct ext4_system_zone, node);
+- if (start_blk + count - 1 < entry->start_blk)
+- n = n->rb_left;
+- else if (start_blk >= (entry->start_blk + entry->count))
+- n = n->rb_right;
+- else {
+- sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+- return 0;
+- }
+- }
+- return 1;
++ /*
++ * Lock the system zone so that it cannot be released concurrently
++ * by a remount that inverts the current "[no]block_validity"
++ * mount option.
++ */
++ rcu_read_lock();
++ system_blks = rcu_dereference(sbi->system_blks);
++ ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
++ count);
++ rcu_read_unlock();
++ return ret;
+ }
+
+ int ext4_check_blockref(const char *function, unsigned int line,
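
The rework above follows the standard RCU publish/lookup pattern: readers traverse whichever tree they observe under rcu_read_lock(), the updater builds a complete replacement before publishing it with a single pointer store, and teardown defers the free until a grace period has elapsed. A stripped-down sketch of the same shape; all names here (zone_set, zones, tree_contains, update_lock) are illustrative, not ext4's:

	#include <linux/rbtree.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct zone_set {
		struct rb_root root;
		struct rcu_head rcu;
	};

	static struct zone_set __rcu *zones;

	static bool zone_lookup(u64 key)
	{
		struct zone_set *z;
		bool hit = false;

		rcu_read_lock();
		z = rcu_dereference(zones);
		if (z)
			hit = tree_contains(&z->root, key); /* hypothetical helper */
		rcu_read_unlock();
		return hit;
	}

	static void zone_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct zone_set, rcu));
	}

	/* caller holds update_lock, the updater-side mutex */
	static void zone_replace(struct zone_set *newset)
	{
		struct zone_set *old;

		old = rcu_dereference_protected(zones,
						lockdep_is_held(&update_lock));
		rcu_assign_pointer(zones, newset); /* publish fully built tree */
		if (old)
			call_rcu(&old->rcu, zone_free_rcu);
	}
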
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 1cb67859e051..0014b1c5e6be 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -184,6 +184,14 @@ struct ext4_map_blocks {
+ unsigned int m_flags;
+ };
+
++/*
++ * Block validity checking, system zone rbtree.
++ */
++struct ext4_system_blocks {
++ struct rb_root root;
++ struct rcu_head rcu;
++};
++
+ /*
+ * Flags for ext4_io_end->flags
+ */
+@@ -1420,7 +1428,7 @@ struct ext4_sb_info {
+ int s_jquota_fmt; /* Format of quota to use */
+ #endif
+ unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
+- struct rb_root system_blks;
++ struct ext4_system_blocks __rcu *system_blks;
+
+ #ifdef EXTENTS_STATS
+ /* ext4 extents stats */
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 973f1e818770..01038aff5d8e 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -894,7 +894,21 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
+
+ static int f2fs_drop_inode(struct inode *inode)
+ {
++ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret;
++
++ /*
++ * during filesystem shutdown, if checkpoint is disabled,
++ * drop useless meta/node dirty pages.
++ */
++ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
++ inode->i_ino == F2FS_META_INO(sbi)) {
++ trace_f2fs_drop_inode(inode, 1);
++ return 1;
++ }
++ }
++
+ /*
+ * This is to avoid a deadlock condition like below.
+ * writeback_single_inode(inode)
+diff --git a/fs/fat/dir.c b/fs/fat/dir.c
+index 1bda2ab6745b..814ad2c2ba80 100644
+--- a/fs/fat/dir.c
++++ b/fs/fat/dir.c
+@@ -1100,8 +1100,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
+ err = -ENOMEM;
+ goto error;
+ }
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[n]);
+ memset(bhs[n]->b_data, 0, sb->s_blocksize);
+ set_buffer_uptodate(bhs[n]);
++ unlock_buffer(bhs[n]);
+ mark_buffer_dirty_inode(bhs[n], dir);
+
+ n++;
+@@ -1158,6 +1161,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
+ fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
+
+ de = (struct msdos_dir_entry *)bhs[0]->b_data;
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[0]);
+ /* filling the new directory slots ("." and ".." entries) */
+ memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
+ memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);
+@@ -1180,6 +1185,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
+ de[0].size = de[1].size = 0;
+ memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
+ set_buffer_uptodate(bhs[0]);
++ unlock_buffer(bhs[0]);
+ mark_buffer_dirty_inode(bhs[0], dir);
+
+ err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);
+@@ -1237,11 +1243,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,
+
+ /* fill the directory entry */
+ copy = min(size, sb->s_blocksize);
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(bhs[n]);
+ memcpy(bhs[n]->b_data, slots, copy);
+- slots += copy;
+- size -= copy;
+ set_buffer_uptodate(bhs[n]);
++ unlock_buffer(bhs[n]);
+ mark_buffer_dirty_inode(bhs[n], dir);
++ slots += copy;
++ size -= copy;
+ if (!size)
+ break;
+ n++;
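
All three hunks in this file apply one idiom: hold the buffer lock while writing b_data and marking the buffer up to date, so that a concurrent read of the same block through the block device never sees a half-initialized buffer. In isolation the pattern is just:

	lock_buffer(bh);		/* exclude concurrent bdev readers */
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, dir);	/* writeback scheduling unchanged */
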
+diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
+index 265983635f2b..3647c65a0f48 100644
+--- a/fs/fat/fatent.c
++++ b/fs/fat/fatent.c
+@@ -388,8 +388,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
+ err = -ENOMEM;
+ goto error;
+ }
++ /* Avoid race with userspace read via bdev */
++ lock_buffer(c_bh);
+ memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
+ set_buffer_uptodate(c_bh);
++ unlock_buffer(c_bh);
+ mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
+ if (sb->s_flags & SB_SYNCHRONOUS)
+ err = sync_dirty_buffer(c_bh);
+diff --git a/fs/fs_context.c b/fs/fs_context.c
+index 103643c68e3f..87c2c9687d90 100644
+--- a/fs/fs_context.c
++++ b/fs/fs_context.c
+@@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
+ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
+ break;
+ case FS_CONTEXT_FOR_RECONFIGURE:
+- /* We don't pin any namespaces as the superblock's
+- * subscriptions cannot be changed at this point.
+- */
+ atomic_inc(&reference->d_sb->s_active);
++ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
+ fc->root = dget(reference);
+ break;
+ }
+diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
+index e78657742bd8..3883633e82eb 100644
+--- a/fs/ocfs2/dlm/dlmunlock.c
++++ b/fs/ocfs2/dlm/dlmunlock.c
+@@ -90,7 +90,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ enum dlm_status status;
+ int actions = 0;
+ int in_use;
+- u8 owner;
++ u8 owner;
++ int recovery_wait = 0;
+
+ mlog(0, "master_node = %d, valblk = %d\n", master_node,
+ flags & LKM_VALBLK);
+@@ -193,9 +194,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
+ }
+ if (flags & LKM_CANCEL)
+ lock->cancel_pending = 0;
+- else
+- lock->unlock_pending = 0;
+-
++ else {
++ if (!lock->unlock_pending)
++ recovery_wait = 1;
++ else
++ lock->unlock_pending = 0;
++ }
+ }
+
+ /* get an extra ref on lock. if we are just switching
+@@ -229,6 +233,17 @@ leave:
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+
++ if (recovery_wait) {
++ spin_lock(&res->spinlock);
++ /* An unlock request succeeds immediately once the owner dies,
++ * and the lock has already been removed from the grant list.
++ * We must wait for RECOVERING to finish, or we miss the chance
++ * to purge the lock: removal is much faster than recovery.
++ */
++ __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
++ spin_unlock(&res->spinlock);
++ }
++
+ /* let the caller's final dlm_lock_put handle the actual kfree */
+ if (actions & DLM_UNLOCK_FREE_LOCK) {
+ /* this should always be coupled with list removal */
+diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
+index 5b7709894415..db9f67d34af3 100644
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -144,6 +144,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
+ if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
+ (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
+ &header_length) == 3) {
++ time->tv_nsec *= 1000;
+ if (data_type == 'C')
+ *compressed = true;
+ else
+@@ -151,6 +152,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
+ } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
+ (time64_t *)&time->tv_sec, &time->tv_nsec,
+ &header_length) == 2) {
++ time->tv_nsec *= 1000;
+ *compressed = false;
+ } else {
+ time->tv_sec = 0;
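
Context for the two tv_nsec *= 1000 lines: the ramoops header records the timestamp with microsecond resolution (the write side storing tv_nsec / 1000 is what this fix implies), so the value scanned back must be scaled to nanoseconds. A small standalone C illustration of the scaling, using the same "%lld.%lu" shape as the hunk:

	#include <stdio.h>

	int main(void)
	{
		long long sec;
		unsigned long frac;	/* stored with microsecond resolution */

		if (sscanf("1567088832.123456", "%lld.%lu", &sec, &frac) == 2)
			printf("%lld s, %lu ns\n", sec, frac * 1000);
		return 0;
	}
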
+diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
+index ccb73422c2fa..e6f54ef6698b 100644
+--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
++++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
+@@ -20,6 +20,9 @@
+ #define CMDQ_WFE_WAIT BIT(15)
+ #define CMDQ_WFE_WAIT_VALUE 0x1
+
++/** cmdq event maximum */
++#define CMDQ_MAX_EVENT 0x3ff
++
+ /*
+ * CMDQ_CODE_MASK:
+ * set write mask
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index dd436da7eccc..9feb59ac8550 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -2375,4 +2375,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
+ #define pci_notice_ratelimited(pdev, fmt, arg...) \
+ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
+
++#define pci_info_ratelimited(pdev, fmt, arg...) \
++ dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
++
+ #endif /* LINUX_PCI_H */
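
The new macro is a thin wrapper mirroring pci_notice_ratelimited() just above it; a hypothetical call site would look like:

	pci_info_ratelimited(pdev, "link retraining (status %#06x)\n", status);
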
+diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
+index 54ade13a9b15..4e8899972db4 100644
+--- a/include/linux/soc/mediatek/mtk-cmdq.h
++++ b/include/linux/soc/mediatek/mtk-cmdq.h
+@@ -13,9 +13,6 @@
+
+ #define CMDQ_NO_TIMEOUT 0xffffffffu
+
+-/** cmdq event maximum */
+-#define CMDQ_MAX_EVENT 0x3ff
+-
+ struct cmdq_pkt;
+
+ struct cmdq_client {
+diff --git a/include/scsi/scsi_dbg.h b/include/scsi/scsi_dbg.h
+index e03bd9d41fa8..7b196d234626 100644
+--- a/include/scsi/scsi_dbg.h
++++ b/include/scsi/scsi_dbg.h
+@@ -6,8 +6,6 @@ struct scsi_cmnd;
+ struct scsi_device;
+ struct scsi_sense_hdr;
+
+-#define SCSI_LOG_BUFSIZE 128
+-
+ extern void scsi_print_command(struct scsi_cmnd *);
+ extern size_t __scsi_format_command(char *, size_t,
+ const unsigned char *, size_t);
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index fa06b528c73c..0972c48d81d7 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -1071,7 +1071,7 @@ TRACE_EVENT(rxrpc_recvmsg,
+ ),
+
+ TP_fast_assign(
+- __entry->call = call->debug_id;
++ __entry->call = call ? call->debug_id : 0;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index d5870723b8ad..15d70a90b50d 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -300,6 +300,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
+ {
+ struct page *pages;
+
++ if (fatal_signal_pending(current))
++ return NULL;
+ pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
+ if (pages) {
+ unsigned int count, i;
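
The added check lets a very large kexec image allocation abort promptly once the task has a fatal signal pending (for example, when the OOM killer has picked it), rather than continuing to drain the page allocator. The generic shape of the idiom, with pages_needed, gfp_mask and order as stand-ins:

	while (pages_needed > 0) {
		struct page *page;

		if (fatal_signal_pending(current))
			return -EINTR;	/* caller unwinds partial work */
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
		pages_needed--;
	}
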
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index c4ce08f43bd6..ab4a4606d19b 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -1175,6 +1175,7 @@ err:
+ pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
+ patch->mod->name, obj->mod->name, obj->mod->name);
+ mod->klp_alive = false;
++ obj->mod = NULL;
+ klp_cleanup_module_patches_limited(mod, patch);
+ mutex_unlock(&klp_mutex);
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index cbdfae379896..120ec6f64bbc 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -599,7 +599,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
+ int "Maximum kmemleak early log entries"
+ depends on DEBUG_KMEMLEAK
+ range 200 40000
+- default 400
++ default 16000
+ help
+ Kmemleak must track all the memory allocations to avoid
+ reporting false positives. Since memory may be allocated or
+diff --git a/net/core/sock.c b/net/core/sock.c
+index df7b38b60164..3d54153b8325 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1696,8 +1696,6 @@ static void __sk_destruct(struct rcu_head *head)
+ sk_filter_uncharge(sk, filter);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
+ }
+- if (rcu_access_pointer(sk->sk_reuseport_cb))
+- reuseport_detach_sock(sk);
+
+ sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
+
+@@ -1724,7 +1722,14 @@ static void __sk_destruct(struct rcu_head *head)
+
+ void sk_destruct(struct sock *sk)
+ {
+- if (sock_flag(sk, SOCK_RCU_FREE))
++ bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
++
++ if (rcu_access_pointer(sk->sk_reuseport_cb)) {
++ reuseport_detach_sock(sk);
++ use_call_rcu = true;
++ }
++
++ if (use_call_rcu)
+ call_rcu(&sk->sk_rcu, __sk_destruct);
+ else
+ __sk_destruct(&sk->sk_rcu);
+diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
+index a53a543fe055..52690bb3e40f 100644
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
+ struct ip_tunnel *t = netdev_priv(dev);
+
+ ether_setup(dev);
++ dev->max_mtu = 0;
+ dev->netdev_ops = &erspan_netdev_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index f6b7b11835ee..148dfcb5cbd9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -915,16 +915,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
+ if (peer->rate_tokens == 0 ||
+ time_after(jiffies,
+ (peer->rate_last +
+- (ip_rt_redirect_load << peer->rate_tokens)))) {
++ (ip_rt_redirect_load << peer->n_redirects)))) {
+ __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
+
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
+ peer->rate_last = jiffies;
+- ++peer->rate_tokens;
+ ++peer->n_redirects;
+ #ifdef CONFIG_IP_ROUTE_VERBOSE
+ if (log_martians &&
+- peer->rate_tokens == ip_rt_redirect_number)
++ peer->n_redirects == ip_rt_redirect_number)
+ net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &gw);
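
The throttle above is an exponential backoff: the base interval is shifted left once per redirect already sent, so the allowed gap doubles each time. Keying the shift off peer->n_redirects rather than rate_tokens matters because, as the hunk suggests, rate_tokens is also touched by the generic rate-limiting logic while n_redirects only counts redirects. Roughly:

	/* ip_rt_redirect_load is the base interval in jiffies */
	unsigned long gap = ip_rt_redirect_load << peer->n_redirects;

	/* n_redirects: 0   1   2   3  ...
	 * gap:         1x  2x  4x  8x ... of the base interval */
	if (time_after(jiffies, peer->rate_last + gap))
		send_redirect(peer);	/* hypothetical helper */
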
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 3e8b38c73d8c..483323332d74 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
+ return false;
+
+ start_ts = tcp_sk(sk)->retrans_stamp;
+- if (likely(timeout == 0))
+- timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
++ if (likely(timeout == 0)) {
++ unsigned int rto_base = TCP_RTO_MIN;
++
++ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
++ rto_base = tcp_timeout_init(sk);
++ timeout = tcp_model_timeout(sk, boundary, rto_base);
++ }
+
+ return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
+ }
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index acab7738f733..665f26e32d77 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -833,6 +833,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ int is_udplite = IS_UDPLITE(sk);
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+ __wsum csum = 0;
+
+ /*
+@@ -866,10 +867,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+- skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
+- cork->gso_size);
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
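After this change the GSO metadata is attached only when the UDP payload actually spans more than one segment; a payload of at most one gso_size goes out as an ordinary datagram, which is what the udpgso selftest updates near the end of this patch now expect. The segment arithmetic, with example numbers:

	/* e.g. 3000 bytes of payload, gso_size 1400 */
	int datalen = 3000, gso_size = 1400;

	if (datalen > gso_size) {
		int segs = DIV_ROUND_UP(datalen, gso_size);	/* = 3 */
		/* wire segments: 1400 + 1400 + 200 bytes */
	}
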
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 2454fce6fbfa..c94bc461e268 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5962,13 +5962,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
+ switch (event) {
+ case RTM_NEWADDR:
+ /*
+- * If the address was optimistic
+- * we inserted the route at the start of
+- * our DAD process, so we don't need
+- * to do it again
++ * If the address was optimistic we inserted the route at the
++ * start of our DAD process, so we don't need to do it again.
++ * If the device was taken down in the middle of the DAD
++ * cycle there is a race where we could get here without a
++ * host route, so nothing to insert. That will be fixed when
++ * the device is brought up.
+ */
+- if (!rcu_access_pointer(ifp->rt->fib6_node))
++ if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
+ ip6_ins_rt(net, ifp->rt);
++ } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
++ pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
++ &ifp->addr, ifp->idev->dev->name);
++ }
++
+ if (ifp->idev->cnf.forwarding)
+ addrconf_join_anycast(ifp);
+ if (!ipv6_addr_any(&ifp->peer_addr))
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index fa014d5f1732..a593aaf25748 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -221,6 +221,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
+ if (ipv6_addr_is_multicast(&hdr->saddr))
+ goto err;
+
++ /* While RFC4291 is not explicit about v4mapped addresses
++ * in IPv6 headers, it seems clear that the Linux dual-stack
++ * model cannot deal with them properly.
++ * Security models could be fooled by ::ffff:127.0.0.1 for example.
++ *
++ * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
++ */
++ if (ipv6_addr_v4mapped(&hdr->saddr))
++ goto err;
++
+ skb->transport_header = skb->network_header + sizeof(*hdr);
+ IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+
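A v4-mapped address is ::ffff:a.b.c.d, i.e. 80 zero bits, 16 one bits, then the IPv4 address. The helper used above tests essentially this; a simplified rendering (the in-tree ipv6_addr_v4mapped() folds the comparisons into one expression):

	static inline bool is_v4mapped(const struct in6_addr *a)
	{
		return (a->s6_addr32[0] | a->s6_addr32[1]) == 0 &&
		       a->s6_addr32[2] == htonl(0x0000ffff);
	}
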
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 1258be19e186..f0b5edd861d0 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1122,6 +1122,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ __wsum csum = 0;
+ int offset = skb_transport_offset(skb);
+ int len = skb->len - offset;
++ int datalen = len - sizeof(*uh);
+
+ /*
+ * Create a UDP header
+@@ -1154,8 +1155,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
+ return -EIO;
+ }
+
+- skb_shinfo(skb)->gso_size = cork->gso_size;
+- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ if (datalen > cork->gso_size) {
++ skb_shinfo(skb)->gso_size = cork->gso_size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
++ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
++ cork->gso_size);
++ }
+ goto csum_partial;
+ }
+
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 8dfea26536c9..ccdd790e163a 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ llcp_sock->service_name = kmemdup(llcp_addr.service_name,
+ llcp_sock->service_name_len,
+ GFP_KERNEL);
+-
++ if (!llcp_sock->service_name) {
++ ret = -ENOMEM;
++ goto put_dev;
++ }
+ llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
++ kfree(llcp_sock->service_name);
++ llcp_sock->service_name = NULL;
+ ret = -EADDRINUSE;
+ goto put_dev;
+ }
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index ea64c90b14e8..17e6ca62f1be 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -970,7 +970,8 @@ static int nfc_genl_dep_link_down(struct sk_buff *skb, struct genl_info *info)
+ int rc;
+ u32 idx;
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++ !info->attrs[NFC_ATTR_TARGET_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+@@ -1018,7 +1019,8 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info)
+ struct sk_buff *msg = NULL;
+ u32 idx;
+
+- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++ !info->attrs[NFC_ATTR_FIRMWARE_NAME])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/rds/ib.c b/net/rds/ib.c
+index b8d581b779b2..992e03ceee9f 100644
+--- a/net/rds/ib.c
++++ b/net/rds/ib.c
+@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
+ refcount_set(&rds_ibdev->refcount, 1);
+ INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
+
++ INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
++ INIT_LIST_HEAD(&rds_ibdev->conn_list);
++
+ rds_ibdev->max_wrs = device->attrs.max_qp_wr;
+ rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
+
+@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
+ device->name,
+ rds_ibdev->use_fastreg ? "FRMR" : "FMR");
+
+- INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+- INIT_LIST_HEAD(&rds_ibdev->conn_list);
+-
+ down_write(&rds_ib_devices_lock);
+ list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+ up_write(&rds_ib_devices_lock);
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index 06c7a2da21bc..39b427dc7512 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
+ [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
+ };
+
++static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
++ struct nlattr *opt,
++ struct netlink_ext_ack *extack)
++{
++ int err;
++
++ if (!opt) {
++ NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
++ return -EINVAL;
++ }
++
++ err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
++ cbq_policy, extack);
++ if (err < 0)
++ return err;
++
++ if (tb[TCA_CBQ_WRROPT]) {
++ const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
++
++ if (wrr->priority > TC_CBQ_MAXPRIO) {
++ NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
++ err = -EINVAL;
++ }
++ }
++ return err;
++}
++
+ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
+@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+ q->delay_timer.function = cbq_undelay;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
+@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
+ struct cbq_class *parent;
+ struct qdisc_rate_table *rtab = NULL;
+
+- if (!opt) {
+- NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
+- return -EINVAL;
+- }
+-
+- err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
+- extack);
++ err = cbq_opt_parse(tb, opt, extack);
+ if (err < 0)
+ return err;
+
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index 4a403d35438f..284ab2dcf47f 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index bad1cbe59a56..05605b30bef3 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
+ goto errout;
+
+ err = -EINVAL;
++ if (!tb[TCA_DSMARK_INDICES])
++ goto errout;
+ indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
+
+ if (hweight32(indices) != 1)
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 11c2873ec68b..2f2967dcf15a 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -668,12 +668,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+ if (err < 0)
+ goto skip;
+
+- if (ecmd.base.speed != SPEED_UNKNOWN)
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
+ speed = ecmd.base.speed;
+
+ skip:
+- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+- speed * 1000 * 1000);
++ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 2050fd386642..fd9af899637d 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -163,6 +163,7 @@ struct tipc_link {
+ struct {
+ u16 len;
+ u16 limit;
++ struct sk_buff *target_bskb;
+ } backlog[5];
+ u16 snd_nxt;
+ u16 prev_from;
+@@ -872,6 +873,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
+ void tipc_link_reset(struct tipc_link *l)
+ {
+ struct sk_buff_head list;
++ u32 imp;
+
+ __skb_queue_head_init(&list);
+
+@@ -893,11 +895,10 @@ void tipc_link_reset(struct tipc_link *l)
+ __skb_queue_purge(&l->deferdq);
+ __skb_queue_purge(&l->backlogq);
+ __skb_queue_purge(&l->failover_deferdq);
+- l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
++ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
++ l->backlog[imp].len = 0;
++ l->backlog[imp].target_bskb = NULL;
++ }
+ kfree_skb(l->reasm_buf);
+ kfree_skb(l->failover_reasm_skb);
+ l->reasm_buf = NULL;
+@@ -938,7 +939,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff_head *backlogq = &l->backlogq;
+- struct sk_buff *skb, *_skb, *bskb;
++ struct sk_buff *skb, *_skb, **tskb;
+ int pkt_cnt = skb_queue_len(list);
+ int rc = 0;
+
+@@ -988,19 +989,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ seqno++;
+ continue;
+ }
+- if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
++ tskb = &l->backlog[imp].target_bskb;
++ if (tipc_msg_bundle(*tskb, hdr, mtu)) {
+ kfree_skb(__skb_dequeue(list));
+ l->stats.sent_bundled++;
+ continue;
+ }
+- if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
++ if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
+ kfree_skb(__skb_dequeue(list));
+- __skb_queue_tail(backlogq, bskb);
+- l->backlog[msg_importance(buf_msg(bskb))].len++;
++ __skb_queue_tail(backlogq, *tskb);
++ l->backlog[imp].len++;
+ l->stats.sent_bundled++;
+ l->stats.sent_bundles++;
+ continue;
+ }
++ l->backlog[imp].target_bskb = NULL;
+ l->backlog[imp].len += skb_queue_len(list);
+ skb_queue_splice_tail_init(list, backlogq);
+ }
+@@ -1016,6 +1019,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
+ u16 seqno = l->snd_nxt;
+ u16 ack = l->rcv_nxt - 1;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
++ u32 imp;
+
+ while (skb_queue_len(&l->transmq) < l->window) {
+ skb = skb_peek(&l->backlogq);
+@@ -1026,7 +1030,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
+ break;
+ __skb_dequeue(&l->backlogq);
+ hdr = buf_msg(skb);
+- l->backlog[msg_importance(hdr)].len--;
++ imp = msg_importance(hdr);
++ l->backlog[imp].len--;
++ if (unlikely(skb == l->backlog[imp].target_bskb))
++ l->backlog[imp].target_bskb = NULL;
+ __skb_queue_tail(&l->transmq, skb);
+ /* next retransmit attempt */
+ if (link_is_bc_sndlink(l))
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index f48e5857210f..b956ce4a40ef 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -484,10 +484,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+ bmsg = buf_msg(_skb);
+ tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+ INT_H_SIZE, dnode);
+- if (msg_isdata(msg))
+- msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+- else
+- msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
++ msg_set_importance(bmsg, msg_importance(msg));
+ msg_set_seqno(bmsg, msg_seqno(msg));
+ msg_set_ack(bmsg, msg_ack(msg));
+ msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index ab47bf3ab66e..2ab43b2bba31 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
+ }
+ EXPORT_SYMBOL_GPL(__vsock_create);
+
+-static void __vsock_release(struct sock *sk)
++static void __vsock_release(struct sock *sk, int level)
+ {
+ if (sk) {
+ struct sk_buff *skb;
+@@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
+ vsk = vsock_sk(sk);
+ pending = NULL; /* Compiler warning. */
+
++ /* The release call is supposed to use lock_sock_nested()
++ * rather than lock_sock(), if a sock lock should be acquired.
++ */
+ transport->release(vsk);
+
+- lock_sock(sk);
++ /* When "level" is SINGLE_DEPTH_NESTING, use the nested
++ * version to avoid the warning "possible recursive locking
++ * detected". When "level" is 0, lock_sock_nested(sk, level)
++ * is the same as lock_sock(sk).
++ */
++ lock_sock_nested(sk, level);
+ sock_orphan(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+@@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
+
+ /* Clean up any sockets that never were accepted. */
+ while ((pending = vsock_dequeue_accept(sk)) != NULL) {
+- __vsock_release(pending);
++ __vsock_release(pending, SINGLE_DEPTH_NESTING);
+ sock_put(pending);
+ }
+
+@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
+
+ static int vsock_release(struct socket *sock)
+ {
+- __vsock_release(sock->sk);
++ __vsock_release(sock->sk, 0);
+ sock->sk = NULL;
+ sock->state = SS_FREE;
+
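The recursion here is listener to unaccepted child, and both are struct sock, so lockdep sees the same lock class taken twice and would warn. Passing SINGLE_DEPTH_NESTING on the inner acquisition marks the nesting as intentional; it is safe because an unaccepted child has no pending children of its own, so the depth never exceeds one. Skeleton of the pattern, with dequeue_pending as a hypothetical stand-in for vsock_dequeue_accept():

	static void release_tree(struct sock *sk, int level)
	{
		struct sock *child;

		lock_sock_nested(sk, level);	/* level 0 == plain lock_sock() */
		/* ... tear down sk's own state ... */
		while ((child = dequeue_pending(sk)) != NULL) {
			release_tree(child, SINGLE_DEPTH_NESTING);
			sock_put(child);
		}
		release_sock(sk);
	}
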
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index 6c81a911fc02..920f14705346 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -528,7 +528,7 @@ static void hvs_release(struct vsock_sock *vsk)
+ struct sock *sk = sk_vsock(vsk);
+ bool remove_sock;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ remove_sock = hvs_close_lock_held(vsk);
+ release_sock(sk);
+ if (remove_sock)
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index 6f1a8aff65c5..a7adffd062c7 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -790,7 +790,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
+ struct sock *sk = &vsk->sk;
+ bool remove_sock = true;
+
+- lock_sock(sk);
++ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ if (sk->sk_type == SOCK_STREAM)
+ remove_sock = virtio_transport_close(vsk);
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 3ec7ac70c313..c106167423a1 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3403,7 +3403,7 @@ static int selinux_inode_copy_up_xattr(const char *name)
+ static int selinux_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+ {
+- const struct task_security_struct *tsec = current_security();
++ const struct task_security_struct *tsec = selinux_cred(current_cred());
+ u32 parent_sid, newsid, clen;
+ int rc;
+ char *context;
+diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
+index 91c5395dd20c..586b7abd0aa7 100644
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -37,16 +37,6 @@ struct task_security_struct {
+ u32 sockcreate_sid; /* fscreate SID */
+ };
+
+-/*
+- * get the subjective security ID of the current task
+- */
+-static inline u32 current_sid(void)
+-{
+- const struct task_security_struct *tsec = current_security();
+-
+- return tsec->sid;
+-}
+-
+ enum label_initialized {
+ LABEL_INVALID, /* invalid or not initialized */
+ LABEL_INITIALIZED, /* initialized */
+@@ -185,4 +175,14 @@ static inline struct ipc_security_struct *selinux_ipc(
+ return ipc->security + selinux_blob_sizes.lbs_ipc;
+ }
+
++/*
++ * get the subjective security ID of the current task
++ */
++static inline u32 current_sid(void)
++{
++ const struct task_security_struct *tsec = selinux_cred(current_cred());
++
++ return tsec->sid;
++}
++
+ #endif /* _SELINUX_OBJSEC_H_ */
+diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
+index f1c93a7be9ec..38ac3da4e791 100644
+--- a/security/smack/smack_access.c
++++ b/security/smack/smack_access.c
+@@ -465,7 +465,7 @@ char *smk_parse_smack(const char *string, int len)
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return ERR_PTR(-EINVAL);
+
+- smack = kzalloc(i + 1, GFP_KERNEL);
++ smack = kzalloc(i + 1, GFP_NOFS);
+ if (smack == NULL)
+ return ERR_PTR(-ENOMEM);
+
+@@ -500,7 +500,7 @@ int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_catmap_setbit(&sap->attr.mls.cat,
+- cat, GFP_KERNEL);
++ cat, GFP_NOFS);
+ if (rc < 0) {
+ netlbl_catmap_free(sap->attr.mls.cat);
+ return rc;
+@@ -536,7 +536,7 @@ struct smack_known *smk_import_entry(const char *string, int len)
+ if (skp != NULL)
+ goto freeout;
+
+- skp = kzalloc(sizeof(*skp), GFP_KERNEL);
++ skp = kzalloc(sizeof(*skp), GFP_NOFS);
+ if (skp == NULL) {
+ skp = ERR_PTR(-ENOMEM);
+ goto freeout;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 4c5e5a438f8b..36b6b9d4cbaf 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -288,7 +288,7 @@ static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+ if (!(ip->i_opflags & IOP_XATTR))
+ return ERR_PTR(-EOPNOTSUPP);
+
+- buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
++ buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS);
+ if (buffer == NULL)
+ return ERR_PTR(-ENOMEM);
+
+@@ -937,7 +937,8 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
+
+ if (rc != 0)
+ return rc;
+- } else if (bprm->unsafe)
++ }
++ if (bprm->unsafe & ~LSM_UNSAFE_PTRACE)
+ return -EPERM;
+
+ bsp->smk_task = isp->smk_task;
+@@ -3925,6 +3926,8 @@ access_check:
+ skp = smack_ipv6host_label(&sadd);
+ if (skp == NULL)
+ skp = smack_net_ambient;
++ if (skb == NULL)
++ break;
+ #ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
+index b8265ee9923f..614b31aad168 100644
+--- a/tools/testing/selftests/net/udpgso.c
++++ b/tools/testing/selftests/net/udpgso.c
+@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V4,
+ .gso_len = CONST_MSS_V4,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single MSS: will fail with GSO, because the segment
+- * logic in udp4_ufo_fragment demands a gso skb to be > MTU
+- */
++ /* send a single MSS: will fall back to no GSO */
+ .tlen = CONST_MSS_V6,
+ .gso_len = CONST_MSS_V6,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
+ .tfail = true,
+ },
+ {
+- /* send a single 1B MSS: will fail, see single MSS above */
++ /* send a single 1B MSS: will fall back to no GSO */
+ .tlen = 1,
+ .gso_len = 1,
+- .tfail = true,
+ .r_num_mss = 1,
+ },
+ {
+diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
+index 97f9f491c541..c402464b038f 100644
+--- a/tools/testing/selftests/powerpc/tm/tm.h
++++ b/tools/testing/selftests/powerpc/tm/tm.h
+@@ -55,7 +55,8 @@ static inline bool failure_is_unavailable(void)
+ static inline bool failure_is_reschedule(void)
+ {
+ if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED ||
+- (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED)
++ (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED ||
++ (failure_code() & TM_CAUSE_KVM_FAC_UNAV) == TM_CAUSE_KVM_FAC_UNAV)
+ return true;
+
+ return false;
+diff --git a/usr/Makefile b/usr/Makefile
+index 4a70ae43c9cb..bdb3f52fadc4 100644
+--- a/usr/Makefile
++++ b/usr/Makefile
+@@ -11,6 +11,9 @@ datafile_y = initramfs_data.cpio$(suffix_y)
+ datafile_d_y = .$(datafile_y).d
+ AFLAGS_initramfs_data.o += -DINITRAMFS_IMAGE="usr/$(datafile_y)"
+
++# clean rules do not have CONFIG_INITRAMFS_COMPRESSION. So clean up after all
++# possible compression formats.
++clean-files += initramfs_data.cpio*
+
+ # Generate builtin.o based on initramfs_data.o
+ obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-10-05 11:43 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-10-05 11:43 UTC (permalink / raw
To: gentoo-commits
commit: 29079edde606895b6285af1ff25148d87828600a
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 5 11:42:57 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct 5 11:42:57 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=29079edd
Linux patch 5.2.19
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1018_linux-5.2.19.patch | 12350 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 12354 insertions(+)
diff --git a/0000_README b/0000_README
index dc5ec25..71b680e 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch: 1017_linux-5.2.18.patch
From: https://www.kernel.org
Desc: Linux 5.2.18
+Patch: 1018_linux-5.2.19.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.19
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1018_linux-5.2.19.patch b/1018_linux-5.2.19.patch
new file mode 100644
index 0000000..66abaa4
--- /dev/null
+++ b/1018_linux-5.2.19.patch
@@ -0,0 +1,12350 @@
+diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
+index 7d7c191102a7..11298f0ce44d 100644
+--- a/Documentation/sound/hd-audio/models.rst
++++ b/Documentation/sound/hd-audio/models.rst
+@@ -260,6 +260,9 @@ alc295-hp-x360
+ HP Spectre X360 fixups
+ alc-sense-combo
+ Headset button support for Chrome platform
++huawei-mbx-stereo
++ Enable initialization verbs for Huawei MBX stereo speakers;
++ this may be risky; try it at your own risk
+
+ ALC66x/67x/892
+ ==============
+diff --git a/Makefile b/Makefile
+index 440e473687eb..5c981a5c882f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
+index ebfe28c2f544..a1fd3e63e86e 100644
+--- a/arch/arm/boot/dts/am3517-evm.dts
++++ b/arch/arm/boot/dts/am3517-evm.dts
+@@ -124,10 +124,11 @@
+ };
+
+ lcd0: display@0 {
+- compatible = "panel-dpi";
++ /* This isn't the exact LCD, but the timings meet spec */
++ /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
++ compatible = "newhaven,nhd-4.3-480272ef-atxl";
+ label = "15";
+- status = "okay";
+- pinctrl-names = "default";
++ backlight = <&bl>;
+ enable-gpios = <&gpio6 16 GPIO_ACTIVE_HIGH>; /* gpio176, lcd INI */
+ vcc-supply = <&vdd_io_reg>;
+
+@@ -136,22 +137,6 @@
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+-
+- panel-timing {
+- clock-frequency = <9000000>;
+- hactive = <480>;
+- vactive = <272>;
+- hfront-porch = <3>;
+- hback-porch = <2>;
+- hsync-len = <42>;
+- vback-porch = <3>;
+- vfront-porch = <4>;
+- vsync-len = <11>;
+- hsync-active = <0>;
+- vsync-active = <0>;
+- de-active = <1>;
+- pixelclk-active = <1>;
+- };
+ };
+
+ bl: backlight {
+diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+index f78db6809cca..9eb48cabcca4 100644
+--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
++++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+@@ -440,6 +440,7 @@
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+index e0f470fe54c8..4398f2d1fe88 100644
+--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
++++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+@@ -440,6 +440,7 @@
+ regulator-name = "vdd_ldo10";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
++ regulator-always-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
+index 895fbde4d433..c1ed83131b49 100644
+--- a/arch/arm/boot/dts/imx7-colibri.dtsi
++++ b/arch/arm/boot/dts/imx7-colibri.dtsi
+@@ -323,6 +323,7 @@
+ vmmc-supply = <®_module_3v3>;
+ vqmmc-supply = <®_DCDC3>;
+ non-removable;
++ sdhci-caps-mask = <0x80000000 0x0>;
+ };
+
+ &iomuxc {
+diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+index e61567437d73..62d5e9a4a781 100644
+--- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
++++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
+@@ -44,7 +44,7 @@
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <ðphy0>;
+ fsl,magic-packet;
+ status = "okay";
+@@ -70,7 +70,7 @@
+ <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+- phy-mode = "rgmii";
++ phy-mode = "rgmii-id";
+ phy-handle = <ðphy1>;
+ fsl,magic-packet;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+index 642e809e757a..449cc7616da6 100644
+--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+@@ -108,7 +108,6 @@
+ &dss {
+ status = "ok";
+ vdds_dsi-supply = <&vpll2>;
+- vdda_video-supply = <&video_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dss_dpi_pins1>;
+ port {
+@@ -124,44 +123,20 @@
+ display0 = &lcd0;
+ };
+
+- video_reg: video_reg {
+- pinctrl-names = "default";
+- pinctrl-0 = <&panel_pwr_pins>;
+- compatible = "regulator-fixed";
+- regulator-name = "fixed-supply";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
+- gpio = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
+- };
+-
+ lcd0: display {
+- compatible = "panel-dpi";
++ /* This isn't the exact LCD, but the timings meet spec */
++ /* To make it work, set CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=4 */
++ compatible = "newhaven,nhd-4.3-480272ef-atxl";
+ label = "15";
+- status = "okay";
+- /* default-on; */
+ pinctrl-names = "default";
+-
++ pinctrl-0 = <&panel_pwr_pins>;
++ backlight = <&bl>;
++ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
+ port {
+ lcd_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+-
+- panel-timing {
+- clock-frequency = <9000000>;
+- hactive = <480>;
+- vactive = <272>;
+- hfront-porch = <3>;
+- hback-porch = <2>;
+- hsync-len = <42>;
+- vback-porch = <3>;
+- vfront-porch = <4>;
+- vsync-len = <11>;
+- hsync-active = <0>;
+- vsync-active = <0>;
+- de-active = <1>;
+- pixelclk-active = <1>;
+- };
+ };
+
+ bl: backlight {
+diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
+index c7bf9c493646..64eb896907bf 100644
+--- a/arch/arm/configs/omap2plus_defconfig
++++ b/arch/arm/configs/omap2plus_defconfig
+@@ -363,6 +363,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
+ CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
+ CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
+ CONFIG_DRM_TILCDC=m
++CONFIG_DRM_PANEL_SIMPLE=m
+ CONFIG_FB=y
+ CONFIG_FIRMWARE_EDID=y
+ CONFIG_FB_MODE_HELPERS=y
+diff --git a/arch/arm/mach-at91/.gitignore b/arch/arm/mach-at91/.gitignore
+new file mode 100644
+index 000000000000..2ecd6f51c8a9
+--- /dev/null
++++ b/arch/arm/mach-at91/.gitignore
+@@ -0,0 +1 @@
++pm_data-offsets.h
+diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
+index 31b61f0e1c07..de64301dcff2 100644
+--- a/arch/arm/mach-at91/Makefile
++++ b/arch/arm/mach-at91/Makefile
+@@ -19,9 +19,10 @@ ifeq ($(CONFIG_PM_DEBUG),y)
+ CFLAGS_pm.o += -DDEBUG
+ endif
+
+-include/generated/at91_pm_data-offsets.h: arch/arm/mach-at91/pm_data-offsets.s FORCE
++$(obj)/pm_data-offsets.h: $(obj)/pm_data-offsets.s FORCE
+ $(call filechk,offsets,__PM_DATA_OFFSETS_H__)
+
+-arch/arm/mach-at91/pm_suspend.o: include/generated/at91_pm_data-offsets.h
++$(obj)/pm_suspend.o: $(obj)/pm_data-offsets.h
+
+ targets += pm_data-offsets.s
++clean-files += pm_data-offsets.h
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index c751f047b116..ed57c879d4e1 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -10,7 +10,7 @@
+ #include <linux/linkage.h>
+ #include <linux/clk/at91_pmc.h>
+ #include "pm.h"
+-#include "generated/at91_pm_data-offsets.h"
++#include "pm_data-offsets.h"
+
+ #define SRAMC_SELF_FRESH_ACTIVE 0x01
+ #define SRAMC_SELF_FRESH_EXIT 0x00
+diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
+index 1f0da76a39de..7b7280c21ee0 100644
+--- a/arch/arm/mach-ep93xx/edb93xx.c
++++ b/arch/arm/mach-ep93xx/edb93xx.c
+@@ -103,7 +103,7 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
+ };
+
+ static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
+- .dev_id = "ep93xx-spi.0",
++ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
+ { },
+diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
+index e2658e22bba1..8a53b74dc4b2 100644
+--- a/arch/arm/mach-ep93xx/simone.c
++++ b/arch/arm/mach-ep93xx/simone.c
+@@ -73,7 +73,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
+ * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
+ */
+ static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
+- .dev_id = "ep93xx-spi.0",
++ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
+ { },
+diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
+index 582e06e104fd..e0e1b11032f1 100644
+--- a/arch/arm/mach-ep93xx/ts72xx.c
++++ b/arch/arm/mach-ep93xx/ts72xx.c
+@@ -267,7 +267,7 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
+ * goes through CPLD
+ */
+ static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
+- .dev_id = "ep93xx-spi.0",
++ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
+ { },
+@@ -316,7 +316,7 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
+ };
+
+ static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
+- .dev_id = "ep93xx-spi.0",
++ .dev_id = "spi0",
+ .table = {
+ /* DIO_17 */
+ GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
+diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
+index a88a1d807b32..cbcba3136d74 100644
+--- a/arch/arm/mach-ep93xx/vision_ep9307.c
++++ b/arch/arm/mach-ep93xx/vision_ep9307.c
+@@ -242,7 +242,7 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
+ };
+
+ static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
+- .dev_id = "ep93xx-spi.0",
++ .dev_id = "spi0",
+ .table = {
+ GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
+diff --git a/arch/arm/mach-omap2/.gitignore b/arch/arm/mach-omap2/.gitignore
+new file mode 100644
+index 000000000000..79a8d6ea7152
+--- /dev/null
++++ b/arch/arm/mach-omap2/.gitignore
+@@ -0,0 +1 @@
++pm-asm-offsets.h
+diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
+index 85d1b13c9215..26baeb6477af 100644
+--- a/arch/arm/mach-omap2/Makefile
++++ b/arch/arm/mach-omap2/Makefile
+@@ -236,9 +236,10 @@ obj-y += omap_phy_internal.o
+
+ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
+
+-include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
++$(obj)/pm-asm-offsets.h: $(obj)/pm-asm-offsets.s FORCE
+ $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
+
+-$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
++$(obj)/sleep33xx.o $(obj)/sleep43xx.o: $(obj)/pm-asm-offsets.h
+
+ targets += pm-asm-offsets.s
++clean-files += pm-asm-offsets.h
+diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S
+index 47a816468cdb..a003769121aa 100644
+--- a/arch/arm/mach-omap2/sleep33xx.S
++++ b/arch/arm/mach-omap2/sleep33xx.S
+@@ -6,7 +6,6 @@
+ * Dave Gerlach, Vaibhav Bedia
+ */
+
+-#include <generated/ti-pm-asm-offsets.h>
+ #include <linux/linkage.h>
+ #include <linux/platform_data/pm33xx.h>
+ #include <linux/ti-emif-sram.h>
+@@ -15,6 +14,7 @@
+
+ #include "iomap.h"
+ #include "cm33xx.h"
++#include "pm-asm-offsets.h"
+
+ #define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED 0x00030000
+ #define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE 0x0003
+diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
+index 0c1031442571..27b13d47cf19 100644
+--- a/arch/arm/mach-omap2/sleep43xx.S
++++ b/arch/arm/mach-omap2/sleep43xx.S
+@@ -6,7 +6,6 @@
+ * Dave Gerlach, Vaibhav Bedia
+ */
+
+-#include <generated/ti-pm-asm-offsets.h>
+ #include <linux/linkage.h>
+ #include <linux/ti-emif-sram.h>
+ #include <linux/platform_data/pm33xx.h>
+@@ -19,6 +18,7 @@
+ #include "iomap.h"
+ #include "omap-secure.h"
+ #include "omap44xx.h"
++#include "pm-asm-offsets.h"
+ #include "prm33xx.h"
+ #include "prcm43xx.h"
+
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index a7cfe07156f4..e65ee8180c35 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -57,7 +57,7 @@ int zynq_cpun_start(u32 address, int cpu)
+ * 0x4: Jump by mov instruction
+ * 0x8: Jumping address
+ */
+- memcpy((__force void *)zero, &zynq_secondary_trampoline,
++ memcpy_toio(zero, &zynq_secondary_trampoline,
+ trampoline_size);
+ writel(address, zero + trampoline_size);
+
+diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
+index 61d834157bc0..382e1c2855e8 100644
+--- a/arch/arm/mm/copypage-xscale.c
++++ b/arch/arm/mm/copypage-xscale.c
+@@ -42,6 +42,7 @@ static void mc_copy_user_page(void *from, void *to)
+ * when prefetching destination as well. (NP)
+ */
+ asm volatile ("\
++.arch xscale \n\
+ pld [%0, #0] \n\
+ pld [%0, #32] \n\
+ pld [%1, #0] \n\
+@@ -106,8 +107,9 @@ void
+ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
+ {
+ void *ptr, *kaddr = kmap_atomic(page);
+- asm volatile(
+- "mov r1, %2 \n\
++ asm volatile("\
++.arch xscale \n\
++ mov r1, %2 \n\
+ mov r2, #0 \n\
+ mov r3, #0 \n\
+ 1: mov ip, %0 \n\
+diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
+index ce42cc640a61..71d85ff323f7 100644
+--- a/arch/arm/plat-samsung/watchdog-reset.c
++++ b/arch/arm/plat-samsung/watchdog-reset.c
+@@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
+ #ifdef CONFIG_OF
+ static const struct of_device_id s3c2410_wdt_match[] = {
+ { .compatible = "samsung,s3c2410-wdt" },
++ { .compatible = "samsung,s3c6410-wdt" },
+ {},
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+index 994468671b19..f911bd36c6d0 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+@@ -800,6 +800,7 @@
+ <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+@@ -811,6 +812,7 @@
+ <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+@@ -822,6 +824,7 @@
+ <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
+ fifo-depth = <0x100>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index e7d46631cc42..b1454d117cd2 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -51,14 +51,6 @@
+ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+-#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
+-({ \
+- u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
+- u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
+- \
+- _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
+- })
+-
+ #define ARM_CPU_IMP_ARM 0x41
+ #define ARM_CPU_IMP_APM 0x50
+ #define ARM_CPU_IMP_CAVIUM 0x43
+@@ -159,10 +151,19 @@ struct midr_range {
+ #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
+ #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+
++static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
++ u32 rv_max)
++{
++ u32 _model = midr & MIDR_CPU_MODEL_MASK;
++ u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
++
++ return _model == model && rv >= rv_min && rv <= rv_max;
++}
++
+ static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+ {
+- return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
+- range->rv_min, range->rv_max);
++ return midr_is_cpu_model_range(midr, range->model,
++ range->rv_min, range->rv_max);
+ }
+
+ static inline bool
+diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
+index ed57b760f38c..a17393ff6677 100644
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
+ return esr;
+ }
+
++asmlinkage void enter_from_user_mode(void);
++
+ #endif /* __ASM_EXCEPTION_H */
+diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
+index 8af7a85f76bd..bc3949064725 100644
+--- a/arch/arm64/include/asm/tlbflush.h
++++ b/arch/arm64/include/asm/tlbflush.h
+@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
+ dsb(ishst);
+ __tlbi(vaae1is, addr);
+ dsb(ish);
++ isb();
+ }
+ #endif
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 68faf535f40a..d3fbb89a31e5 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -894,7 +894,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
+ u32 midr = read_cpuid_id();
+
+ /* Cavium ThunderX pass 1.x and 2.x */
+- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
++ return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 320a30dbe35e..84a822748c84 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -30,9 +30,9 @@
+ * Context tracking subsystem. Used to instrument transitions
+ * between user and kernel mode.
+ */
+- .macro ct_user_exit
++ .macro ct_user_exit_irqoff
+ #ifdef CONFIG_CONTEXT_TRACKING
+- bl context_tracking_user_exit
++ bl enter_from_user_mode
+ #endif
+ .endm
+
+@@ -792,8 +792,8 @@ el0_cp15:
+ /*
+ * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_cp15instr
+@@ -805,8 +805,8 @@ el0_da:
+ * Data abort handling
+ */
+ mrs x26, far_el1
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ clear_address_tag x0, x26
+ mov x1, x25
+ mov x2, sp
+@@ -818,11 +818,11 @@ el0_ia:
+ */
+ mrs x26, far_el1
+ gic_prio_kentry_setup tmp=x0
++ ct_user_exit_irqoff
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+ #endif
+- ct_user_exit
+ mov x0, x26
+ mov x1, x25
+ mov x2, sp
+@@ -832,8 +832,8 @@ el0_fpsimd_acc:
+ /*
+ * Floating Point or Advanced SIMD access
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_fpsimd_acc
+@@ -842,8 +842,8 @@ el0_sve_acc:
+ /*
+ * Scalable Vector Extension access
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sve_acc
+@@ -852,8 +852,8 @@ el0_fpsimd_exc:
+ /*
+ * Floating Point, Advanced SIMD or SVE exception
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_fpsimd_exc
+@@ -868,11 +868,11 @@ el0_sp_pc:
+ * Stack or PC alignment exception handling
+ */
+ gic_prio_kentry_setup tmp=x0
++ ct_user_exit_irqoff
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+ #endif
+- ct_user_exit
+ mov x0, x26
+ mov x1, x25
+ mov x2, sp
+@@ -882,8 +882,8 @@ el0_undef:
+ /*
+ * Undefined instruction
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, sp
+ bl do_undefinstr
+ b ret_to_user
+@@ -891,8 +891,8 @@ el0_sys:
+ /*
+ * System instructions, for trapped cache maintenance instructions
+ */
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_sysinstr
+@@ -902,17 +902,18 @@ el0_dbg:
+ * Debug exception handling
+ */
+ tbnz x24, #0, el0_inv // EL0 only
++ mrs x24, far_el1
+ gic_prio_kentry_setup tmp=x3
+- mrs x0, far_el1
++ ct_user_exit_irqoff
++ mov x0, x24
+ mov x1, x25
+ mov x2, sp
+ bl do_debug_exception
+ enable_da_f
+- ct_user_exit
+ b ret_to_user
+ el0_inv:
++ ct_user_exit_irqoff
+ enable_daif
+- ct_user_exit
+ mov x0, sp
+ mov x1, #BAD_SYNC
+ mov x2, x25
+@@ -925,13 +926,13 @@ el0_irq:
+ kernel_entry 0
+ el0_irq_naked:
+ gic_prio_irq_setup pmr=x20, tmp=x0
++ ct_user_exit_irqoff
+ enable_da_f
+
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+ #endif
+
+- ct_user_exit
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+@@ -958,13 +959,14 @@ ENDPROC(el1_error)
+ el0_error:
+ kernel_entry 0
+ el0_error_naked:
+- mrs x1, esr_el1
++ mrs x25, esr_el1
+ gic_prio_kentry_setup tmp=x2
++ ct_user_exit_irqoff
+ enable_dbg
+ mov x0, sp
++ mov x1, x25
+ bl do_serror
+ enable_da_f
+- ct_user_exit
+ b ret_to_user
+ ENDPROC(el0_error)
+
+diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
+new file mode 100644
+index 000000000000..25a2a9b479c2
+--- /dev/null
++++ b/arch/arm64/kernel/image-vars.h
+@@ -0,0 +1,51 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Linker script variables to be set after section resolution, as
++ * ld.lld does not like variables assigned before SECTIONS is processed.
++ */
++#ifndef __ARM64_KERNEL_IMAGE_VARS_H
++#define __ARM64_KERNEL_IMAGE_VARS_H
++
++#ifndef LINKER_SCRIPT
++#error This file should only be included in vmlinux.lds.S
++#endif
++
++#ifdef CONFIG_EFI
++
++__efistub_stext_offset = stext - _text;
++
++/*
++ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
++ * isolate it from the kernel proper. The following symbols are legally
++ * accessed by the stub, so provide some aliases to make them accessible.
++ * Only include data symbols here, or text symbols of functions that are
++ * guaranteed to be safe when executed at another offset than they were
++ * linked at. The routines below are all implemented in assembler in a
++ * position independent manner
++ */
++__efistub_memcmp = __pi_memcmp;
++__efistub_memchr = __pi_memchr;
++__efistub_memcpy = __pi_memcpy;
++__efistub_memmove = __pi_memmove;
++__efistub_memset = __pi_memset;
++__efistub_strlen = __pi_strlen;
++__efistub_strnlen = __pi_strnlen;
++__efistub_strcmp = __pi_strcmp;
++__efistub_strncmp = __pi_strncmp;
++__efistub_strrchr = __pi_strrchr;
++__efistub___flush_dcache_area = __pi___flush_dcache_area;
++
++#ifdef CONFIG_KASAN
++__efistub___memcpy = __pi_memcpy;
++__efistub___memmove = __pi_memmove;
++__efistub___memset = __pi_memset;
++#endif
++
++__efistub__text = _text;
++__efistub__end = _end;
++__efistub__edata = _edata;
++__efistub_screen_info = screen_info;
++
++#endif
++
++#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
+diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
+index 2b85c0d6fa3d..c7d38c660372 100644
+--- a/arch/arm64/kernel/image.h
++++ b/arch/arm64/kernel/image.h
+@@ -65,46 +65,4 @@
+ DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
+ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
+
+-#ifdef CONFIG_EFI
+-
+-/*
+- * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
+- * https://github.com/ClangBuiltLinux/linux/issues/561
+- */
+-__efistub_stext_offset = ABSOLUTE(stext - _text);
+-
+-/*
+- * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+- * isolate it from the kernel proper. The following symbols are legally
+- * accessed by the stub, so provide some aliases to make them accessible.
+- * Only include data symbols here, or text symbols of functions that are
+- * guaranteed to be safe when executed at another offset than they were
+- * linked at. The routines below are all implemented in assembler in a
+- * position independent manner
+- */
+-__efistub_memcmp = __pi_memcmp;
+-__efistub_memchr = __pi_memchr;
+-__efistub_memcpy = __pi_memcpy;
+-__efistub_memmove = __pi_memmove;
+-__efistub_memset = __pi_memset;
+-__efistub_strlen = __pi_strlen;
+-__efistub_strnlen = __pi_strnlen;
+-__efistub_strcmp = __pi_strcmp;
+-__efistub_strncmp = __pi_strncmp;
+-__efistub_strrchr = __pi_strrchr;
+-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+-
+-#ifdef CONFIG_KASAN
+-__efistub___memcpy = __pi_memcpy;
+-__efistub___memmove = __pi_memmove;
+-__efistub___memset = __pi_memset;
+-#endif
+-
+-__efistub__text = _text;
+-__efistub__end = _end;
+-__efistub__edata = _edata;
+-__efistub_screen_info = screen_info;
+-
+-#endif
+-
+ #endif /* __ARM64_KERNEL_IMAGE_H */
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 985721a1264c..b6706a886037 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -7,9 +7,11 @@
+ */
+
+ #include <linux/bug.h>
++#include <linux/context_tracking.h>
+ #include <linux/signal.h>
+ #include <linux/personality.h>
+ #include <linux/kallsyms.h>
++#include <linux/kprobes.h>
+ #include <linux/spinlock.h>
+ #include <linux/uaccess.h>
+ #include <linux/hardirq.h>
+@@ -905,6 +907,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+ nmi_exit();
+ }
+
++asmlinkage void enter_from_user_mode(void)
++{
++ CT_WARN_ON(ct_state() != CONTEXT_USER);
++ user_exit_irqoff();
++}
++NOKPROBE_SYMBOL(enter_from_user_mode);
++
+ void __pte_error(const char *file, int line, unsigned long val)
+ {
+ pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index 7fa008374907..803b24d2464a 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -245,6 +245,8 @@ SECTIONS
+ HEAD_SYMBOLS
+ }
+
++#include "image-vars.h"
++
+ /*
+ * The HYP init code and ID map text can't be longer than a page each,
+ * and should not cross a page boundary.
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index f3c795278def..b1ee6cb4b17f 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -570,8 +570,12 @@ void free_initmem(void)
+ #ifdef CONFIG_BLK_DEV_INITRD
+ void __init free_initrd_mem(unsigned long start, unsigned long end)
+ {
++ unsigned long aligned_start, aligned_end;
++
++ aligned_start = __virt_to_phys(start) & PAGE_MASK;
++ aligned_end = PAGE_ALIGN(__virt_to_phys(end));
++ memblock_free(aligned_start, aligned_end - aligned_start);
+ free_reserved_area((void *)start, (void *)end, 0, "initrd");
+- memblock_free(__virt_to_phys(start), end - start);
+ }
+ #endif
+
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 7dbf2be470f6..28a8f7b87ff0 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -286,6 +286,15 @@ skip_pgd:
+ msr sctlr_el1, x18
+ isb
+
++ /*
++ * Invalidate the local I-cache so that any instructions fetched
++ * speculatively from the PoC are discarded, since they may have
++ * been dynamically patched at the PoU.
++ */
++ ic iallu
++ dsb nsh
++ isb
++
+ /* Set the flag to zero to indicate that we're all done */
+ str wzr, [flag_ptr]
+ ret
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 326448f9df16..1a42ba885188 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
+ void
+ module_arch_cleanup (struct module *mod)
+ {
+- if (mod->arch.init_unw_table)
++ if (mod->arch.init_unw_table) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+- if (mod->arch.core_unw_table)
++ mod->arch.init_unw_table = NULL;
++ }
++ if (mod->arch.core_unw_table) {
+ unw_remove_unwind_table(mod->arch.core_unw_table);
++ mod->arch.core_unw_table = NULL;
++ }
+ }
+
+ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
+diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
+index 533008262b69..5e5601c382b8 100644
+--- a/arch/m68k/include/asm/atarihw.h
++++ b/arch/m68k/include/asm/atarihw.h
+@@ -22,7 +22,6 @@
+
+ #include <linux/types.h>
+ #include <asm/bootinfo-atari.h>
+-#include <asm/raw_io.h>
+ #include <asm/kmap.h>
+
+ extern u_long atari_mch_cookie;
+@@ -132,14 +131,6 @@ extern struct atari_hw_present atari_hw_present;
+ */
+
+
+-#define atari_readb raw_inb
+-#define atari_writeb raw_outb
+-
+-#define atari_inb_p raw_inb
+-#define atari_outb_p raw_outb
+-
+-
+-
+ #include <linux/mm.h>
+ #include <asm/cacheflush.h>
+
+diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
+index 6c03ca5bc436..819f611dccf2 100644
+--- a/arch/m68k/include/asm/io_mm.h
++++ b/arch/m68k/include/asm/io_mm.h
+@@ -29,7 +29,11 @@
+ #include <asm-generic/iomap.h>
+
+ #ifdef CONFIG_ATARI
+-#include <asm/atarihw.h>
++#define atari_readb raw_inb
++#define atari_writeb raw_outb
++
++#define atari_inb_p raw_inb
++#define atari_outb_p raw_outb
+ #endif
+
+
+diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
+index d9a08bed4b12..f653b60f2afc 100644
+--- a/arch/m68k/include/asm/macintosh.h
++++ b/arch/m68k/include/asm/macintosh.h
+@@ -4,6 +4,7 @@
+
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+
+ #include <asm/bootinfo-mac.h>
+
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index c345b79414a9..403f7e193833 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -39,13 +39,11 @@ endif
+ uname := $(shell uname -m)
+ KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
+
+-ifdef CONFIG_PPC64
+ new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
+
+ ifeq ($(new_nm),y)
+ NM := $(NM) --synthetic
+ endif
+-endif
+
+ # BITS is used as extension for files which are available in a 32 bit
+ # and a 64 bit version to simplify shared Makefiles.
+diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
+index 186109bdd41b..e04b20625cb9 100644
+--- a/arch/powerpc/platforms/powernv/opal-imc.c
++++ b/arch/powerpc/platforms/powernv/opal-imc.c
+@@ -53,9 +53,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
+ struct imc_pmu *pmu_ptr)
+ {
+ static u64 loc, *imc_mode_addr, *imc_cmd_addr;
+- int chip = 0, nid;
+ char mode[16], cmd[16];
+ u32 cb_offset;
++ struct imc_mem_info *ptr = pmu_ptr->mem_info;
+
+ imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
+
+@@ -69,20 +69,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
+ if (of_property_read_u32(node, "cb_offset", &cb_offset))
+ cb_offset = IMC_CNTL_BLK_OFFSET;
+
+- for_each_node(nid) {
+- loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
++ while (ptr->vbase != NULL) {
++ loc = (u64)(ptr->vbase) + cb_offset;
+ imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
+- sprintf(mode, "imc_mode_%d", nid);
++ sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
+ if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
+ imc_mode_addr))
+ goto err;
+
+ imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
+- sprintf(cmd, "imc_cmd_%d", nid);
++ sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
+ if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
+ imc_cmd_addr))
+ goto err;
+- chip++;
++ ptr++;
+ }
+ return;
+
+diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
+index d00f84add5f4..6d2dbb5089d5 100644
+--- a/arch/s390/crypto/aes_s390.c
++++ b/arch/s390/crypto/aes_s390.c
+@@ -586,6 +586,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
++ if (!nbytes)
++ return -EINVAL;
++
+ if (unlikely(!xts_ctx->fc))
+ return xts_fallback_encrypt(desc, dst, src, nbytes);
+
+@@ -600,6 +603,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct blkcipher_walk walk;
+
++ if (!nbytes)
++ return -EINVAL;
++
+ if (unlikely(!xts_ctx->fc))
+ return xts_fallback_decrypt(desc, dst, src, nbytes);
+
+diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
+index 70d87db54e62..4c0690fc5167 100644
+--- a/arch/s390/include/asm/string.h
++++ b/arch/s390/include/asm/string.h
+@@ -71,11 +71,16 @@ extern void *__memmove(void *dest, const void *src, size_t n);
+ #define memcpy(dst, src, len) __memcpy(dst, src, len)
+ #define memmove(dst, src, len) __memmove(dst, src, len)
+ #define memset(s, c, n) __memset(s, c, n)
++#define strlen(s) __strlen(s)
++
++#define __no_sanitize_prefix_strfunc(x) __##x
+
+ #ifndef __NO_FORTIFY
+ #define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+ #endif
+
++#else
++#define __no_sanitize_prefix_strfunc(x) x
+ #endif /* defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) */
+
+ void *__memset16(uint16_t *s, uint16_t v, size_t count);
+@@ -163,8 +168,8 @@ static inline char *strcpy(char *dst, const char *src)
+ }
+ #endif
+
+-#ifdef __HAVE_ARCH_STRLEN
+-static inline size_t strlen(const char *s)
++#if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
++static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s)
+ {
+ register unsigned long r0 asm("0") = 0;
+ const char *tmp = s;
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index f60ddd655c78..82a57d344b9b 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -58,6 +58,9 @@
+ #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
+ #define INTEL_FAM6_ICELAKE_NNPI 0x9D
+
++#define INTEL_FAM6_TIGERLAKE_L 0x8C
++#define INTEL_FAM6_TIGERLAKE 0x8D
++
+ /* "Small Core" Processors (Atom) */
+
+ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 65d49452e6e0..6d02a0c3fe92 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1576,6 +1576,13 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
+ struct kvm_lapic_irq *irq);
+
++static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
++{
++ /* We can only post Fixed and LowPrio IRQs */
++ return (irq->delivery_mode == dest_Fixed ||
++ irq->delivery_mode == dest_LowestPrio);
++}
++
+ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+ {
+ if (kvm_x86_ops->vcpu_blocking)
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 002aedc69393..8c26b696d893 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -21,6 +21,7 @@
+ #define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
+ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
+ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
++#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
+
+ /* Protect the PCI config register pairs used for SMN and DF indirect access. */
+ static DEFINE_MUTEX(smn_mutex);
+@@ -50,6 +51,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ {}
+ };
+ EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
+@@ -63,6 +65,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
++ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
+ {}
+ };
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 2f067b443326..a18d6dd934e5 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1462,54 +1462,72 @@ static void lapic_setup_esr(void)
+ oldvalue, value);
+ }
+
+-static void apic_pending_intr_clear(void)
++#define APIC_IR_REGS APIC_ISR_NR
++#define APIC_IR_BITS (APIC_IR_REGS * 32)
++#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
++
++union apic_ir {
++ unsigned long map[APIC_IR_MAPSIZE];
++ u32 regs[APIC_IR_REGS];
++};
++
++static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
+ {
+- long long max_loops = cpu_khz ? cpu_khz : 1000000;
+- unsigned long long tsc = 0, ntsc;
+- unsigned int queued;
+- unsigned long value;
+- int i, j, acked = 0;
++ int i, bit;
++
++ /* Read the IRRs */
++ for (i = 0; i < APIC_IR_REGS; i++)
++ irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
++
++ /* Read the ISRs */
++ for (i = 0; i < APIC_IR_REGS; i++)
++ isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
+
+- if (boot_cpu_has(X86_FEATURE_TSC))
+- tsc = rdtsc();
+ /*
+- * After a crash, we no longer service the interrupts and a pending
+- * interrupt from previous kernel might still have ISR bit set.
+- *
+- * Most probably by now CPU has serviced that pending interrupt and
+- * it might not have done the ack_APIC_irq() because it thought,
+- * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+- * does not clear the ISR bit and cpu thinks it has already serivced
+- * the interrupt. Hence a vector might get locked. It was noticed
+- * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
++ * If the ISR map is not empty, ACK the APIC and run another round
++ * to verify whether a pending IRR has been unblocked and turned
++ * into an ISR.
+ */
+- do {
+- queued = 0;
+- for (i = APIC_ISR_NR - 1; i >= 0; i--)
+- queued |= apic_read(APIC_IRR + i*0x10);
+-
+- for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+- value = apic_read(APIC_ISR + i*0x10);
+- for_each_set_bit(j, &value, 32) {
+- ack_APIC_irq();
+- acked++;
+- }
+- }
+- if (acked > 256) {
+- pr_err("LAPIC pending interrupts after %d EOI\n", acked);
+- break;
+- }
+- if (queued) {
+- if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
+- ntsc = rdtsc();
+- max_loops = (long long)cpu_khz << 10;
+- max_loops -= ntsc - tsc;
+- } else {
+- max_loops--;
+- }
+- }
+- } while (queued && max_loops > 0);
+- WARN_ON(max_loops <= 0);
++ if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
++ /*
++ * There can be multiple ISR bits set when a high priority
++ * interrupt preempted a lower priority one. Issue an ACK
++ * per set bit.
++ */
++ for_each_set_bit(bit, isr->map, APIC_IR_BITS)
++ ack_APIC_irq();
++ return true;
++ }
++
++ return !bitmap_empty(irr->map, APIC_IR_BITS);
++}
++
++/*
++ * After a crash, we no longer service the interrupts and a pending
++ * interrupt from previous kernel might still have ISR bit set.
++ *
++ * Most probably by now the CPU has serviced that pending interrupt and it
++ * might not have done the ack_APIC_irq() because it thought the interrupt
++ * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
++ * the ISR bit and cpu thinks it has already serviced the interrupt. Hence
++ * a vector might get locked. It was noticed for timer irq (vector
++ * 0x31). Issue an extra EOI to clear ISR.
++ *
++ * If there are pending IRR bits they turn into ISR bits after a higher
++ * priority ISR bit has been acked.
++ */
++static void apic_pending_intr_clear(void)
++{
++ union apic_ir irr, isr;
++ unsigned int i;
++
++ /* 512 loops are way oversized and give the APIC a chance to obey. */
++ for (i = 0; i < 512; i++) {
++ if (!apic_check_and_ack(&irr, &isr))
++ return;
++ }
++ /* Dump the IRR/ISR content if that failed */
++ pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
+ }
+
+ /**
+@@ -1532,6 +1550,14 @@ static void setup_local_APIC(void)
+ return;
+ }
+
++ /*
++ * If this comes from kexec/kcrash the APIC might be enabled in
++ * SPIV. Soft disable it before doing further initialization.
++ */
++ value = apic_read(APIC_SPIV);
++ value &= ~APIC_SPIV_APIC_ENABLED;
++ apic_write(APIC_SPIV, value);
++
+ #ifdef CONFIG_X86_32
+ /* Pound the ESR really hard over the head with a big hammer - mbligh */
+ if (lapic_is_integrated() && apic->disable_esr) {
+@@ -1577,6 +1603,7 @@ static void setup_local_APIC(void)
+ value &= ~APIC_TPRI_MASK;
+ apic_write(APIC_TASKPRI, value);
+
++ /* Clear eventually stale ISR/IRR bits */
+ apic_pending_intr_clear();
+
+ /*
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index fdacb864c3dd..2c5676b0a6e7 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -398,6 +398,17 @@ static int activate_reserved(struct irq_data *irqd)
+ if (!irqd_can_reserve(irqd))
+ apicd->can_reserve = false;
+ }
++
++ /*
++ * Check to ensure that the effective affinity mask is a subset of
++ * the user supplied affinity mask, and warn the user if it is not
++ */
++ if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
++ irq_data_get_affinity_mask(irqd))) {
++ pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
++ irqd->irq);
++ }
++
+ return ret;
+ }
+
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 4693e2f3a03e..f2a749586252 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -179,6 +179,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
+ irq_exit();
+ }
+
++static int register_stop_handler(void)
++{
++ return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
++ NMI_FLAG_FIRST, "smp_stop");
++}
++
+ static void native_stop_other_cpus(int wait)
+ {
+ unsigned long flags;
+@@ -212,39 +218,41 @@ static void native_stop_other_cpus(int wait)
+ apic->send_IPI_allbutself(REBOOT_VECTOR);
+
+ /*
+- * Don't wait longer than a second if the caller
+- * didn't ask us to wait.
++ * Don't wait longer than a second for IPI completion. The
++ * wait request is not checked here because that would
++ * prevent an NMI shutdown attempt in case that not all
++ * CPUs reach shutdown state.
+ */
+ timeout = USEC_PER_SEC;
+- while (num_online_cpus() > 1 && (wait || timeout--))
++ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
+ }
+-
+- /* if the REBOOT_VECTOR didn't work, try with the NMI */
+- if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
+- if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+- NMI_FLAG_FIRST, "smp_stop"))
+- /* Note: we ignore failures here */
+- /* Hope the REBOOT_IRQ is good enough */
+- goto finish;
+-
+- /* sync above data before sending IRQ */
+- wmb();
+
+- pr_emerg("Shutting down cpus with NMI\n");
++ /* if the REBOOT_VECTOR didn't work, try with the NMI */
++ if (num_online_cpus() > 1) {
++ /*
++ * If NMI IPI is enabled, try to register the stop handler
++ * and send the IPI. In any case try to wait for the other
++ * CPUs to stop.
++ */
++ if (!smp_no_nmi_ipi && !register_stop_handler()) {
++ /* Sync above data before sending IRQ */
++ wmb();
+
+- apic->send_IPI_allbutself(NMI_VECTOR);
++ pr_emerg("Shutting down cpus with NMI\n");
+
++ apic->send_IPI_allbutself(NMI_VECTOR);
++ }
+ /*
+- * Don't wait longer than a 10 ms if the caller
+- * didn't ask us to wait.
++ * Don't wait longer than 10 ms if the caller didn't
++ * request it. If wait is true, the machine hangs here if
++ * one or more CPUs do not reach shutdown state.
+ */
+ timeout = USEC_PER_MSEC * 10;
+ while (num_online_cpus() > 1 && (wait || timeout--))
+ udelay(1);
+ }
+
+-finish:
+ local_irq_save(flags);
+ disable_local_APIC();
+ mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 4a387a235424..2fe9912aeed7 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5377,6 +5377,8 @@ done_prefixes:
+ ctxt->memopp->addr.mem.ea + ctxt->_eip);
+
+ done:
++ if (rc == X86EMUL_PROPAGATE_FAULT)
++ ctxt->have_exception = true;
+ return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
+ }
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 66055ca29b6b..3070b1dd923a 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -389,8 +389,6 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
+ mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+ << shadow_nonpresent_or_rsvd_mask_len;
+
+- page_header(__pa(sptep))->mmio_cached = true;
+-
+ trace_mark_mmio_spte(sptep, gfn, access, gen);
+ mmu_spte_set(sptep, mask);
+ }
+@@ -5607,13 +5605,13 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+ }
+
+-static void free_mmu_pages(struct kvm_vcpu *vcpu)
++static void free_mmu_pages(struct kvm_mmu *mmu)
+ {
+- free_page((unsigned long)vcpu->arch.mmu->pae_root);
+- free_page((unsigned long)vcpu->arch.mmu->lm_root);
++ free_page((unsigned long)mmu->pae_root);
++ free_page((unsigned long)mmu->lm_root);
+ }
+
+-static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
++static int alloc_mmu_pages(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+ {
+ struct page *page;
+ int i;
+@@ -5634,9 +5632,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
+ if (!page)
+ return -ENOMEM;
+
+- vcpu->arch.mmu->pae_root = page_address(page);
++ mmu->pae_root = page_address(page);
+ for (i = 0; i < 4; ++i)
+- vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
++ mmu->pae_root[i] = INVALID_PAGE;
+
+ return 0;
+ }
+@@ -5644,6 +5642,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
+ int kvm_mmu_create(struct kvm_vcpu *vcpu)
+ {
+ uint i;
++ int ret;
+
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+@@ -5661,7 +5660,19 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
+ vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+
+ vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+- return alloc_mmu_pages(vcpu);
++
++ ret = alloc_mmu_pages(vcpu, &vcpu->arch.guest_mmu);
++ if (ret)
++ return ret;
++
++ ret = alloc_mmu_pages(vcpu, &vcpu->arch.root_mmu);
++ if (ret)
++ goto fail_allocate_root;
++
++ return ret;
++ fail_allocate_root:
++ free_mmu_pages(&vcpu->arch.guest_mmu);
++ return ret;
+ }
+
+
+@@ -5939,7 +5950,7 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
+
+-static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
++void kvm_mmu_zap_all(struct kvm *kvm)
+ {
+ struct kvm_mmu_page *sp, *node;
+ LIST_HEAD(invalid_list);
+@@ -5948,14 +5959,10 @@ static void __kvm_mmu_zap_all(struct kvm *kvm, bool mmio_only)
+ spin_lock(&kvm->mmu_lock);
+ restart:
+ list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
+- if (mmio_only && !sp->mmio_cached)
+- continue;
+ if (sp->role.invalid && sp->root_count)
+ continue;
+- if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) {
+- WARN_ON_ONCE(mmio_only);
++ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+ goto restart;
+- }
+ if (cond_resched_lock(&kvm->mmu_lock))
+ goto restart;
+ }
+@@ -5964,11 +5971,6 @@ restart:
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+-void kvm_mmu_zap_all(struct kvm *kvm)
+-{
+- return __kvm_mmu_zap_all(kvm, false);
+-}
+-
+ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ {
+ WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+@@ -5990,7 +5992,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
+ */
+ if (unlikely(gen == 0)) {
+ kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
+- __kvm_mmu_zap_all(kvm, true);
++ kvm_mmu_zap_all_fast(kvm);
+ }
+ }
+
+@@ -6134,7 +6136,8 @@ unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
+ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
+ {
+ kvm_mmu_unload(vcpu);
+- free_mmu_pages(vcpu);
++ free_mmu_pages(&vcpu->arch.root_mmu);
++ free_mmu_pages(&vcpu->arch.guest_mmu);
+ mmu_free_memory_caches(vcpu);
+ }
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 4ca86e70d3b4..ee7db075dd4d 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5252,7 +5252,8 @@ get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
+
+ kvm_set_msi_irq(kvm, e, &irq);
+
+- if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
++ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
++ !kvm_irq_is_postable(&irq)) {
+ pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
+ __func__, irq.vector);
+ return -1;
+@@ -5306,6 +5307,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ * 1. When cannot target interrupt to a specific vcpu.
+ * 2. Unsetting posted interrupt.
+ * 3. APIC virtialization is disabled for the vcpu.
++ * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
+ */
+ if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
+ kvm_vcpu_apicv_active(&svm->vcpu)) {
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index d5c12d5a5905..e5fdd5f14c8e 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7325,10 +7325,14 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ * irqbalance to make the interrupts single-CPU.
+ *
+ * We will support full lowest-priority interrupt later.
++ *
++ * In addition, we can only inject generic interrupts using
++ * the PI mechanism; refuse to route others through it.
+ */
+
+ kvm_set_msi_irq(kvm, e, &irq);
+- if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
++ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
++ !kvm_irq_is_postable(&irq)) {
+ /*
+ * Make sure the IRTE is in remapped mode if
+ * we don't handle it in posted mode.
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 4000bcff47b0..2c118c9da16c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -669,8 +669,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+ data, offset, len, access);
+ }
+
++static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
++{
++ return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
++ rsvd_bits(1, 2);
++}
++
+ /*
+- * Load the pae pdptrs. Return true is they are all valid.
++ * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
+ */
+ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+ {
+@@ -689,8 +695,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+ }
+ for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
+ if ((pdpte[i] & PT_PRESENT_MASK) &&
+- (pdpte[i] &
+- vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
++ (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
+ ret = 0;
+ goto out;
+ }
+@@ -6481,8 +6486,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+ if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+ emulation_type))
+ return EMULATE_DONE;
+- if (ctxt->have_exception && inject_emulated_exception(vcpu))
++ if (ctxt->have_exception) {
++ /*
++ * #UD should result in just EMULATION_FAILED, and trap-like
++ * exception should not be encountered during decode.
++ */
++ WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
++ exception_type(ctxt->exception.vector) == EXCPT_TRAP);
++ inject_emulated_exception(vcpu);
+ return EMULATE_DONE;
++ }
+ if (emulation_type & EMULTYPE_SKIP)
+ return EMULATE_FAIL;
+ return handle_emulation_failure(vcpu, emulation_type);
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index e6dad600614c..4123100e0eaf 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -861,9 +861,9 @@ void numa_remove_cpu(int cpu)
+ */
+ const struct cpumask *cpumask_of_node(int node)
+ {
+- if (node >= nr_node_ids) {
++ if ((unsigned)node >= nr_node_ids) {
+ printk(KERN_WARNING
+- "cpumask_of_node(%d): node > nr_node_ids(%u)\n",
++ "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
+ node, nr_node_ids);
+ dump_stack();
+ return cpu_none_mask;
+diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
+index b196524759ec..7f2140414440 100644
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -330,13 +330,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+- addr += PUD_SIZE;
++ WARN_ON_ONCE(addr & ~PUD_MASK);
++ addr = round_up(addr + 1, PUD_SIZE);
+ continue;
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd)) {
+- addr += PMD_SIZE;
++ WARN_ON_ONCE(addr & ~PMD_MASK);
++ addr = round_up(addr + 1, PMD_SIZE);
+ continue;
+ }
+
+@@ -666,6 +668,8 @@ void __init pti_init(void)
+ */
+ void pti_finalize(void)
+ {
++ if (!boot_cpu_has(X86_FEATURE_PTI))
++ return;
+ /*
+ * We need to clone everything (again) that maps parts of the
+ * kernel image.
+diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
+index b393eaa798ef..0099826c88a8 100644
+--- a/arch/x86/platform/intel/iosf_mbi.c
++++ b/arch/x86/platform/intel/iosf_mbi.c
+@@ -17,6 +17,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/capability.h>
+ #include <linux/pm_qos.h>
++#include <linux/wait.h>
+
+ #include <asm/iosf_mbi.h>
+
+@@ -201,23 +202,45 @@ EXPORT_SYMBOL(iosf_mbi_available);
+ #define PUNIT_SEMAPHORE_BIT BIT(0)
+ #define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
+
+-static DEFINE_MUTEX(iosf_mbi_punit_mutex);
+-static DEFINE_MUTEX(iosf_mbi_block_punit_i2c_access_count_mutex);
++static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex);
+ static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
+-static u32 iosf_mbi_block_punit_i2c_access_count;
++static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq);
++static u32 iosf_mbi_pmic_punit_access_count;
++static u32 iosf_mbi_pmic_i2c_access_count;
+ static u32 iosf_mbi_sem_address;
+ static unsigned long iosf_mbi_sem_acquired;
+ static struct pm_qos_request iosf_mbi_pm_qos;
+
+ void iosf_mbi_punit_acquire(void)
+ {
+- mutex_lock(&iosf_mbi_punit_mutex);
++ /* Wait for any I2C PMIC accesses from in kernel drivers to finish. */
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
++ while (iosf_mbi_pmic_i2c_access_count != 0) {
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
++ wait_event(iosf_mbi_pmic_access_waitq,
++ iosf_mbi_pmic_i2c_access_count == 0);
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
++ }
++ /*
++ * We do not need to do anything to allow the PUNIT to safely access
++ * the PMIC, other than block in-kernel accesses to the PMIC.
++ */
++ iosf_mbi_pmic_punit_access_count++;
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+ }
+ EXPORT_SYMBOL(iosf_mbi_punit_acquire);
+
+ void iosf_mbi_punit_release(void)
+ {
+- mutex_unlock(&iosf_mbi_punit_mutex);
++ bool do_wakeup;
++
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
++ iosf_mbi_pmic_punit_access_count--;
++ do_wakeup = iosf_mbi_pmic_punit_access_count == 0;
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
++
++ if (do_wakeup)
++ wake_up(&iosf_mbi_pmic_access_waitq);
+ }
+ EXPORT_SYMBOL(iosf_mbi_punit_release);
+
+@@ -256,34 +279,32 @@ static void iosf_mbi_reset_semaphore(void)
+ * already blocked P-Unit accesses because it wants them blocked over multiple
+ * i2c-transfers, for e.g. read-modify-write of an I2C client register.
+ *
+- * The P-Unit accesses already being blocked is tracked through the
+- * iosf_mbi_block_punit_i2c_access_count variable which is protected by the
+- * iosf_mbi_block_punit_i2c_access_count_mutex this mutex is hold for the
+- * entire duration of the function.
+- *
+- * If access is not blocked yet, this function takes the following steps:
++ * To allow safe PMIC i2c bus accesses this function takes the following steps:
+ *
+ * 1) Some code sends request to the P-Unit which make it access the PMIC
+ * I2C bus. Testing has shown that the P-Unit does not check its internal
+ * PMIC bus semaphore for these requests. Callers of these requests call
+ * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these
+- * functions lock/unlock the iosf_mbi_punit_mutex.
+- * As the first step we lock the iosf_mbi_punit_mutex, to wait for any in
+- * flight requests to finish and to block any new requests.
++ * functions increase/decrease iosf_mbi_pmic_punit_access_count, so first
++ * we wait for iosf_mbi_pmic_punit_access_count to become 0.
++ *
++ * 2) Check iosf_mbi_pmic_i2c_access_count; if access has already
++ * been blocked by another caller, we only need to increment
++ * iosf_mbi_pmic_i2c_access_count and we can skip the other steps.
+ *
+- * 2) Some code makes such P-Unit requests from atomic contexts where it
++ * 3) Some code makes such P-Unit requests from atomic contexts where it
+ * cannot call iosf_mbi_punit_acquire() as that may sleep.
+ * As the second step we call a notifier chain which allows any code
+ * needing P-Unit resources from atomic context to acquire them before
+ * we take control over the PMIC I2C bus.
+ *
+- * 3) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
++ * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
+ * if this happens while the kernel itself is accessing the PMIC I2C bus
+ * the SoC hangs.
+ * As the third step we call pm_qos_update_request() to disallow the CPU
+ * to enter C6 or C7.
+ *
+- * 4) The P-Unit has a PMIC bus semaphore which we can request to stop
++ * 5) The P-Unit has a PMIC bus semaphore which we can request to stop
+ * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
+ * As the fourth and final step we request this semaphore and wait for our
+ * request to be acknowledged.
+@@ -297,12 +318,18 @@ int iosf_mbi_block_punit_i2c_access(void)
+ if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address))
+ return -ENXIO;
+
+- mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
+
+- if (iosf_mbi_block_punit_i2c_access_count > 0)
++ while (iosf_mbi_pmic_punit_access_count != 0) {
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
++ wait_event(iosf_mbi_pmic_access_waitq,
++ iosf_mbi_pmic_punit_access_count == 0);
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
++ }
++
++ if (iosf_mbi_pmic_i2c_access_count > 0)
+ goto success;
+
+- mutex_lock(&iosf_mbi_punit_mutex);
+ blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
+ MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
+
+@@ -330,10 +357,6 @@ int iosf_mbi_block_punit_i2c_access(void)
+ iosf_mbi_sem_acquired = jiffies;
+ dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n",
+ jiffies_to_msecs(jiffies - start));
+- /*
+- * Success, keep iosf_mbi_punit_mutex locked till
+- * iosf_mbi_unblock_punit_i2c_access() gets called.
+- */
+ goto success;
+ }
+
+@@ -344,15 +367,13 @@ int iosf_mbi_block_punit_i2c_access(void)
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n");
+ error:
+ iosf_mbi_reset_semaphore();
+- mutex_unlock(&iosf_mbi_punit_mutex);
+-
+ if (!iosf_mbi_get_sem(&sem))
+ dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem);
+ success:
+ if (!WARN_ON(ret))
+- iosf_mbi_block_punit_i2c_access_count++;
++ iosf_mbi_pmic_i2c_access_count++;
+
+- mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+
+ return ret;
+ }
+@@ -360,17 +381,20 @@ EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access);
+
+ void iosf_mbi_unblock_punit_i2c_access(void)
+ {
+- mutex_lock(&iosf_mbi_block_punit_i2c_access_count_mutex);
++ bool do_wakeup = false;
+
+- iosf_mbi_block_punit_i2c_access_count--;
+- if (iosf_mbi_block_punit_i2c_access_count == 0) {
++ mutex_lock(&iosf_mbi_pmic_access_mutex);
++ iosf_mbi_pmic_i2c_access_count--;
++ if (iosf_mbi_pmic_i2c_access_count == 0) {
+ iosf_mbi_reset_semaphore();
+- mutex_unlock(&iosf_mbi_punit_mutex);
+ dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n",
+ jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired));
++ do_wakeup = true;
+ }
++ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+
+- mutex_unlock(&iosf_mbi_block_punit_i2c_access_count_mutex);
++ if (do_wakeup)
++ wake_up(&iosf_mbi_pmic_access_waitq);
+ }
+ EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access);
+
+@@ -379,10 +403,10 @@ int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+ int ret;
+
+ /* Wait for the bus to go inactive before registering */
+- mutex_lock(&iosf_mbi_punit_mutex);
++ iosf_mbi_punit_acquire();
+ ret = blocking_notifier_chain_register(
+ &iosf_mbi_pmic_bus_access_notifier, nb);
+- mutex_unlock(&iosf_mbi_punit_mutex);
++ iosf_mbi_punit_release();
+
+ return ret;
+ }
+@@ -403,9 +427,9 @@ int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
+ int ret;
+
+ /* Wait for the bus to go inactive before unregistering */
+- mutex_lock(&iosf_mbi_punit_mutex);
++ iosf_mbi_punit_acquire();
+ ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
+- mutex_unlock(&iosf_mbi_punit_mutex);
++ iosf_mbi_punit_release();
+
+ return ret;
+ }
+@@ -413,7 +437,7 @@ EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
+
+ void iosf_mbi_assert_punit_acquired(void)
+ {
+- WARN_ON(!mutex_is_locked(&iosf_mbi_punit_mutex));
++ WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
+ }
+ EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
+
+diff --git a/block/blk-flush.c b/block/blk-flush.c
+index aedd9320e605..1eec9cbe5a0a 100644
+--- a/block/blk-flush.c
++++ b/block/blk-flush.c
+@@ -214,6 +214,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
+
+ /* release the tag's ownership to the req cloned from */
+ spin_lock_irqsave(&fq->mq_flush_lock, flags);
++
++ if (!refcount_dec_and_test(&flush_rq->ref)) {
++ fq->rq_status = error;
++ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
++ return;
++ }
++
++ if (fq->rq_status != BLK_STS_OK)
++ error = fq->rq_status;
++
+ hctx = flush_rq->mq_hctx;
+ if (!q->elevator) {
+ blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 68106a41f90d..4af0941fbb67 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
+
+ static int blk_mq_poll_stats_bkt(const struct request *rq)
+ {
+- int ddir, bytes, bucket;
++ int ddir, sectors, bucket;
+
+ ddir = rq_data_dir(rq);
+- bytes = blk_rq_bytes(rq);
++ sectors = blk_rq_stats_sectors(rq);
+
+- bucket = ddir + 2*(ilog2(bytes) - 9);
++ bucket = ddir + 2 * ilog2(sectors);
+
+ if (bucket < 0)
+ return -1;
+@@ -330,6 +330,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
+ else
+ rq->start_time_ns = 0;
+ rq->io_start_time_ns = 0;
++ rq->stats_sectors = 0;
+ rq->nr_phys_segments = 0;
+ #if defined(CONFIG_BLK_DEV_INTEGRITY)
+ rq->nr_integrity_segments = 0;
+@@ -679,9 +680,7 @@ void blk_mq_start_request(struct request *rq)
+
+ if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
+ rq->io_start_time_ns = ktime_get_ns();
+-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+- rq->throtl_size = blk_rq_sectors(rq);
+-#endif
++ rq->stats_sectors = blk_rq_sectors(rq);
+ rq->rq_flags |= RQF_STATS;
+ rq_qos_issue(q, rq);
+ }
+@@ -911,7 +910,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+ */
+ if (blk_mq_req_expired(rq, next))
+ blk_mq_rq_timed_out(rq, reserved);
+- if (refcount_dec_and_test(&rq->ref))
++
++ if (is_flush_rq(rq, hctx))
++ rq->end_io(rq, 0);
++ else if (refcount_dec_and_test(&rq->ref))
+ __blk_mq_free_request(rq);
+
+ return true;
+@@ -2853,6 +2855,8 @@ static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
+ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q)
+ {
++ int ret = -ENOMEM;
++
+ /* mark the queue as mq asap */
+ q->mq_ops = set->ops;
+
+@@ -2914,17 +2918,18 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ blk_mq_map_swqueue(q);
+
+ if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
+- int ret;
+-
+ ret = elevator_init_mq(q);
+ if (ret)
+- return ERR_PTR(ret);
++ goto err_tag_set;
+ }
+
+ return q;
+
++err_tag_set:
++ blk_mq_del_queue_tag_set(q);
+ err_hctxs:
+ kfree(q->queue_hw_ctx);
++ q->nr_hw_queues = 0;
+ err_sys_init:
+ blk_mq_sysfs_deinit(q);
+ err_poll:
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 8ab6c8153223..ee74bffe3504 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -2246,7 +2246,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
+ struct request_queue *q = rq->q;
+ struct throtl_data *td = q->td;
+
+- throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
++ throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
++ time_ns >> 10);
+ }
+
+ void blk_throtl_bio_endio(struct bio *bio)
+diff --git a/block/blk.h b/block/blk.h
+index 7814aa207153..7019757a5ce1 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -19,6 +19,7 @@ struct blk_flush_queue {
+ unsigned int flush_queue_delayed:1;
+ unsigned int flush_pending_idx:1;
+ unsigned int flush_running_idx:1;
++ blk_status_t rq_status;
+ unsigned long flush_pending_since;
+ struct list_head flush_queue[2];
+ struct list_head flush_data_in_flight;
+@@ -47,6 +48,12 @@ static inline void __blk_get_queue(struct request_queue *q)
+ kobject_get(&q->kobj);
+ }
+
++static inline bool
++is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
++{
++ return hctx->fq->flush_rq == req;
++}
++
+ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
+ int node, int cmd_size, gfp_t flags);
+ void blk_free_flush_queue(struct blk_flush_queue *q);
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 1876f5712bfd..4cb1a2ca9c88 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -377,13 +377,6 @@ done:
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+- *
+- * For a zoned block device, __dd_dispatch_request() may return NULL
+- * if all the queued write requests are directed at zones that are already
+- * locked due to on-going write requests. In this case, make sure to mark
+- * the queue as needing a restart to ensure that the queue is run again
+- * and the pending writes dispatched once the target zones for the ongoing
+- * write requests are unlocked in dd_finish_request().
+ */
+ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+ {
+@@ -392,9 +385,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+
+ spin_lock(&dd->lock);
+ rq = __dd_dispatch_request(dd);
+- if (!rq && blk_queue_is_zoned(hctx->queue) &&
+- !list_empty(&dd->fifo_list[WRITE]))
+- blk_mq_sched_mark_restart_hctx(hctx);
+ spin_unlock(&dd->lock);
+
+ return rq;
+@@ -560,6 +550,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing. This function is called for
+ * all requests, whether or not these requests complete successfully.
++ *
++ * For a zoned block device, __dd_dispatch_request() may have stopped
++ * dispatching requests if all the queued requests are write requests directed
++ * at zones that are already locked by on-going write requests. In that
++ * case, mark the queue as needing a restart so that it is run again once
++ * this request completes and its target zone is unlocked, guaranteeing
++ * forward progress for the queued writes.
+ */
+ static void dd_finish_request(struct request *rq)
+ {
+@@ -571,6 +568,8 @@ static void dd_finish_request(struct request *rq)
+
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ blk_req_zone_write_unlock(rq);
++ if (!list_empty(&dd->fifo_list[WRITE]))
++ blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+ }
+ }
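
The mq-deadline change moves the zoned-device restart marking from the dispatch path into the completion path: only after blk_req_zone_write_unlock() can a blocked write actually make progress, so that is the point at which to re-run the hardware queue. A sketch under a hypothetical struct sched_data; blk_req_zone_write_unlock() and blk_mq_sched_mark_restart_hctx() are the in-tree helpers the hunk uses (the latter comes from the block layer's private blk-mq-sched.h):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* hypothetical per-scheduler data, standing in for struct deadline_data */
struct sched_data {
	spinlock_t zone_lock;
	struct list_head write_fifo;
};

static void finish_request(struct request *rq, struct sched_data *sd)
{
	unsigned long flags;

	spin_lock_irqsave(&sd->zone_lock, flags);
	blk_req_zone_write_unlock(rq);
	/* a zone just became writable: re-run the queue if writes wait */
	if (!list_empty(&sd->write_fifo))
		blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
	spin_unlock_irqrestore(&sd->zone_lock, flags);
}

Marking the restart under zone_lock avoids the race where the queue is re-run before any zone has actually been unlocked.
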
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 23484aa877b6..460a315badcd 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -219,12 +219,13 @@ static void bsw_pwm_setup(struct lpss_private_data *pdata)
+ }
+
+ static const struct lpss_device_desc lpt_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
++ | LPSS_SAVE_CTX,
+ .prv_offset = 0x800,
+ };
+
+ static const struct lpss_device_desc lpt_i2c_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
+ .prv_offset = 0x800,
+ };
+
+@@ -236,7 +237,8 @@ static struct property_entry uart_properties[] = {
+ };
+
+ static const struct lpss_device_desc lpt_uart_dev_desc = {
+- .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
++ .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
++ | LPSS_SAVE_CTX,
+ .clk_con_id = "baudclk",
+ .prv_offset = 0x800,
+ .setup = lpss_uart_setup,
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 24f065114d42..2c4dda0787e8 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -279,9 +279,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
+ }
+
+ if (acpi_duplicate_processor_id(pr->acpi_id)) {
+- dev_err(&device->dev,
+- "Failed to get unique processor _UID (0x%x)\n",
+- pr->acpi_id);
++ if (pr->acpi_id == 0xff)
++ dev_info_once(&device->dev,
++ "Entry not well-defined, consider updating BIOS\n");
++ else
++ dev_err(&device->dev,
++ "Failed to get unique processor _UID (0x%x)\n",
++ pr->acpi_id);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index 993940d582f5..6875bf629f16 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -153,6 +153,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
+ int ghes_estatus_pool_init(int num_ghes)
+ {
+ unsigned long addr, len;
++ int rc;
+
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+@@ -164,7 +165,7 @@ int ghes_estatus_pool_init(int num_ghes)
+ ghes_estatus_pool_size_request = PAGE_ALIGN(len);
+ addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
+ if (!addr)
+- return -ENOMEM;
++ goto err_pool_alloc;
+
+ /*
+ * New allocation must be visible in all pgd before it can be found by
+@@ -172,7 +173,19 @@ int ghes_estatus_pool_init(int num_ghes)
+ */
+ vmalloc_sync_all();
+
+- return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
++ rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
++ if (rc)
++ goto err_pool_add;
++
++ return 0;
++
++err_pool_add:
++ vfree((void *)addr);
++
++err_pool_alloc:
++ gen_pool_destroy(ghes_estatus_pool);
++
++ return -ENOMEM;
+ }
+
+ static int map_gen_v2(struct ghes *ghes)
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 15f103d7532b..3b2525908dd8 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -365,8 +365,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
+ union acpi_object *psd = NULL;
+ struct acpi_psd_package *pdomain;
+
+- status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
+- ACPI_TYPE_PACKAGE);
++ status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
++ &buffer, ACPI_TYPE_PACKAGE);
++ if (status == AE_NOT_FOUND) /* _PSD is optional */
++ return 0;
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
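
_PSD is an optional ACPI object, so its absence (AE_NOT_FOUND) is now treated as success while any other evaluation failure still maps to -ENODEV. A sketch of that pattern; evaluate_optional_psd() is a hypothetical wrapper, the ACPICA call is the real one used above:

#include <linux/acpi.h>
#include <linux/errno.h>

static int evaluate_optional_psd(acpi_handle handle, struct acpi_buffer *out)
{
	acpi_status status;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    out, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* the object is optional */
		return 0;
	if (ACPI_FAILURE(status))	/* present but broken: real error */
		return -ENODEV;
	return 0;
}
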
+diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
+index b2ef4c2ec955..fd66a736621c 100644
+--- a/drivers/acpi/custom_method.c
++++ b/drivers/acpi/custom_method.c
+@@ -49,8 +49,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+- (count > uncopied_bytes))
++ (count > uncopied_bytes)) {
++ kfree(buf);
+ return -EINVAL;
++ }
+
+ if (copy_from_user(buf + (*ppos), user_buf, count)) {
+ kfree(buf);
+@@ -70,6 +72,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+ }
+
++ kfree(buf);
+ return count;
+ }
+
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+index d2549ae65e1b..dea8a60e18a4 100644
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -449,8 +449,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
+ * No IRQ known to the ACPI subsystem - maybe the BIOS /
+ * driver reported one, then use it. Exit in any case.
+ */
+- if (!acpi_pci_irq_valid(dev, pin))
++ if (!acpi_pci_irq_valid(dev, pin)) {
++ kfree(entry);
+ return 0;
++ }
+
+ if (acpi_isa_register_gsi(dev))
+ dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index f7652baa6337..3e63294304c7 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -65,6 +65,12 @@ enum board_ids {
+ board_ahci_sb700, /* for SB700 and SB800 */
+ board_ahci_vt8251,
+
++ /*
++ * board IDs for Intel chipsets that support more than 6 ports
++ * *and* end up needing the PCS quirk.
++ */
++ board_ahci_pcs7,
++
+ /* aliases */
+ board_ahci_mcp_linux = board_ahci_mcp65,
+ board_ahci_mcp67 = board_ahci_mcp65,
+@@ -220,6 +226,12 @@ static const struct ata_port_info ahci_port_info[] = {
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_vt8251_ops,
+ },
++ [board_ahci_pcs7] = {
++ .flags = AHCI_FLAG_COMMON,
++ .pio_mask = ATA_PIO4,
++ .udma_mask = ATA_UDMA6,
++ .port_ops = &ahci_ops,
++ },
+ };
+
+ static const struct pci_device_id ahci_pci_tbl[] = {
+@@ -264,26 +276,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+- { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
++ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+@@ -623,30 +635,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
+ ahci_save_initial_config(&pdev->dev, hpriv);
+ }
+
+-static int ahci_pci_reset_controller(struct ata_host *host)
+-{
+- struct pci_dev *pdev = to_pci_dev(host->dev);
+- int rc;
+-
+- rc = ahci_reset_controller(host);
+- if (rc)
+- return rc;
+-
+- if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+- struct ahci_host_priv *hpriv = host->private_data;
+- u16 tmp16;
+-
+- /* configure PCS */
+- pci_read_config_word(pdev, 0x92, &tmp16);
+- if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
+- tmp16 |= hpriv->port_map;
+- pci_write_config_word(pdev, 0x92, tmp16);
+- }
+- }
+-
+- return 0;
+-}
+-
+ static void ahci_pci_init_controller(struct ata_host *host)
+ {
+ struct ahci_host_priv *hpriv = host->private_data;
+@@ -849,7 +837,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
+ struct ata_host *host = pci_get_drvdata(pdev);
+ int rc;
+
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+ ahci_pci_init_controller(host);
+@@ -884,7 +872,7 @@ static int ahci_pci_device_resume(struct device *dev)
+ ahci_mcp89_apple_enable(pdev);
+
+ if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+@@ -1619,6 +1607,34 @@ update_policy:
+ ap->target_lpm_policy = policy;
+ }
+
++static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
++{
++ const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
++ u16 tmp16;
++
++ /*
++ * Only apply the 6-port PCS quirk for known legacy platforms.
++ */
++ if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
++ return;
++ if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
++ return;
++
++ /*
++ * port_map is determined from the PORTS_IMPL PCI register, which
++ * is implemented as a write or write-once register. If the
++ * register isn't programmed, ahci automatically generates it from
++ * the number of ports, which is good enough for PCS programming.
++ * It is otherwise expected that platform firmware enables the
++ * ports before the OS boots.
++ */
++ pci_read_config_word(pdev, PCS_6, &tmp16);
++ if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
++ tmp16 |= hpriv->port_map;
++ pci_write_config_word(pdev, PCS_6, tmp16);
++ }
++}
++
+ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ unsigned int board_id = ent->driver_data;
+@@ -1731,6 +1747,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ /* save initial config */
+ ahci_pci_save_initial_config(pdev, hpriv);
+
++ /*
++ * If platform firmware failed to enable ports, try to enable
++ * them here.
++ */
++ ahci_intel_pcs_quirk(pdev, hpriv);
++
+ /* prepare host */
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+@@ -1840,7 +1862,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (rc)
+ return rc;
+
+- rc = ahci_pci_reset_controller(host);
++ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
+index 0570629d719d..3dbf398c92ea 100644
+--- a/drivers/ata/ahci.h
++++ b/drivers/ata/ahci.h
+@@ -247,6 +247,8 @@ enum {
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+
+ ICH_MAP = 0x90, /* ICH MAP register */
++ PCS_6 = 0x92, /* 6 port PCS */
++ PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
+
+ /* em constants */
+ EM_MAX_SLOTS = 8,
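
The quirk above is keyed off the driver_data of the matched PCI ID: pci_match_id() recovers the table entry for the probed device, and only board IDs at or beyond board_ahci_pcs7 (placed last in the enum before the aliases) get the PCS register write. A hedged sketch of that gating pattern with a hypothetical ID table and threshold:

#include <linux/pci.h>

/*
 * Sketch of gating a quirk on the driver_data of the matched PCI ID.
 * `id_table` and `quirky_board` are hypothetical stand-ins for
 * ahci_pci_tbl and board_ahci_pcs7; pci_match_id() is the real helper.
 */
static bool needs_quirk(struct pci_dev *pdev,
			const struct pci_device_id *id_table,
			unsigned long quirky_board)
{
	const struct pci_device_id *id = pci_match_id(id_table, pdev);

	return id && id->vendor == PCI_VENDOR_ID_INTEL &&
	       id->driver_data >= quirky_board;
}

Because the check relies on enum ordering, any future board ID that needs the quirk has to be added at or after the threshold entry.
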
+diff --git a/drivers/base/soc.c b/drivers/base/soc.c
+index 10b280f30217..7e91894a380b 100644
+--- a/drivers/base/soc.c
++++ b/drivers/base/soc.c
+@@ -157,6 +157,7 @@ out2:
+ out1:
+ return ERR_PTR(ret);
+ }
++EXPORT_SYMBOL_GPL(soc_device_register);
+
+ /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+ void soc_device_unregister(struct soc_device *soc_dev)
+@@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
+ device_unregister(&soc_dev->dev);
+ early_soc_dev_attr = NULL;
+ }
++EXPORT_SYMBOL_GPL(soc_device_unregister);
+
+ static int __init soc_bus_register(void)
+ {
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index e1739efca37e..8e32930f65a1 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1763,6 +1763,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ case LOOP_SET_FD:
+ case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
++ case LOOP_SET_DIRECT_IO:
+ err = lo_ioctl(bdev, mode, cmd, arg);
+ break;
+ default:
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 57aebc6e1c28..5d5e52c6509c 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -355,8 +355,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
+ }
+ config = nbd->config;
+
+- if (!mutex_trylock(&cmd->lock))
++ if (!mutex_trylock(&cmd->lock)) {
++ nbd_config_put(nbd);
+ return BLK_EH_RESET_TIMER;
++ }
+
+ if (config->num_connections > 1) {
+ dev_err_ratelimited(nbd_to_dev(nbd),
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 95be7228f327..3866d6b8017c 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
+ size_t size = min_t(size_t, 16, rng_buffer_size());
+
+ mutex_lock(&reading_mutex);
+- bytes_read = rng_get_data(rng, rng_buffer, size, 1);
++ bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ mutex_unlock(&reading_mutex);
+ if (bytes_read > 0)
+ add_device_randomness(rng_buffer, bytes_read);
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 1dc10740fc0f..6ce64007b3c0 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4215,7 +4215,53 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
+ int chan;
+
+ ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
+- if (msg->rsp_size < 2) {
++
++ if ((msg->data_size >= 2)
++ && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
++ && (msg->data[1] == IPMI_SEND_MSG_CMD)
++ && (msg->user_data == NULL)) {
++
++ if (intf->in_shutdown)
++ goto free_msg;
++
++ /*
++ * This is the local response to a command send, start
++ * the timer for these. The user_data will not be
++ * NULL if this is a response send, and we will let
++ * response sends just go through.
++ */
++
++ /*
++ * Check for errors, if we get certain errors (ones
++ * that mean basically we can try again later), we
++ * ignore them and start the timer. Otherwise we
++ * report the error immediately.
++ */
++ if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
++ && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
++ && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
++ && (msg->rsp[2] != IPMI_BUS_ERR)
++ && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
++ int ch = msg->rsp[3] & 0xf;
++ struct ipmi_channel *chans;
++
++ /* Got an error sending the message, handle it. */
++
++ chans = READ_ONCE(intf->channel_list)->c;
++ if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
++ || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
++ ipmi_inc_stat(intf, sent_lan_command_errs);
++ else
++ ipmi_inc_stat(intf, sent_ipmb_command_errs);
++ intf_err_seq(intf, msg->msgid, msg->rsp[2]);
++ } else
++ /* The message was sent, start the timer. */
++ intf_start_seq_timer(intf, msg->msgid);
++free_msg:
++ requeue = 0;
++ goto out;
++
++ } else if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+@@ -4472,62 +4518,16 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
+ unsigned long flags = 0; /* keep us warning-free. */
+ int run_to_completion = intf->run_to_completion;
+
+- if ((msg->data_size >= 2)
+- && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+- && (msg->data[1] == IPMI_SEND_MSG_CMD)
+- && (msg->user_data == NULL)) {
+-
+- if (intf->in_shutdown)
+- goto free_msg;
+-
+- /*
+- * This is the local response to a command send, start
+- * the timer for these. The user_data will not be
+- * NULL if this is a response send, and we will let
+- * response sends just go through.
+- */
+-
+- /*
+- * Check for errors, if we get certain errors (ones
+- * that mean basically we can try again later), we
+- * ignore them and start the timer. Otherwise we
+- * report the error immediately.
+- */
+- if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+- && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+- && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+- && (msg->rsp[2] != IPMI_BUS_ERR)
+- && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+- int ch = msg->rsp[3] & 0xf;
+- struct ipmi_channel *chans;
+-
+- /* Got an error sending the message, handle it. */
+-
+- chans = READ_ONCE(intf->channel_list)->c;
+- if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+- || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+- ipmi_inc_stat(intf, sent_lan_command_errs);
+- else
+- ipmi_inc_stat(intf, sent_ipmb_command_errs);
+- intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+- } else
+- /* The message was sent, start the timer. */
+- intf_start_seq_timer(intf, msg->msgid);
+-
+-free_msg:
+- ipmi_free_smi_msg(msg);
+- } else {
+- /*
+- * To preserve message order, we keep a queue and deliver from
+- * a tasklet.
+- */
+- if (!run_to_completion)
+- spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+- list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+- if (!run_to_completion)
+- spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+- flags);
+- }
++ /*
++ * To preserve message order, we keep a queue and deliver from
++ * a tasklet.
++ */
++ if (!run_to_completion)
++ spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
++ list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
++ if (!run_to_completion)
++ spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
++ flags);
+
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
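
The IPMI change stops special-casing local SEND_MSG responses in ipmi_smi_msg_received(): every message is now appended to waiting_rcv_msgs and classified later in handle_one_recv_msg(), so a single consumer context sees them strictly in arrival order. A generic sketch of that single-queue, single-consumer ordering pattern (struct msg and process_one() are hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

struct msg { struct list_head link; };

void process_one(struct msg *m);	/* hypothetical per-message handler */

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

/* producer side: may run from several contexts */
void enqueue_msg(struct msg *m)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	list_add_tail(&m->link, &pending);
	spin_unlock_irqrestore(&pending_lock, flags);
	/* then kick the tasklet/work that runs drain_msgs() */
}

/* consumer side: a single context (e.g. a tasklet) drains in order */
void drain_msgs(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pending_lock, flags);
	while (!list_empty(&pending)) {
		struct msg *m = list_first_entry(&pending, struct msg, link);

		list_del(&m->link);
		spin_unlock_irqrestore(&pending_lock, flags);
		process_one(m);		/* handle outside the lock */
		spin_lock_irqsave(&pending_lock, flags);
	}
	spin_unlock_irqrestore(&pending_lock, flags);
}

Because producers only ever append under the lock and exactly one context dequeues, no message can be handled ahead of an earlier one.
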
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index b08dc50f9f26..9eb564c002f6 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+ }
+ #endif
+
++static inline bool should_stop_iteration(void)
++{
++ if (need_resched())
++ cond_resched();
++ return fatal_signal_pending(current);
++}
++
+ /*
+ * This function reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+ p += sz;
+ count -= sz;
+ read += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ kfree(bounce);
+
+@@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ read += sz;
+ low_count -= sz;
+ count -= sz;
++ if (should_stop_iteration()) {
++ count = 0;
++ break;
++ }
+ }
+ }
+
+@@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
+ buf += sz;
+ read += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
+@@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
+ p += sz;
+ count -= sz;
+ written += sz;
++ if (should_stop_iteration())
++ break;
+ }
+
+ *ppos += written;
+@@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
+ buf += sz;
+ virtr += sz;
+ p += sz;
++ if (should_stop_iteration())
++ break;
+ }
+ free_page((unsigned long)kbuf);
+ }
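
should_stop_iteration() makes the long /dev/mem and /dev/kmem copy loops preemption-friendly and killable: yield when rescheduling is requested, abort when a fatal signal (e.g. from the OOM killer) is pending. A sketch of the helper in a chunked copy loop; copy_chunk() is a hypothetical stand-in for the per-page xlate/copy steps:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

int copy_chunk(void *dst, const void *src, size_t sz);	/* hypothetical */

static inline bool should_stop(void)
{
	if (need_resched())
		cond_resched();			/* be nice to other tasks */
	return fatal_signal_pending(current);	/* die promptly if killed */
}

static ssize_t copy_loop(void *dst, const void *src, size_t count)
{
	size_t done = 0;

	while (count) {
		size_t sz = min_t(size_t, count, PAGE_SIZE);

		if (copy_chunk(dst + done, src + done, sz))
			return -EFAULT;
		done += sz;
		count -= sz;
		if (should_stop())
			break;		/* return the partial count */
	}
	return done;
}
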
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 1b4f95c13e00..d7a3888ad80f 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -320,18 +320,22 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
+ if (!chip)
+ return -ENODEV;
+
+- for (i = 0; i < chip->nr_allocated_banks; i++)
+- if (digests[i].alg_id != chip->allocated_banks[i].alg_id)
+- return -EINVAL;
++ for (i = 0; i < chip->nr_allocated_banks; i++) {
++ if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
++ rc = -EINVAL;
++ goto out;
++ }
++ }
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm2_pcr_extend(chip, pcr_idx, digests);
+- tpm_put_ops(chip);
+- return rc;
++ goto out;
+ }
+
+ rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest,
+ "attempting extend a PCR value");
++
++out:
+ tpm_put_ops(chip);
+ return rc;
+ }
+@@ -354,14 +358,9 @@ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+ if (!chip)
+ return -ENODEV;
+
+- rc = tpm_buf_init(&buf, 0, 0);
+- if (rc)
+- goto out;
+-
+- memcpy(buf.data, cmd, buflen);
++ buf.data = cmd;
++ rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to send a command");
+- tpm_buf_destroy(&buf);
+-out:
++
+ tpm_put_ops(chip);
+ return rc;
+ }
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index c3181ea9f271..270f43acbb77 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -980,6 +980,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ goto out_err;
+ }
+
++ tpm_chip_start(chip);
++ chip->flags |= TPM_CHIP_FLAG_IRQ;
+ if (irq) {
+ tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
+ irq);
+@@ -989,6 +991,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
+ } else {
+ tpm_tis_probe_irq(chip, intmask);
+ }
++ tpm_chip_stop(chip);
+ }
+
+ rc = tpm_chip_register(chip);
+diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
+index 988ebc326bdb..39e34f5066d3 100644
+--- a/drivers/cpufreq/armada-8k-cpufreq.c
++++ b/drivers/cpufreq/armada-8k-cpufreq.c
+@@ -136,6 +136,8 @@ static int __init armada_8k_cpufreq_init(void)
+
+ nb_cpus = num_possible_cpus();
+ freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
++ if (!freq_tables)
++ return -ENOMEM;
+ cpumask_copy(&cpus, cpu_possible_mask);
+
+ /*
+diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
+index 7d05efdbd3c6..12d9e6cecf1d 100644
+--- a/drivers/cpuidle/governors/teo.c
++++ b/drivers/cpuidle/governors/teo.c
+@@ -242,7 +242,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ int latency_req = cpuidle_governor_latency_req(dev->cpu);
+ unsigned int duration_us, count;
+- int max_early_idx, idx, i;
++ int max_early_idx, constraint_idx, idx, i;
+ ktime_t delta_tick;
+
+ if (cpu_data->last_state >= 0) {
+@@ -257,6 +257,7 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+
+ count = 0;
+ max_early_idx = -1;
++ constraint_idx = drv->state_count;
+ idx = -1;
+
+ for (i = 0; i < drv->state_count; i++) {
+@@ -286,16 +287,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ if (s->target_residency > duration_us)
+ break;
+
+- if (s->exit_latency > latency_req) {
+- /*
+- * If we break out of the loop for latency reasons, use
+- * the target residency of the selected state as the
+- * expected idle duration to avoid stopping the tick
+- * as long as that target residency is low enough.
+- */
+- duration_us = drv->states[idx].target_residency;
+- goto refine;
+- }
++ if (s->exit_latency > latency_req && constraint_idx > i)
++ constraint_idx = i;
+
+ idx = i;
+
+@@ -321,7 +314,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ duration_us = drv->states[idx].target_residency;
+ }
+
+-refine:
++ /*
++ * If there is a latency constraint, it may be necessary to use a
++ * shallower idle state than the one selected so far.
++ */
++ if (constraint_idx < idx)
++ idx = constraint_idx;
++
+ if (idx < 0) {
+ idx = 0; /* No states enabled. Must use 0. */
+ } else if (idx > 0) {
+@@ -331,13 +330,12 @@ refine:
+
+ /*
+ * Count and sum the most recent idle duration values less than
+- * the target residency of the state selected so far, find the
+- * max.
++ * the current expected idle duration value.
+ */
+ for (i = 0; i < INTERVALS; i++) {
+ unsigned int val = cpu_data->intervals[i];
+
+- if (val >= drv->states[idx].target_residency)
++ if (val >= duration_us)
+ continue;
+
+ count++;
+@@ -356,8 +354,10 @@ refine:
+ * would be too shallow.
+ */
+ if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) {
+- idx = teo_find_shallower_state(drv, dev, idx, avg_us);
+ duration_us = avg_us;
++ if (drv->states[idx].target_residency > avg_us)
++ idx = teo_find_shallower_state(drv, dev,
++ idx, avg_us);
+ }
+ }
+ }
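
Instead of aborting the state scan when the latency constraint is first violated, the governor now records that index in constraint_idx, finishes the residency-based scan, and caps the final choice afterwards, so the idle-duration refinement still runs. A simplified sketch of the same two-step selection over a hypothetical state table (the real teo_select() additionally handles disabled states and tick interaction):

/* hypothetical simplified state table; deeper states have larger values */
struct state {
	unsigned int exit_latency;	/* us */
	unsigned int target_residency;	/* us */
};

static int select_state(const struct state *s, int n,
			unsigned int duration_us, unsigned int latency_req)
{
	int idx = -1, constraint_idx = n, i;

	for (i = 0; i < n; i++) {
		if (s[i].target_residency > duration_us)
			break;			/* too deep for this sleep */
		if (s[i].exit_latency > latency_req && constraint_idx > i)
			constraint_idx = i;	/* remember, keep scanning */
		idx = i;
	}

	if (constraint_idx < idx)	/* cap afterwards, as the hunk does */
		idx = constraint_idx;

	return idx < 0 ? 0 : idx;	/* no state fits: use the shallowest */
}
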
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index ab22bf8a12d6..a0e19802149f 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -254,7 +254,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
+ /* Restore previous state before return */
+ mutex_lock(&devfreq_list_lock);
+ if (err)
+- return ERR_PTR(err);
++ return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
+
+ governor = find_devfreq_governor(name);
+ }
+diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
+index d9f377912c10..7c06df8bd74f 100644
+--- a/drivers/devfreq/exynos-bus.c
++++ b/drivers/devfreq/exynos-bus.c
+@@ -191,11 +191,10 @@ static void exynos_bus_exit(struct device *dev)
+ if (ret < 0)
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+- if (bus->regulator)
+- regulator_disable(bus->regulator);
+-
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
++ if (bus->regulator)
++ regulator_disable(bus->regulator);
+ }
+
+ /*
+@@ -383,6 +382,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
++ bool passive = false;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+@@ -396,27 +396,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+- /* Parse the device-tree to get the resource information */
+- ret = exynos_bus_parse_of(np, bus);
+- if (ret < 0)
+- return ret;
+-
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+- if (!profile) {
+- ret = -ENOMEM;
+- goto err;
+- }
++ if (!profile)
++ return -ENOMEM;
+
+ node = of_parse_phandle(dev->of_node, "devfreq", 0);
+ if (node) {
+ of_node_put(node);
+- goto passive;
++ passive = true;
+ } else {
+ ret = exynos_bus_parent_parse_of(np, bus);
++ if (ret < 0)
++ return ret;
+ }
+
++ /* Parse the device-tree to get the resource information */
++ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+- goto err;
++ goto err_reg;
++
++ if (passive)
++ goto passive;
+
+ /* Initialize the struct profile and governor data for parent device */
+ profile->polling_ms = 50;
+@@ -507,6 +507,9 @@ out:
+ err:
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
++err_reg:
++ if (!passive)
++ regulator_disable(bus->regulator);
+
+ return ret;
+ }
+diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
+index 58308948b863..be6eeab9c814 100644
+--- a/drivers/devfreq/governor_passive.c
++++ b/drivers/devfreq/governor_passive.c
+@@ -149,7 +149,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
+ static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+ {
+- struct device *dev = devfreq->dev.parent;
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent = (struct devfreq *)p_data->parent;
+@@ -165,12 +164,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ p_data->this = devfreq;
+
+ nb->notifier_call = devfreq_passive_notifier_call;
+- ret = devm_devfreq_register_notifier(dev, parent, nb,
++ ret = devfreq_register_notifier(parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ case DEVFREQ_GOV_STOP:
+- devm_devfreq_unregister_notifier(dev, parent, nb,
+- DEVFREQ_TRANSITION_NOTIFIER);
++ WARN_ON(devfreq_unregister_notifier(parent, nb,
++ DEVFREQ_TRANSITION_NOTIFIER));
+ break;
+ default:
+ break;
+diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
+index 8101ff2f05c1..970f654611bd 100644
+--- a/drivers/dma/bcm2835-dma.c
++++ b/drivers/dma/bcm2835-dma.c
+@@ -871,8 +871,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+- if (rc)
++ if (rc) {
++ dev_err(&pdev->dev, "Unable to set DMA mask\n");
+ return rc;
++ }
+
+ od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+ if (!od)
+diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
+index c6c0143670d9..a776857d89c8 100644
+--- a/drivers/dma/iop-adma.c
++++ b/drivers/dma/iop-adma.c
+@@ -116,9 +116,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
+ list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
+ chain_node) {
+ pr_debug("\tcookie: %d slot: %d busy: %d "
+- "this_desc: %#x next_desc: %#x ack: %d\n",
++ "this_desc: %#x next_desc: %#llx ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy,
+- iter->async_tx.phys, iop_desc_get_next_desc(iter),
++ iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+@@ -306,9 +306,9 @@ retry:
+ int i;
+ dev_dbg(iop_chan->device->common.dev,
+ "allocated slot: %d "
+- "(desc %p phys: %#x) slots_per_op %d\n",
++ "(desc %p phys: %#llx) slots_per_op %d\n",
+ iter->idx, iter->hw_desc,
+- iter->async_tx.phys, slots_per_op);
++ (u64)iter->async_tx.phys, slots_per_op);
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op)
+@@ -516,7 +516,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ return NULL;
+ BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
+
+- dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
+ __func__, len);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -549,7 +549,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev,
+- "%s src_cnt: %d len: %u flags: %lx\n",
++ "%s src_cnt: %d len: %zu flags: %lx\n",
+ __func__, src_cnt, len, flags);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -582,7 +582,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
+ if (unlikely(!len))
+ return NULL;
+
+- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
+ __func__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
+@@ -620,7 +620,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+ dev_dbg(iop_chan->device->common.dev,
+- "%s src_cnt: %d len: %u flags: %lx\n",
++ "%s src_cnt: %d len: %zu flags: %lx\n",
+ __func__, src_cnt, len, flags);
+
+ if (dmaf_p_disabled_continue(flags))
+@@ -683,7 +683,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ return NULL;
+ BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
+
+- dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
++ dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
+ __func__, src_cnt, len);
+
+ spin_lock_bh(&iop_chan->lock);
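
The iop-adma hunks are pure printk-format fixes: size_t takes %zu, and dma_addr_t is 32 or 64 bits depending on CONFIG_ARCH_DMA_ADDR_T_64BIT, so it is cast to u64 and printed with %#llx. A small sketch; the kernel also provides %pad, which takes a pointer to the dma_addr_t and avoids the cast entirely:

#include <linux/printk.h>
#include <linux/types.h>

static void print_desc(dma_addr_t phys, size_t len)
{
	/* explicit widening cast, as the hunks above do */
	pr_debug("desc phys: %#llx len: %zu\n", (u64)phys, len);
	/* equivalent using the dedicated %pad specifier */
	pr_debug("desc phys: %pad len: %zu\n", &phys, len);
}
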
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index ceabdea40ae0..982631d4e1f8 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
+
+ ecc->default_queue = info->default_queue;
+
+- for (i = 0; i < ecc->num_slots; i++)
+- edma_write_slot(ecc, i, &dummy_paramset);
+-
+ if (info->rsv) {
+ /* Set the reserved slots in inuse list */
+ rsv_slots = info->rsv->rsv_slots;
+@@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
+ }
+ }
+
++ for (i = 0; i < ecc->num_slots; i++) {
++ /* Reset only unused - not reserved - paRAM slots */
++ if (!test_bit(i, ecc->slot_inuse))
++ edma_write_slot(ecc, i, &dummy_paramset);
++ }
++
+ /* Clear the xbar mapped channels in unused list */
+ xbar_chans = info->xbar_chans;
+ if (xbar_chans) {
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 8816f74a22b4..2d12b94eccda 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -1829,6 +1829,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
+ struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int irq = irq_desc_get_irq(desc);
++ unsigned long bits;
+
+ dberr = (irq == edac->db_irq) ? 1 : 0;
+ sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
+@@ -1838,7 +1839,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
+
+ regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
+
+- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
++ bits = irq_status;
++ for_each_set_bit(bit, &bits, 32) {
+ irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
+ if (irq)
+ generic_handle_irq(irq);
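
for_each_set_bit() walks an unsigned long bitmap, so casting the address of a u32 to (unsigned long *) reads past the variable on 64-bit targets and picks the wrong half on big-endian ones. The fix widens the status word into a real unsigned long first; the hfi1/mad.c hunks further down make the same conversion. A sketch with a hypothetical per-bit handler:

#include <linux/bitops.h>

void handle_bit(int bit);	/* hypothetical per-bit handler */

static void handle_status(u32 irq_status)
{
	unsigned long bits = irq_status;	/* widen before bitops */
	int bit;

	for_each_set_bit(bit, &bits, 32)
		handle_bit(bit);
}
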
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 873437be86d9..608fdab566b3 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -810,7 +810,7 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
+
+ edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
+
+- for (dimm = 0; dimm < 4; dimm++) {
++ for (dimm = 0; dimm < 2; dimm++) {
+ size0 = 0;
+ cs0 = dimm * 2;
+
+@@ -942,89 +942,102 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
+ } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
++ } else if (pvt->fam >= 0x17) {
++ int umc;
++
++ for_each_umc(umc) {
++ pvt->csels[umc].b_cnt = 4;
++ pvt->csels[umc].m_cnt = 2;
++ }
++
+ } else {
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
+ }
+ }
+
++static void read_umc_base_mask(struct amd64_pvt *pvt)
++{
++ u32 umc_base_reg, umc_mask_reg;
++ u32 base_reg, mask_reg;
++ u32 *base, *mask;
++ int cs, umc;
++
++ for_each_umc(umc) {
++ umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
++
++ for_each_chip_select(cs, umc, pvt) {
++ base = &pvt->csels[umc].csbases[cs];
++
++ base_reg = umc_base_reg + (cs * 4);
++
++ if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
++ edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
++ umc, cs, *base, base_reg);
++ }
++
++ umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
++
++ for_each_chip_select_mask(cs, umc, pvt) {
++ mask = &pvt->csels[umc].csmasks[cs];
++
++ mask_reg = umc_mask_reg + (cs * 4);
++
++ if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
++ edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
++ umc, cs, *mask, mask_reg);
++ }
++ }
++}
++
+ /*
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
+ */
+ static void read_dct_base_mask(struct amd64_pvt *pvt)
+ {
+- int base_reg0, base_reg1, mask_reg0, mask_reg1, cs;
++ int cs;
+
+ prep_chip_selects(pvt);
+
+- if (pvt->umc) {
+- base_reg0 = get_umc_base(0) + UMCCH_BASE_ADDR;
+- base_reg1 = get_umc_base(1) + UMCCH_BASE_ADDR;
+- mask_reg0 = get_umc_base(0) + UMCCH_ADDR_MASK;
+- mask_reg1 = get_umc_base(1) + UMCCH_ADDR_MASK;
+- } else {
+- base_reg0 = DCSB0;
+- base_reg1 = DCSB1;
+- mask_reg0 = DCSM0;
+- mask_reg1 = DCSM1;
+- }
++ if (pvt->umc)
++ return read_umc_base_mask(pvt);
+
+ for_each_chip_select(cs, 0, pvt) {
+- int reg0 = base_reg0 + (cs * 4);
+- int reg1 = base_reg1 + (cs * 4);
++ int reg0 = DCSB0 + (cs * 4);
++ int reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
+
+- if (pvt->umc) {
+- if (!amd_smn_read(pvt->mc_node_id, reg0, base0))
+- edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
+- cs, *base0, reg0);
+-
+- if (!amd_smn_read(pvt->mc_node_id, reg1, base1))
+- edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
+- cs, *base1, reg1);
+- } else {
+- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
+- edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+- cs, *base0, reg0);
++ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
++ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
++ cs, *base0, reg0);
+
+- if (pvt->fam == 0xf)
+- continue;
++ if (pvt->fam == 0xf)
++ continue;
+
+- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
+- edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+- cs, *base1, (pvt->fam == 0x10) ? reg1
+- : reg0);
+- }
++ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
++ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
++ cs, *base1, (pvt->fam == 0x10) ? reg1
++ : reg0);
+ }
+
+ for_each_chip_select_mask(cs, 0, pvt) {
+- int reg0 = mask_reg0 + (cs * 4);
+- int reg1 = mask_reg1 + (cs * 4);
++ int reg0 = DCSM0 + (cs * 4);
++ int reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+- if (pvt->umc) {
+- if (!amd_smn_read(pvt->mc_node_id, reg0, mask0))
+- edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
+- cs, *mask0, reg0);
++ if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
++ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
++ cs, *mask0, reg0);
+
+- if (!amd_smn_read(pvt->mc_node_id, reg1, mask1))
+- edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
+- cs, *mask1, reg1);
+- } else {
+- if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
+- edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+- cs, *mask0, reg0);
+-
+- if (pvt->fam == 0xf)
+- continue;
++ if (pvt->fam == 0xf)
++ continue;
+
+- if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
+- edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+- cs, *mask1, (pvt->fam == 0x10) ? reg1
+- : reg0);
+- }
++ if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
++ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
++ cs, *mask1, (pvt->fam == 0x10) ? reg1
++ : reg0);
+ }
+ }
+
+@@ -2537,13 +2550,6 @@ static void decode_umc_error(int node_id, struct mce *m)
+
+ err.channel = find_umc_channel(m);
+
+- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+- err.err_code = ERR_NORM_ADDR;
+- goto log_error;
+- }
+-
+- error_address_to_page_and_offset(sys_addr, &err);
+-
+ if (!(m->status & MCI_STATUS_SYNDV)) {
+ err.err_code = ERR_SYND;
+ goto log_error;
+@@ -2560,6 +2566,13 @@ static void decode_umc_error(int node_id, struct mce *m)
+
+ err.csrow = m->synd & 0x7;
+
++ if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
++ err.err_code = ERR_NORM_ADDR;
++ goto log_error;
++ }
++
++ error_address_to_page_and_offset(sys_addr, &err);
++
+ log_error:
+ __log_ecc_error(mci, &err, ecc_type);
+ }
+@@ -3137,12 +3150,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
+ static inline void
+ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+ {
+- u8 i, ecc_en = 1, cpk_en = 1;
++ u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
+
+ for_each_umc(i) {
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
+ cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
++
++ dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
++ dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
+ }
+ }
+
+@@ -3150,8 +3166,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
+ if (ecc_en) {
+ mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+
+- if (cpk_en)
++ if (!cpk_en)
++ return;
++
++ if (dev_x4)
+ mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
++ else if (dev_x16)
++ mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
++ else
++ mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
+ }
+ }
+
+diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
+index 8f66472f7adc..4dce6a2ac75f 100644
+--- a/drivers/edac/amd64_edac.h
++++ b/drivers/edac/amd64_edac.h
+@@ -96,6 +96,7 @@
+ /* Hardware limit on ChipSelect rows per MC and processors per system */
+ #define NUM_CHIPSELECTS 8
+ #define DRAM_RANGES 8
++#define NUM_CONTROLLERS 8
+
+ #define ON true
+ #define OFF false
+@@ -351,8 +352,8 @@ struct amd64_pvt {
+ u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
+ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
+
+- /* one for each DCT */
+- struct chip_select csels[2];
++ /* one for each DCT/UMC */
++ struct chip_select csels[NUM_CONTROLLERS];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 64922c8fa7e3..d899d86897d0 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -1235,9 +1235,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ if (p > e->location)
+ *(p - 1) = '\0';
+
+- /* Report the error via the trace interface */
+- grain_bits = fls_long(e->grain) + 1;
++ /* Sanity-check driver-supplied grain value. */
++ if (WARN_ON_ONCE(!e->grain))
++ e->grain = 1;
++
++ grain_bits = fls_long(e->grain - 1);
+
++ /* Report the error via the trace interface */
+ if (IS_ENABLED(CONFIG_RAS))
+ trace_mc_event(type, e->msg, e->label, e->error_count,
+ mci->mc_idx, e->top_layer, e->mid_layer,
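
For a power-of-two grain, the number of grain bits is log2(grain), which fls_long(grain - 1) computes (grain 8 -> 3 bits, grain 4096 -> 12); the old fls_long(grain) + 1 over-reported exact powers of two by two bits, and a grain of 0 would underflow, hence the WARN and clamp. A sketch of the corrected computation:

#include <linux/bitops.h>
#include <linux/bug.h>

static u8 grain_bits(unsigned long grain)
{
	if (WARN_ON_ONCE(!grain))	/* a zero grain would underflow */
		grain = 1;
	/* log2 for powers of two; rounds non-powers up: 1->0, 8->3, 5->3 */
	return fls_long(grain - 1);
}
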
+diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
+index ca25f8fe57ef..1ad538baaa4a 100644
+--- a/drivers/edac/pnd2_edac.c
++++ b/drivers/edac/pnd2_edac.c
+@@ -260,11 +260,14 @@ static u64 get_sideband_reg_base_addr(void)
+ }
+ }
+
++#define DNV_MCHBAR_SIZE 0x8000
++#define DNV_SB_PORT_SIZE 0x10000
+ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
+ {
+ struct pci_dev *pdev;
+ char *base;
+ u64 addr;
++ unsigned long size;
+
+ if (op == 4) {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
+@@ -279,15 +282,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
+ addr = get_mem_ctrl_hub_base_addr();
+ if (!addr)
+ return -ENODEV;
++ size = DNV_MCHBAR_SIZE;
+ } else {
+ /* MMIO via sideband register base address */
+ addr = get_sideband_reg_base_addr();
+ if (!addr)
+ return -ENODEV;
+ addr += (port << 16);
++ size = DNV_SB_PORT_SIZE;
+ }
+
+- base = ioremap((resource_size_t)addr, 0x10000);
++ base = ioremap((resource_size_t)addr, size);
+ if (!base)
+ return -ENODEV;
+
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index b5bc4c7a8fab..b49c9e6f4bf1 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
+ struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
+ struct scmi_shared_mem __iomem *mem = cinfo->payload;
+
++ /*
++ * Ideally the channel must be free by now, unless the OS timed
++ * out the last request and the platform is still processing it.
++ * Wait until the platform releases the shared memory; otherwise
++ * we may end up overwriting its response with the new payload.
++ */
++ spin_until_cond(ioread32(&mem->channel_status) &
++ SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+ /* Mark channel busy + clear error */
+ iowrite32(0x0, &mem->channel_status);
+ iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
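
Before writing a new payload the SCMI transport now spins until the platform marks the shared-memory channel free, so a late reply to a previously timed-out request cannot be overwritten by the new message (or vice versa). A sketch of the claim-when-free pattern; struct shmem and CHAN_FREE are hypothetical stand-ins for the SCMI shared-memory layout, while spin_until_cond() is the real helper from <linux/processor.h>:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/processor.h>

#define CHAN_FREE	BIT(0)	/* hypothetical "channel free" status bit */

struct shmem { u32 channel_status; };

static void claim_channel(struct shmem __iomem *mem)
{
	/* wait for the other side to release the channel */
	spin_until_cond(ioread32(&mem->channel_status) & CHAN_FREE);
	/* mark it busy before writing the new payload */
	iowrite32(0x0, &mem->channel_status);
}
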
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index 8fa977c7861f..addf0749dd8b 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -390,6 +390,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
+ printk(
+ "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
+ pfx, pcie->bridge.secondary_status, pcie->bridge.control);
++
++ /* Fatal errors call __ghes_panic() before AER handler prints this */
++ if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
++ (gdata->error_severity & CPER_SEV_FATAL)) {
++ struct aer_capability_regs *aer;
++
++ aer = (struct aer_capability_regs *)pcie->aer_info;
++ printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
++ pfx, aer->uncor_status, aer->uncor_mask);
++ printk("%saer_uncor_severity: 0x%08x\n",
++ pfx, aer->uncor_severity);
++ printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
++ aer->header_log.dw0, aer->header_log.dw1,
++ aer->header_log.dw2, aer->header_log.dw3);
++ }
+ }
+
+ static void cper_print_tstamp(const char *pfx,
+diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
+index 2ddc118dba1b..74b84244a0db 100644
+--- a/drivers/firmware/qcom_scm.c
++++ b/drivers/firmware/qcom_scm.c
+@@ -9,6 +9,7 @@
+ #include <linux/init.h>
+ #include <linux/cpumask.h>
+ #include <linux/export.h>
++#include <linux/dma-direct.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -440,6 +441,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ phys_addr_t mem_to_map_phys;
+ phys_addr_t dest_phys;
+ phys_addr_t ptr_phys;
++ dma_addr_t ptr_dma;
+ size_t mem_to_map_sz;
+ size_t dest_sz;
+ size_t src_sz;
+@@ -457,9 +459,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+ ALIGN(dest_sz, SZ_64);
+
+- ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
++ ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
++ ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
+
+ /* Fill source vmid detail */
+ src = ptr;
+@@ -489,7 +492,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+
+ ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+ ptr_phys, src_sz, dest_phys, dest_sz);
+- dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
++ dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
+ if (ret) {
+ dev_err(__scm->dev,
+ "Assign memory protection call failed %d.\n", ret);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 279ced1d64ed..6a743aaf2669 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1958,6 +1958,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+ }
+
+ static const struct backlight_ops amdgpu_dm_backlight_ops = {
++ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+ };
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 048757e8f494..d1919d343cce 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -4064,6 +4064,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
+
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
++ if (data->frame_time_x2 < 280) {
++ pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
++ data->frame_time_x2 = 280;
++ }
++
+ display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
+index 9c5ae825c507..69917ecd4af6 100644
+--- a/drivers/gpu/drm/drm_kms_helper_common.c
++++ b/drivers/gpu/drm/drm_kms_helper_common.c
+@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights");
+ /* Backward compatibility for drm_kms_helper.edid_firmware */
+ static int edid_firmware_set(const char *val, const struct kernel_param *kp)
+ {
+- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
++ DRM_NOTE("drm_kms_helper.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
+
+ return __drm_set_edid_firmware_path(val);
+ }
+diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
+index 6ba1a08253f0..4cf25458f0b9 100644
+--- a/drivers/hwmon/acpi_power_meter.c
++++ b/drivers/hwmon/acpi_power_meter.c
+@@ -681,8 +681,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
+
+ if (resource->caps.flags & POWER_METER_CAN_CAP) {
+ if (!can_cap_in_hardware()) {
+- dev_err(&resource->acpi_dev->dev,
+- "Ignoring unsafe software power cap!\n");
++ dev_warn(&resource->acpi_dev->dev,
++ "Ignoring unsafe software power cap!\n");
+ goto skip_unsafe_cap;
+ }
+
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index c77e89239dcd..5c1dddde193c 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -349,6 +349,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+ {}
+ };
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index f31413fd9521..800414886f6b 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -202,6 +202,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
+ if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
+ /* We got a NACKIE */
+ readb(riic->base + RIIC_ICDRR); /* dummy read */
++ riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
+ riic->err = -ENXIO;
+ } else if (riic->bytes_left) {
+ return IRQ_NONE;
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 9b76a8fcdd24..bf539c34ccd3 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -352,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
+
+ if (family == AF_INET) {
+ rt = container_of(dst, struct rtable, dst);
+- return rt->rt_gw_family == AF_INET;
++ return rt->rt_uses_gateway;
+ }
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index 63fe14c7c68f..df8e8ac2c16b 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -3477,7 +3477,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
+
+ err_copy:
+ ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
+-
++ /* It was released in ib_destroy_srq_user */
++ srq = NULL;
+ err_free:
+ kfree(srq);
+ err_put:
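
ib_destroy_srq_user() frees the SRQ object, so the error path that calls it must not fall through to the shared kfree(); clearing the local pointer turns that kfree() into a harmless kfree(NULL). A generic sketch of the discipline (struct obj, publish() and destroy_obj() are hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

struct obj { u32 id; };

int publish(struct obj *o);		/* hypothetical */
void destroy_obj(struct obj *o);	/* hypothetical; frees o internally */

static int create_and_publish(void __user *ubuf)
{
	struct obj *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	ret = publish(obj);
	if (ret)
		goto err_free;

	if (copy_to_user(ubuf, &obj->id, sizeof(obj->id))) {
		ret = -EFAULT;
		goto err_destroy;
	}
	return 0;

err_destroy:
	destroy_obj(obj);	/* object is gone after this call */
	obj = NULL;		/* make the kfree() below a no-op */
err_free:
	kfree(obj);
	return ret;
}
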
+diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
+index 4228393e6c4c..2a936456cf64 100644
+--- a/drivers/infiniband/hw/hfi1/mad.c
++++ b/drivers/infiniband/hw/hfi1/mad.c
+@@ -2326,7 +2326,7 @@ struct opa_port_status_req {
+ __be32 vl_select_mask;
+ };
+
+-#define VL_MASK_ALL 0x000080ff
++#define VL_MASK_ALL 0x00000000000080ffUL
+
+ struct opa_port_status_rsp {
+ __u8 port_num;
+@@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
+ }
+
+ static void a0_portstatus(struct hfi1_pportdata *ppd,
+- struct opa_port_status_rsp *rsp, u32 vl_select_mask)
++ struct opa_port_status_rsp *rsp)
+ {
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+- u32 vl_all_mask = VL_MASK_ALL;
++ unsigned long vl_all_mask = VL_MASK_ALL;
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+- 8 * sizeof(vl_all_mask)) {
++ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+@@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ (struct opa_port_status_req *)pmp->data;
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ struct opa_port_status_rsp *rsp;
+- u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
++ unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
+ unsigned long vl;
+ size_t response_data_size;
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u8 port_num = req->port_num;
+- u8 num_vls = hweight32(vl_select_mask);
++ u8 num_vls = hweight64(vl_select_mask);
+ struct _vls_pctrs *vlinfo;
+ struct hfi1_ibport *ibp = to_iport(ibdev, port);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+@@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+
+ hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
+
+- rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
++ rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
+ rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
+ CNTR_INVALID_VL));
+ rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
+@@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
+@@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
+ vfi++;
+ }
+
+- a0_portstatus(ppd, rsp, vl_select_mask);
++ a0_portstatus(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+@@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
+ return error_counter_summary;
+ }
+
+-static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
+- u32 vl_select_mask)
++static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
+ {
+ if (!is_bx(ppd->dd)) {
+ unsigned long vl;
+ u64 sum_vl_xmit_wait = 0;
+- u32 vl_all_mask = VL_MASK_ALL;
++ unsigned long vl_all_mask = VL_MASK_ALL;
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
+- 8 * sizeof(vl_all_mask)) {
++ for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
+ u64 tmp = sum_vl_xmit_wait +
+ read_port_cntr(ppd, C_TX_WAIT_VL,
+ idx_from_vl(vl));
+@@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ u64 port_mask;
+ u8 port_num;
+ unsigned long vl;
+- u32 vl_select_mask;
++ unsigned long vl_select_mask;
+ int vfi;
+ u16 link_width;
+ u16 link_speed;
+@@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ * So in the for_each_set_bit() loop below, we don't need
+ * any additional checks for vl.
+ */
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(req->vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+
+ rsp->vls[vfi].port_vl_xmit_data =
+@@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
+ vfi++;
+ }
+
+- a0_datacounters(ppd, rsp, vl_select_mask);
++ a0_datacounters(ppd, rsp);
+
+ if (resp_len)
+ *resp_len += response_data_size;
+@@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ struct _vls_ectrs *vlinfo;
+ unsigned long vl;
+ u64 port_mask, tmp;
+- u32 vl_select_mask;
++ unsigned long vl_select_mask;
+ int vfi;
+
+ req = (struct opa_port_error_counters64_msg *)pmp->data;
+@@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
+ vlinfo = &rsp->vls[0];
+ vfi = 0;
+ vl_select_mask = be32_to_cpu(req->vl_select_mask);
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(req->vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ memset(vlinfo, 0, sizeof(*vlinfo));
+ rsp->vls[vfi].port_vl_xmit_discards =
+ cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
+@@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
+ u64 portn = be64_to_cpu(req->port_select_mask[3]);
+ u32 counter_select = be32_to_cpu(req->counter_select_mask);
+- u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
++ unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
+ unsigned long vl;
+
+ if ((nports != 1) || (portn != 1 << port)) {
+@@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
+ if (counter_select & CS_UNCORRECTABLE_ERRORS)
+ write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
+
+- for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
+- 8 * sizeof(vl_select_mask)) {
++ for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
+ if (counter_select & CS_PORT_XMIT_DATA)
+ write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
+
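The hfi1 hunks above all make the same change for the same reason: for_each_set_bit() is only defined over arrays of unsigned long, so taking the address of a u32 mask, casting it, and asking for 8 * sizeof(mask) bits reads beyond the variable (and yields wrong bit indices on big-endian 64-bit machines). A minimal userspace sketch of the safe pattern, with illustrative values:

    #include <stdio.h>

    int main(void)
    {
            /* the mask type matches the iterator's word size, as in the fix */
            unsigned long vl_select_mask = 0x8005UL; /* VLs 0, 2 and 15 set */
            unsigned long vl;

            for (vl = 0; vl < 8 * sizeof(vl_select_mask); vl++)
                    if (vl_select_mask & (1UL << vl))
                            printf("handling VL %lu\n", vl);
            return 0;
    }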
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 27f86b436b9e..7f9c23450579 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -874,16 +874,17 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+
+- if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+- pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+ pbc = create_pbc(ppd,
+ pbc,
+ qp->srate_mbps,
+ vl,
+ plen);
+
+- /* Update HCRC based on packet opcode */
+- pbc = update_hcrc(ps->opcode, pbc);
++ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
++ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
++ else
++ /* Update HCRC based on packet opcode */
++ pbc = update_hcrc(ps->opcode, pbc);
+ }
+ tx->wqe = qp->s_wqe;
+ ret = build_verbs_tx_desc(tx->sde, len, tx, ahg_info, pbc);
+@@ -1030,12 +1031,12 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ else
+ pbc |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+
++ pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
+ if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
+ pbc = hfi1_fault_tx(qp, ps->opcode, pbc);
+- pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
+-
+- /* Update HCRC based on packet opcode */
+- pbc = update_hcrc(ps->opcode, pbc);
++ else
++ /* Update HCRC based on packet opcode */
++ pbc = update_hcrc(ps->opcode, pbc);
+ }
+ if (cb)
+ iowait_pio_inc(&priv->s_iowait);
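Both verbs.c hunks reorder the send path identically: fault injection now runs after create_pbc() has produced the final PBC, and when it fires the HCRC update is skipped, since recomputing a valid checksum would silently repair the deliberately corrupted packet and the injected fault would never reach the wire. A condensed sketch of the fixed ordering, using the driver's own names:

    pbc = create_pbc(ppd, pbc, qp->srate_mbps, vl, plen);
    if (unlikely(hfi1_dbg_should_fault_tx(qp, ps->opcode)))
            pbc = hfi1_fault_tx(qp, ps->opcode, pbc); /* corrupt on purpose */
    else
            pbc = update_hcrc(ps->opcode, pbc);       /* normal checksum */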
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 9ab276a8bc81..ff6a2b1fa8b6 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -6829,6 +6829,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
++ kfree(mpi);
+ return;
+ }
+
+diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
+index 8c71a15e986b..826a5bbe0d09 100644
+--- a/drivers/iommu/Makefile
++++ b/drivers/iommu/Makefile
+@@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+ obj-$(CONFIG_IOMMU_IOVA) += iova.o
+ obj-$(CONFIG_OF_IOMMU) += of_iommu.o
+ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+-obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
++obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
+ obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
+ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 3e687f18b203..a0b64c43257a 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2570,7 +2570,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
+
+ bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
+ phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
+- ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
++ ret = iommu_map_page(domain, bus_addr, phys_addr,
++ PAGE_SIZE, prot,
++ GFP_ATOMIC | __GFP_NOWARN);
+ if (ret)
+ goto out_unmap;
+
+diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
+new file mode 100644
+index 000000000000..12d540d9b59b
+--- /dev/null
++++ b/drivers/iommu/amd_iommu.h
+@@ -0,0 +1,14 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++#ifndef AMD_IOMMU_H
++#define AMD_IOMMU_H
++
++int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
++
++#ifdef CONFIG_DMI
++void amd_iommu_apply_ivrs_quirks(void);
++#else
++static void amd_iommu_apply_ivrs_quirks(void) { }
++#endif
++
++#endif
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 07d84dbab564..6469f5128242 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -30,6 +30,7 @@
+ #include <asm/irq_remapping.h>
+
+ #include <linux/crash_dump.h>
++#include "amd_iommu.h"
+ #include "amd_iommu_proto.h"
+ #include "amd_iommu_types.h"
+ #include "irq_remapping.h"
+@@ -997,7 +998,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
+ set_iommu_for_device(iommu, devid);
+ }
+
+-static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
++int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
+ {
+ struct devid_map *entry;
+ struct list_head *list;
+@@ -1148,6 +1149,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
+ if (ret)
+ return ret;
+
++ amd_iommu_apply_ivrs_quirks();
++
+ /*
+ * First save the recommended feature enable bits from ACPI
+ */
+diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
+new file mode 100644
+index 000000000000..c235f79b7a20
+--- /dev/null
++++ b/drivers/iommu/amd_iommu_quirks.c
+@@ -0,0 +1,92 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++
++/*
++ * Quirks for AMD IOMMU
++ *
++ * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
++ */
++
++#ifdef CONFIG_DMI
++#include <linux/dmi.h>
++
++#include "amd_iommu.h"
++
++#define IVHD_SPECIAL_IOAPIC 1
++
++struct ivrs_quirk_entry {
++ u8 id;
++ u16 devid;
++};
++
++enum {
++ DELL_INSPIRON_7375 = 0,
++ DELL_LATITUDE_5495,
++ LENOVO_IDEAPAD_330S_15ARR,
++};
++
++static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
++ /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
++ [DELL_INSPIRON_7375] = {
++ { .id = 4, .devid = 0xa0 },
++ { .id = 5, .devid = 0x2 },
++ {}
++ },
++ /* ivrs_ioapic[4]=00:14.0 */
++ [DELL_LATITUDE_5495] = {
++ { .id = 4, .devid = 0xa0 },
++ {}
++ },
++ /* ivrs_ioapic[32]=00:14.0 */
++ [LENOVO_IDEAPAD_330S_15ARR] = {
++ { .id = 32, .devid = 0xa0 },
++ {}
++ },
++ {}
++};
++
++static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
++{
++ const struct ivrs_quirk_entry *i;
++
++ for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
++ add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
++
++ return 0;
++}
++
++static const struct dmi_system_id ivrs_quirks[] __initconst = {
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Dell Inspiron 7375",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
++ },
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Dell Latitude 5495",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
++ },
++ {
++ .callback = ivrs_ioapic_quirk_cb,
++ .ident = "Lenovo ideapad 330S-15ARR",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
++ },
++ .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
++ },
++ {}
++};
++
++void __init amd_iommu_apply_ivrs_quirks(void)
++{
++ dmi_check_system(ivrs_quirks);
++}
++#endif
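The .devid values in the quirk table are IVRS device IDs, which pack a PCI address as bus << 8 | slot << 3 | function (the bus byte on top of the usual PCI_DEVFN packing). A small standalone check of the table's comments, 00:14.0 and 00:00.2, under that assumption:

    #include <stdio.h>

    #define IVRS_DEVID(bus, slot, fn) (((bus) << 8) | ((slot) << 3) | (fn))

    int main(void)
    {
            printf("00:14.0 -> 0x%02x\n", IVRS_DEVID(0, 0x14, 0)); /* 0xa0 */
            printf("00:00.2 -> 0x%02x\n", IVRS_DEVID(0, 0x00, 2)); /* 0x02 */
            return 0;
    }

These quirks hard-code what users on the affected machines previously had to pass by hand via the ivrs_ioapic= kernel parameter.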
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 0fee8f7957ec..f04c13d52598 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -2817,11 +2817,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
+ }
+
+ /* Boolean feature flags */
++#if 0 /* ATS invalidation is slow and broken */
+ if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
+ smmu->features |= ARM_SMMU_FEAT_PRI;
+
+ if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
+ smmu->features |= ARM_SMMU_FEAT_ATS;
++#endif
+
+ if (reg & IDR0_SEV)
+ smmu->features |= ARM_SMMU_FEAT_SEV;
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 4160aa9f3f80..04a6cd90812c 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+ {
+ struct set_msi_sid_data *data = opaque;
+
++ if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
++ data->busmatch_count++;
++
+ data->pdev = pdev;
+ data->alias = alias;
+ data->count++;
+
+- if (PCI_BUS_NUM(alias) == pdev->bus->number)
+- data->busmatch_count++;
+-
+ return 0;
+ }
+
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 3e1a8a675572..41c605b0058f 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad,
+
+ spin_unlock_irqrestore(&fq->lock, flags);
+
+- if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
++ /* Avoid false sharing as much as possible. */
++ if (!atomic_read(&iovad->fq_timer_on) &&
++ !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+ mod_timer(&iovad->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+ }
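The queue_iova() change is the classic test-and-test-and-set idiom: atomic_cmpxchg() takes the cache line exclusive even when it fails, so under heavy concurrent unmapping every CPU was bouncing the line just to learn the timer was already armed. A plain read filters out that common case. Userspace sketch with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int fq_timer_on;

    static bool try_arm_timer(void)
    {
            int expected = 0;

            /* cheap shared read first; only attempt the cmpxchg, which
             * dirties the cache line even on failure, when the timer
             * looks unarmed */
            if (atomic_load(&fq_timer_on))
                    return false;
            return atomic_compare_exchange_strong(&fq_timer_on, &expected, 1);
    }

    int main(void)
    {
            return try_arm_timer() ? 0 : 1;
    }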
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 20e5482d91b9..fca8b9002852 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2641,14 +2641,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ struct its_node *its = its_dev->its;
+ int i;
+
++ bitmap_release_region(its_dev->event_map.lpi_map,
++ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
++ get_count_order(nr_irqs));
++
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+- u32 event = its_get_event_id(data);
+-
+- /* Mark interrupt index as unused */
+- clear_bit(event, its_dev->event_map.lpi_map);
+-
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
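The ITS free path previously cleared one bit per interrupt, but LPI blocks are handed out as aligned power-of-two regions, so freeing must release the same rounded region or partially cleared blocks leak. get_count_order(n) is the kernel's ceil(log2(n)); a userspace check of the rounding it relies on, assuming that semantic:

    #include <stdio.h>

    static int count_order(unsigned int n)
    {
            int order = 0;

            while ((1u << order) < n)
                    order++;
            return order;
    }

    int main(void)
    {
            /* nr_irqs = 5 was allocated from an 8-bit region */
            int order = count_order(5);

            printf("order %d -> region of %u bits\n", order, 1u << order);
            return 0;
    }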
+diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
+index cf755964f2f8..c72c036aea76 100644
+--- a/drivers/irqchip/irq-sifive-plic.c
++++ b/drivers/irqchip/irq-sifive-plic.c
+@@ -244,6 +244,7 @@ static int __init plic_init(struct device_node *node,
+ struct plic_handler *handler;
+ irq_hw_number_t hwirq;
+ int cpu, hartid;
++ u32 threshold = 0;
+
+ if (of_irq_parse_one(node, i, &parent)) {
+ pr_err("failed to parse parent for context %d.\n", i);
+@@ -266,10 +267,16 @@ static int __init plic_init(struct device_node *node,
+ continue;
+ }
+
++ /*
++ * When running in M-mode we need to ignore the S-mode handler.
++ * Here we assume it always comes later, but that might be a
++ * little fragile.
++ */
+ handler = per_cpu_ptr(&plic_handlers, cpu);
+ if (handler->present) {
+ pr_warn("handler already present for context %d.\n", i);
+- continue;
++ threshold = 0xffffffff;
++ goto done;
+ }
+
+ handler->present = true;
+@@ -279,8 +286,9 @@ static int __init plic_init(struct device_node *node,
+ handler->enable_base =
+ plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+
++done:
+ /* priority must be > threshold to trigger an interrupt */
+- writel(0, handler->hart_base + CONTEXT_THRESHOLD);
++ writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
+ for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
+ plic_toggle(handler, hwirq, 0);
+ nr_handlers++;
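The per-context threshold register is the masking mechanism this hunk leans on: the PLIC signals a context only for sources whose priority is strictly greater than the context's threshold, so writing the all-ones value silences the duplicate context for good instead of letting two handlers race to claim the same hart. The rule as an illustrative predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* illustrative only: priorities are small integers, so a
     * threshold of 0xffffffff can never be exceeded */
    static bool plic_would_fire(uint32_t priority, uint32_t threshold)
    {
            return priority > threshold;
    }

    int main(void)
    {
            return plic_would_fire(7, 0xffffffffu); /* 0: masked */
    }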
+diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
+index c6ba37df4b9d..dff4132b3702 100644
+--- a/drivers/isdn/mISDN/socket.c
++++ b/drivers/isdn/mISDN/socket.c
+@@ -754,6 +754,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
+
+ if (sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+
+ sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
+ if (!sk)
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index 8d11a5e23227..eff1bda8b520 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -173,6 +173,7 @@ err_activate:
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ led_set_brightness(led_cdev, LED_OFF);
++ kfree(event);
+
+ return ret;
+ }
+diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
+index 180895b83b88..e55a64847fe2 100644
+--- a/drivers/leds/leds-lm3532.c
++++ b/drivers/leds/leds-lm3532.c
+@@ -40,7 +40,7 @@
+ #define LM3532_REG_ZN_3_LO 0x67
+ #define LM3532_REG_MAX 0x7e
+
+-/* Contorl Enable */
++/* Control Enable */
+ #define LM3532_CTRL_A_ENABLE BIT(0)
+ #define LM3532_CTRL_B_ENABLE BIT(1)
+ #define LM3532_CTRL_C_ENABLE BIT(2)
+@@ -302,7 +302,7 @@ static int lm3532_led_disable(struct lm3532_led *led_data)
+ int ret;
+
+ ret = regmap_update_bits(led_data->priv->regmap, LM3532_REG_ENABLE,
+- ctrl_en_val, ~ctrl_en_val);
++ ctrl_en_val, 0);
+ if (ret) {
+ dev_err(led_data->priv->dev, "Failed to set ctrl:%d\n", ret);
+ return ret;
+@@ -321,7 +321,7 @@ static int lm3532_brightness_set(struct led_classdev *led_cdev,
+
+ mutex_lock(&led->priv->lock);
+
+- if (led->mode == LM3532_BL_MODE_ALS) {
++ if (led->mode == LM3532_ALS_CTRL) {
+ if (brt_val > LED_OFF)
+ ret = lm3532_led_enable(led);
+ else
+@@ -542,11 +542,14 @@ static int lm3532_parse_node(struct lm3532_data *priv)
+ }
+
+ if (led->mode == LM3532_BL_MODE_ALS) {
++ led->mode = LM3532_ALS_CTRL;
+ ret = lm3532_parse_als(priv);
+ if (ret)
+ dev_err(&priv->client->dev, "Failed to parse als\n");
+ else
+ lm3532_als_configure(priv, led);
++ } else {
++ led->mode = LM3532_I2C_CTRL;
+ }
+
+ led->num_leds = fwnode_property_read_u32_array(child,
+@@ -590,7 +593,13 @@ static int lm3532_parse_node(struct lm3532_data *priv)
+ goto child_out;
+ }
+
+- lm3532_init_registers(led);
++ ret = lm3532_init_registers(led);
++ if (ret) {
++ dev_err(&priv->client->dev, "register init err: %d\n",
++ ret);
++ fwnode_handle_put(child);
++ goto child_out;
++ }
+
+ i++;
+ }
+diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
+index 37632fc63741..edb57c42e8b1 100644
+--- a/drivers/leds/leds-lp5562.c
++++ b/drivers/leds/leds-lp5562.c
+@@ -260,7 +260,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
+ {
+ const struct firmware *fw = chip->fw;
+
+- if (fw->size > LP5562_PROGRAM_LENGTH) {
++ /*
++ * the firmware is encoded in ASCII hex characters, with 2 chars
++ * per byte
++ */
++ if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
+ dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
+ fw->size);
+ return;
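The corrected bound follows directly from the format noted in the new comment: the firmware file is ASCII hex, so LP5562_PROGRAM_LENGTH program bytes (32 in this driver) occupy up to twice as many file bytes. A hypothetical standalone decoder, not the driver's parser, illustrating the 2:1 ratio:

    #include <stdio.h>
    #include <string.h>

    static int hex_nibble(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    int main(void)
    {
            const char *fw = "9d80"; /* 4 hex chars -> 2 program bytes */
            size_t i;

            for (i = 0; i + 1 < strlen(fw); i += 2)
                    printf("byte %zu = 0x%02x\n", i / 2,
                           hex_nibble(fw[i]) << 4 | hex_nibble(fw[i + 1]));
            return 0;
    }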
+diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
+index 73f5319295bc..c12cd809ab19 100644
+--- a/drivers/md/bcache/closure.c
++++ b/drivers/md/bcache/closure.c
+@@ -105,8 +105,14 @@ struct closure_syncer {
+
+ static void closure_sync_fn(struct closure *cl)
+ {
+- cl->s->done = 1;
+- wake_up_process(cl->s->task);
++ struct closure_syncer *s = cl->s;
++ struct task_struct *p;
++
++ rcu_read_lock();
++ p = READ_ONCE(s->task);
++ s->done = 1;
++ wake_up_process(p);
++ rcu_read_unlock();
+ }
+
+ void __sched __closure_sync(struct closure *cl)
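The race being closed here: as soon as the waiter in __closure_sync() observes s->done == 1 it can return, and with it the on-stack closure_syncer, and even the task itself, may be gone before the waker gets to wake_up_process(). The fix snapshots the task pointer first and holds an RCU read lock across the wakeup, which keeps the task_struct alive. The required ordering, annotated:

    rcu_read_lock();
    p = READ_ONCE(s->task); /* snapshot before 'done' is published */
    s->done = 1;            /* after this store, *s may be freed   */
    wake_up_process(p);     /* p remains valid under RCU           */
    rcu_read_unlock();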
+diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
+index 5f7063f05ae0..b41ecb451c78 100644
+--- a/drivers/md/dm-rq.c
++++ b/drivers/md/dm-rq.c
+@@ -408,6 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
+ blk_rq_unprep_clone(clone);
++ blk_mq_cleanup_rq(clone);
+ tio->ti->type->release_clone_rq(clone, &tio->info);
+ tio->clone = NULL;
+ return DM_MAPIO_REQUEUE;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 9801d540fea1..c29002d8f337 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1754,8 +1754,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
+ if (!(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RECOVERY_BITMAP))
+ rdev->saved_raid_disk = -1;
+- } else
+- set_bit(In_sync, &rdev->flags);
++ } else {
++ /*
++ * If the array is FROZEN, then the device can't
++ * be in_sync with rest of array.
++ */
++ if (!test_bit(MD_RECOVERY_FROZEN,
++ &mddev->recovery))
++ set_bit(In_sync, &rdev->flags);
++ }
+ rdev->raid_disk = role;
+ break;
+ }
+@@ -4098,7 +4105,7 @@ array_state_show(struct mddev *mddev, char *page)
+ {
+ enum array_state st = inactive;
+
+- if (mddev->pers)
++ if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
+ switch(mddev->ro) {
+ case 1:
+ st = readonly;
+@@ -5653,9 +5660,6 @@ int md_run(struct mddev *mddev)
+ md_update_sb(mddev, 0);
+
+ md_new_event(mddev);
+- sysfs_notify_dirent_safe(mddev->sysfs_state);
+- sysfs_notify_dirent_safe(mddev->sysfs_action);
+- sysfs_notify(&mddev->kobj, NULL, "degraded");
+ return 0;
+
+ abort:
+@@ -5669,6 +5673,7 @@ static int do_md_run(struct mddev *mddev)
+ {
+ int err;
+
++ set_bit(MD_NOT_READY, &mddev->flags);
+ err = md_run(mddev);
+ if (err)
+ goto out;
+@@ -5689,9 +5694,14 @@ static int do_md_run(struct mddev *mddev)
+
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ mddev->changed = 1;
+ kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
++ sysfs_notify_dirent_safe(mddev->sysfs_state);
++ sysfs_notify_dirent_safe(mddev->sysfs_action);
++ sysfs_notify(&mddev->kobj, NULL, "degraded");
+ out:
++ clear_bit(MD_NOT_READY, &mddev->flags);
+ return err;
+ }
+
+@@ -8801,6 +8811,7 @@ void md_check_recovery(struct mddev *mddev)
+
+ if (mddev_trylock(mddev)) {
+ int spares = 0;
++ bool try_set_sync = mddev->safemode != 0;
+
+ if (!mddev->external && mddev->safemode == 1)
+ mddev->safemode = 0;
+@@ -8846,7 +8857,7 @@ void md_check_recovery(struct mddev *mddev)
+ }
+ }
+
+- if (!mddev->external && !mddev->in_sync) {
++ if (try_set_sync && !mddev->external && !mddev->in_sync) {
+ spin_lock(&mddev->lock);
+ set_in_sync(mddev);
+ spin_unlock(&mddev->lock);
+@@ -8944,7 +8955,8 @@ void md_reap_sync_thread(struct mddev *mddev)
+ /* resync has finished, collect result */
+ md_unregister_thread(&mddev->sync_thread);
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+- !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
++ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
++ mddev->degraded != mddev->raid_disks) {
+ /* success...*/
+ /* activate any spares */
+ if (mddev->pers->spare_active(mddev)) {
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index 7c930c091193..6ad1f93a8c17 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -236,6 +236,9 @@ enum mddev_flags {
+ MD_UPDATING_SB, /* md_check_recovery is updating the metadata
+ * without explicitly holding reconfig_mutex.
+ */
++ MD_NOT_READY, /* do_md_run() is active, so 'array_state'
++ * must not report that array is ready yet
++ */
+ };
+
+ enum mddev_sb_flags {
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index bf5cf184a260..297bbc0f41f0 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -19,6 +19,9 @@
+ #include "raid0.h"
+ #include "raid5.h"
+
++static int default_layout = 0;
++module_param(default_layout, int, 0644);
++
+ #define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+@@ -139,6 +142,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ }
+ pr_debug("md/raid0:%s: FINAL %d zones\n",
+ mdname(mddev), conf->nr_strip_zones);
++
++ if (conf->nr_strip_zones == 1) {
++ conf->layout = RAID0_ORIG_LAYOUT;
++ } else if (default_layout == RAID0_ORIG_LAYOUT ||
++ default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
++ conf->layout = default_layout;
++ } else {
++ pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
++ mdname(mddev));
++ pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
++ err = -ENOTSUPP;
++ goto abort;
++ }
+ /*
+ * now since we have the hard sector sizes, we can make sure
+ * chunk size is a multiple of that sector size
+@@ -547,10 +563,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
+
+ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ {
++ struct r0conf *conf = mddev->private;
+ struct strip_zone *zone;
+ struct md_rdev *tmp_dev;
+ sector_t bio_sector;
+ sector_t sector;
++ sector_t orig_sector;
+ unsigned chunk_sects;
+ unsigned sectors;
+
+@@ -584,8 +602,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ bio = split;
+ }
+
++ orig_sector = sector;
+ zone = find_zone(mddev->private, §or);
+- tmp_dev = map_sector(mddev, zone, sector, §or);
++ switch (conf->layout) {
++ case RAID0_ORIG_LAYOUT:
++ tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
++ break;
++ case RAID0_ALT_MULTIZONE_LAYOUT:
++ tmp_dev = map_sector(mddev, zone, sector, &sector);
++ break;
++ default:
++ WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
++ bio_io_error(bio);
++ return true;
++ }
++
+ bio_set_dev(bio, tmp_dev->bdev);
+ bio->bi_iter.bi_sector = sector + zone->dev_start +
+ tmp_dev->data_offset;
+diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
+index 540e65d92642..3816e5477db1 100644
+--- a/drivers/md/raid0.h
++++ b/drivers/md/raid0.h
+@@ -8,11 +8,25 @@ struct strip_zone {
+ int nb_dev; /* # of devices attached to the zone */
+ };
+
++/* Linux 3.14 (20d0189b101) made an unintended change to
++ * the RAID0 layout for multi-zone arrays (where devices aren't all
++ * the same size).
++ * RAID0_ORIG_LAYOUT restores the original layout
++ * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
++ * The layouts are identical when there is only one zone (all
++ * devices the same size).
++ */
++
++enum r0layout {
++ RAID0_ORIG_LAYOUT = 1,
++ RAID0_ALT_MULTIZONE_LAYOUT = 2,
++};
+ struct r0conf {
+ struct strip_zone *strip_zone;
+ struct md_rdev **devlist; /* lists of rdevs, pointed to
+ * by strip_zone->dev */
+ int nr_strip_zones;
++ enum r0layout layout;
+ };
+
+ #endif
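Why an explicit choice is forced at all: with more than one zone, the two historical layouts select different member devices for the same logical sector, so guessing wrong silently reads and writes the wrong disk. A toy model of the divergence (pure illustration; the real map_sector() also handles chunk offsets and per-zone device lists):

    #include <stdio.h>

    int main(void)
    {
            unsigned long zone_start = 100; /* chunks before this zone */
            unsigned long chunk = 7;        /* chunk within the zone   */
            int nb_dev = 3;                 /* devices in this zone    */

            /* original layout: device index from the absolute chunk */
            printf("orig layout -> dev %lu\n", (zone_start + chunk) % nb_dev);
            /* post-3.14 layout: device index from the relative chunk */
            printf("alt  layout -> dev %lu\n", chunk % nb_dev);
            return 0;
    }

With the values above the two layouts land on devices 2 and 1 respectively, which is exactly the corruption the new module parameter guards against.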
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2aa36e570e04..f393f0dc042f 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -426,19 +426,21 @@ static void raid1_end_write_request(struct bio *bio)
+ /* We never try FailFast to WriteMostly devices */
+ !test_bit(WriteMostly, &rdev->flags)) {
+ md_error(r1_bio->mddev, rdev);
+- if (!test_bit(Faulty, &rdev->flags))
+- /* This is the only remaining device,
+- * We need to retry the write without
+- * FailFast
+- */
+- set_bit(R1BIO_WriteError, &r1_bio->state);
+- else {
+- /* Finished with this branch */
+- r1_bio->bios[mirror] = NULL;
+- to_put = bio;
+- }
+- } else
++ }
++
++ /*
++ * When the device is faulty, it is not necessary to
++ * handle write error.
++ * For failfast, this is the only remaining device,
++ * We need to retry the write without FailFast.
++ */
++ if (!test_bit(Faulty, &rdev->flags))
+ set_bit(R1BIO_WriteError, &r1_bio->state);
++ else {
++ /* Finished with this branch */
++ r1_bio->bios[mirror] = NULL;
++ to_put = bio;
++ }
+ } else {
+ /*
+ * Set R1BIO_Uptodate in our master bio, so that we
+@@ -3094,6 +3096,13 @@ static int raid1_run(struct mddev *mddev)
+ !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+ test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ mddev->degraded++;
++ /*
++ * RAID1 needs at least one disk in active
++ */
++ if (conf->raid_disks - mddev->degraded < 1) {
++ ret = -EINVAL;
++ goto abort;
++ }
+
+ if (conf->raid_disks - mddev->degraded == 1)
+ mddev->recovery_cp = MaxSector;
+@@ -3127,8 +3136,12 @@ static int raid1_run(struct mddev *mddev)
+ ret = md_integrity_register(mddev);
+ if (ret) {
+ md_unregister_thread(&mddev->thread);
+- raid1_free(mddev, conf);
++ goto abort;
+ }
++ return 0;
++
++abort:
++ raid1_free(mddev, conf);
+ return ret;
+ }
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index da94cbaa1a9e..90380064afc7 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2526,7 +2526,8 @@ static void raid5_end_read_request(struct bio * bi)
+ int set_bad = 0;
+
+ clear_bit(R5_UPTODATE, &sh->dev[i].flags);
+- atomic_inc(&rdev->read_errors);
++ if (!(bi->bi_status == BLK_STS_PROTECTION))
++ atomic_inc(&rdev->read_errors);
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ pr_warn_ratelimited(
+ "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
+@@ -2558,7 +2559,9 @@ static void raid5_end_read_request(struct bio * bi)
+ && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ retry = 1;
+ if (retry)
+- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
++ if (sh->qd_idx >= 0 && sh->pd_idx == i)
++ set_bit(R5_ReadError, &sh->dev[i].flags);
++ else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ } else
+@@ -5719,7 +5722,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
+ do_flush = false;
+ }
+
+- set_bit(STRIPE_HANDLE, &sh->state);
++ if (!sh->batch_head)
++ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_opf & REQ_SYNC) &&
+diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
+index 9598c7778871..c4aa27e0c430 100644
+--- a/drivers/media/cec/cec-notifier.c
++++ b/drivers/media/cec/cec-notifier.c
+@@ -124,6 +124,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
+ {
+ mutex_lock(&n->lock);
+ n->callback = NULL;
++ n->cec_adap->notifier = NULL;
++ n->cec_adap = NULL;
+ mutex_unlock(&n->lock);
+ cec_notifier_put(n);
+ }
+diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
+index fb9ac7696fc6..bd9bfeee385f 100644
+--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
++++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
+@@ -872,17 +872,19 @@ EXPORT_SYMBOL_GPL(vb2_queue_release);
+ __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ {
+ struct video_device *vfd = video_devdata(file);
+- __poll_t res = 0;
++ __poll_t res;
++
++ res = vb2_core_poll(q, file, wait);
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+- res = EPOLLPRI;
++ res |= EPOLLPRI;
+ }
+
+- return res | vb2_core_poll(q, file, wait);
++ return res;
+ }
+ EXPORT_SYMBOL_GPL(vb2_poll);
+
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 6351a97f3d18..2dc7761a3680 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -152,6 +152,9 @@ static void dvb_frontend_free(struct kref *ref)
+
+ static void dvb_frontend_put(struct dvb_frontend *fe)
+ {
++ /* call detach before dropping the reference count */
++ if (fe->ops.detach)
++ fe->ops.detach(fe);
+ /*
+ * Check if the frontend was registered, as otherwise
+ * kref was not initialized yet.
+@@ -3026,7 +3029,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
+ dvb_frontend_invoke_release(fe, fe->ops.release_sec);
+ dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
+ dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
+- dvb_frontend_invoke_release(fe, fe->ops.detach);
+ dvb_frontend_put(fe);
+ }
+ EXPORT_SYMBOL(dvb_frontend_detach);
+diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
+index a3393cd4e584..7557fbf9d306 100644
+--- a/drivers/media/dvb-core/dvbdev.c
++++ b/drivers/media/dvb-core/dvbdev.c
+@@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
+ if (npads) {
+ dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
+ GFP_KERNEL);
+- if (!dvbdev->pads)
++ if (!dvbdev->pads) {
++ kfree(dvbdev->entity);
+ return -ENOMEM;
++ }
+ }
+
+ switch (type) {
+diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
+index ba0c49107bd2..d45b4ddc8f91 100644
+--- a/drivers/media/dvb-frontends/dvb-pll.c
++++ b/drivers/media/dvb-frontends/dvb-pll.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/idr.h>
+ #include <linux/dvb/frontend.h>
+ #include <asm/types.h>
+
+@@ -34,8 +35,7 @@ struct dvb_pll_priv {
+ };
+
+ #define DVB_PLL_MAX 64
+-
+-static unsigned int dvb_pll_devcount;
++static DEFINE_IDA(pll_ida);
+
+ static int debug;
+ module_param(debug, int, 0644);
+@@ -787,6 +787,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ struct dvb_pll_priv *priv = NULL;
+ int ret;
+ const struct dvb_pll_desc *desc;
++ int nr;
+
+ b1 = kmalloc(1, GFP_KERNEL);
+ if (!b1)
+@@ -795,9 +796,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ b1[0] = 0;
+ msg.buf = b1;
+
+- if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
+- (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
+- pll_desc_id = id[dvb_pll_devcount];
++ nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
++ if (nr < 0) {
++ kfree(b1);
++ return NULL;
++ }
++
++ if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
++ pll_desc_id = id[nr];
+
+ BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
+
+@@ -808,24 +814,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ ret = i2c_transfer (i2c, &msg, 1);
+- if (ret != 1) {
+- kfree(b1);
+- return NULL;
+- }
++ if (ret != 1)
++ goto out;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+ }
+
+ priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
+- if (!priv) {
+- kfree(b1);
+- return NULL;
+- }
++ if (!priv)
++ goto out;
+
+ priv->pll_i2c_address = pll_addr;
+ priv->i2c = i2c;
+ priv->pll_desc = desc;
+- priv->nr = dvb_pll_devcount++;
++ priv->nr = nr;
+
+ memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
+ sizeof(struct dvb_tuner_ops));
+@@ -858,6 +860,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
+ kfree(b1);
+
+ return fe;
++out:
++ kfree(b1);
++ ida_simple_remove(&pll_ida, nr);
++
++ return NULL;
+ }
+ EXPORT_SYMBOL(dvb_pll_attach);
+
+@@ -894,9 +901,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+ static int dvb_pll_remove(struct i2c_client *client)
+ {
+- struct dvb_frontend *fe;
++ struct dvb_frontend *fe = i2c_get_clientdata(client);
++ struct dvb_pll_priv *priv = fe->tuner_priv;
+
+- fe = i2c_get_clientdata(client);
++ ida_simple_remove(&pll_ida, priv->nr);
+ dvb_pll_release(fe);
+ return 0;
+ }
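The bare dvb_pll_devcount counter only ever grew, so repeated attach/detach cycles would eventually index past the end of the id[] module-parameter array. An IDA provides recycling for free. The shape of the pattern in isolation (ida_simple_get() and ida_simple_remove() are the real kernel APIs of this era; the surrounding lines are condensed):

    static DEFINE_IDA(pll_ida);

    nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
    if (nr < 0)
            return NULL;                 /* no free slot, or -ENOMEM  */
    priv->nr = nr;                       /* ... device lifetime ...   */
    ida_simple_remove(&pll_ida, nr);     /* on detach: nr is reusable */

Note that every error path after a successful ida_simple_get() must release the ID, which is what the new out: label above takes care of.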
+diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
+index 759d60c6d630..afe7920557a8 100644
+--- a/drivers/media/i2c/ov5640.c
++++ b/drivers/media/i2c/ov5640.c
+@@ -3022,9 +3022,14 @@ static int ov5640_probe(struct i2c_client *client,
+ /* request optional power down pin */
+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
++ if (IS_ERR(sensor->pwdn_gpio))
++ return PTR_ERR(sensor->pwdn_gpio);
++
+ /* request optional reset pin */
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
++ if (IS_ERR(sensor->reset_gpio))
++ return PTR_ERR(sensor->reset_gpio);
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
+
+diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
+index 124c8df04633..58972c884705 100644
+--- a/drivers/media/i2c/ov5645.c
++++ b/drivers/media/i2c/ov5645.c
+@@ -45,6 +45,8 @@
+ #define OV5645_CHIP_ID_HIGH_BYTE 0x56
+ #define OV5645_CHIP_ID_LOW 0x300b
+ #define OV5645_CHIP_ID_LOW_BYTE 0x45
++#define OV5645_IO_MIPI_CTRL00 0x300e
++#define OV5645_PAD_OUTPUT00 0x3019
+ #define OV5645_AWB_MANUAL_CONTROL 0x3406
+ #define OV5645_AWB_MANUAL_ENABLE BIT(0)
+ #define OV5645_AEC_PK_MANUAL 0x3503
+@@ -55,6 +57,7 @@
+ #define OV5645_ISP_VFLIP BIT(2)
+ #define OV5645_TIMING_TC_REG21 0x3821
+ #define OV5645_SENSOR_MIRROR BIT(1)
++#define OV5645_MIPI_CTRL00 0x4800
+ #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
+ #define OV5645_TEST_PATTERN_MASK 0x3
+ #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
+@@ -121,7 +124,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
+ { 0x3503, 0x07 },
+ { 0x3002, 0x1c },
+ { 0x3006, 0xc3 },
+- { 0x300e, 0x45 },
+ { 0x3017, 0x00 },
+ { 0x3018, 0x00 },
+ { 0x302e, 0x0b },
+@@ -350,7 +352,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
+ { 0x3a1f, 0x14 },
+ { 0x0601, 0x02 },
+ { 0x3008, 0x42 },
+- { 0x3008, 0x02 }
++ { 0x3008, 0x02 },
++ { OV5645_IO_MIPI_CTRL00, 0x40 },
++ { OV5645_MIPI_CTRL00, 0x24 },
++ { OV5645_PAD_OUTPUT00, 0x70 }
+ };
+
+ static const struct reg_value ov5645_setting_sxga[] = {
+@@ -737,13 +742,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
+ goto exit;
+ }
+
+- ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+- OV5645_SYSTEM_CTRL0_STOP);
+- if (ret < 0) {
+- ov5645_set_power_off(ov5645);
+- goto exit;
+- }
++ usleep_range(500, 1000);
+ } else {
++ ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
+ ov5645_set_power_off(ov5645);
+ }
+ }
+@@ -1049,11 +1050,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
+ dev_err(ov5645->dev, "could not sync v4l2 controls\n");
+ return ret;
+ }
++
++ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
++ if (ret < 0)
++ return ret;
++
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_START);
+ if (ret < 0)
+ return ret;
+ } else {
++ ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
++ if (ret < 0)
++ return ret;
++
+ ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
+ OV5645_SYSTEM_CTRL0_STOP);
+ if (ret < 0)
+diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
+index 30ab2225fbd0..b350f5c1a989 100644
+--- a/drivers/media/i2c/ov9650.c
++++ b/drivers/media/i2c/ov9650.c
+@@ -703,6 +703,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
+ for (m = 6; m >= 0; m--)
+ if (gain >= (1 << m) * 16)
+ break;
++
++ /* Sanity check: don't adjust the gain with a negative value */
++ if (m < 0)
++ return -EINVAL;
++
+ rgain = (gain - ((1 << m) * 16)) / (1 << m);
+ rgain |= (((1 << m) - 1) << 4);
+
+diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
+index a62ede096636..5e68182001ec 100644
+--- a/drivers/media/i2c/tda1997x.c
++++ b/drivers/media/i2c/tda1997x.c
+@@ -2691,7 +2691,13 @@ static int tda1997x_probe(struct i2c_client *client,
+ }
+
+ ret = 0x34 + ((io_read(sd, REG_SLAVE_ADDR)>>4) & 0x03);
+- state->client_cec = i2c_new_dummy(client->adapter, ret);
++ state->client_cec = devm_i2c_new_dummy_device(&client->dev,
++ client->adapter, ret);
++ if (IS_ERR(state->client_cec)) {
++ ret = PTR_ERR(state->client_cec);
++ goto err_free_mutex;
++ }
++
+ v4l_info(client, "CEC slave address 0x%02x\n", ret);
+
+ ret = tda1997x_core_init(sd);
+@@ -2798,7 +2804,6 @@ static int tda1997x_remove(struct i2c_client *client)
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(&state->hdl);
+ regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
+- i2c_unregister_device(state->client_cec);
+ cancel_delayed_work(&state->delayed_work_enable_hpd);
+ mutex_destroy(&state->page_lock);
+ mutex_destroy(&state->lock);
+diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
+index 493b1858815f..04e85765373e 100644
+--- a/drivers/media/pci/saa7134/saa7134-i2c.c
++++ b/drivers/media/pci/saa7134/saa7134-i2c.c
+@@ -342,7 +342,11 @@ static const struct i2c_client saa7134_client_template = {
+
+ /* ----------------------------------------------------------- */
+
+-/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
++/*
++ * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
++ * demod i2c gate closed due to an address clash between this EEPROM
++ * and the demod one.
++ */
+ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+ {
+ u8 subaddr = 0x7, dmdregval;
+@@ -359,14 +363,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+- pr_debug("%s: DVB-T demod i2c gate was left closed\n",
++ pr_debug("%s: DVB-T demod i2c gate was left open\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+- pr_err("%s: EEPROM i2c gate open failure\n",
+- dev->name);
++ pr_err("%s: EEPROM i2c gate close failure\n",
++ dev->name);
+ }
+ }
+
+diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
+index dca20a3d98e2..f96226930670 100644
+--- a/drivers/media/pci/saa7146/hexium_gemini.c
++++ b/drivers/media/pci/saa7146/hexium_gemini.c
+@@ -292,6 +292,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
+ ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
+ if (ret < 0) {
+ pr_err("cannot register capture v4l2 device. skipping.\n");
++ saa7146_vv_release(dev);
++ i2c_del_adapter(&hexium->i2c_adapter);
++ kfree(hexium);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index de0f192afa8b..388c32a11345 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -632,7 +632,7 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
+ }
+
+ if (hsync_counter < 0 || vsync_counter < 0) {
+- u32 ctrl;
++ u32 ctrl = 0;
+
+ if (hsync_counter < 0) {
+ ctrl = VE_CTRL_HSYNC_POL;
+@@ -652,7 +652,8 @@ static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
+ V4L2_DV_VSYNC_POS_POL;
+ }
+
+- aspeed_video_update(video, VE_CTRL, 0, ctrl);
++ if (ctrl)
++ aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ }
+ }
+
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
+index e043d55133a3..b7cc8e651e32 100644
+--- a/drivers/media/platform/exynos4-is/fimc-is.c
++++ b/drivers/media/platform/exynos4-is/fimc-is.c
+@@ -806,6 +806,7 @@ static int fimc_is_probe(struct platform_device *pdev)
+ return -ENODEV;
+
+ is->pmu_regs = of_iomap(node, 0);
++ of_node_put(node);
+ if (!is->pmu_regs)
+ return -ENOMEM;
+
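Both exynos4-is hunks fix the same leak class: OF lookups return a node with its refcount raised, and helpers such as of_iomap() do not consume that reference, so every exit path must drop it. A hedged sketch of the balanced pattern ("vendor,block" is a placeholder compatible string):

    struct device_node *np;
    void __iomem *regs;

    np = of_find_compatible_node(NULL, NULL, "vendor,block"); /* +ref */
    if (!np)
            return -ENODEV;
    regs = of_iomap(np, 0);
    of_node_put(np);                                          /* -ref */
    if (!regs)
            return -ENOMEM;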
+diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
+index 1b83a6ec745f..3cece7cd73e2 100644
+--- a/drivers/media/platform/exynos4-is/media-dev.c
++++ b/drivers/media/platform/exynos4-is/media-dev.c
+@@ -499,6 +499,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
+ continue;
+
+ ret = fimc_md_parse_port_node(fmd, port, index);
++ of_node_put(port);
+ if (ret < 0) {
+ of_node_put(node);
+ goto cleanup;
+@@ -538,6 +539,7 @@ static int __of_get_csis_id(struct device_node *np)
+ if (!np)
+ return -EINVAL;
+ of_property_read_u32(np, "reg", ®);
++ of_node_put(np);
+ return reg - FIMC_INPUT_MIPI_CSI2_0;
+ }
+
+diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
+index 691be788e38b..b74e4f50d7d9 100644
+--- a/drivers/media/platform/fsl-viu.c
++++ b/drivers/media/platform/fsl-viu.c
+@@ -32,7 +32,7 @@
+ #define VIU_VERSION "0.5.1"
+
+ /* Allow building this driver with COMPILE_TEST */
+-#ifndef CONFIG_PPC
++#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
+ #define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
+ #define in_be32(a) ioread32be((void __iomem *)a)
+ #endif
+diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+index fc9faec85edb..5d44f2e92dd5 100644
+--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
++++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+@@ -110,7 +110,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
+ mutex_init(&mdp->vpulock);
+
+ /* Old dts had the components as child nodes */
+- if (of_get_next_child(dev->of_node, NULL)) {
++ node = of_get_next_child(dev->of_node, NULL);
++ if (node) {
++ of_node_put(node);
+ parent = dev->of_node;
+ dev_warn(dev, "device tree is out of date\n");
+ } else {
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 38849f0ba09d..8a87f427c056 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -719,6 +719,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
+ s_stream, mode);
+ pipe->do_propagation = true;
+ }
++
++ /* Stop at the first external sub-device. */
++ if (subdev->dev != isp->dev)
++ break;
+ }
+
+ return 0;
+@@ -833,6 +837,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
+ &subdev->entity);
+ failure = -ETIMEDOUT;
+ }
++
++ /* Stop at the first external sub-device. */
++ if (subdev->dev != isp->dev)
++ break;
+ }
+
+ return failure;
+diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
+index 1ba8a5ba343f..e2f336c715a4 100644
+--- a/drivers/media/platform/omap3isp/ispccdc.c
++++ b/drivers/media/platform/omap3isp/ispccdc.c
+@@ -2602,6 +2602,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
+ int ret;
+
+ /* Register the subdev and video node. */
++ ccdc->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
+index efca45bb02c8..d0a49cdfd22d 100644
+--- a/drivers/media/platform/omap3isp/ispccp2.c
++++ b/drivers/media/platform/omap3isp/ispccp2.c
+@@ -1031,6 +1031,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ ccp2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
+index e85917f4a50c..fd493c5e4e24 100644
+--- a/drivers/media/platform/omap3isp/ispcsi2.c
++++ b/drivers/media/platform/omap3isp/ispcsi2.c
+@@ -1198,6 +1198,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ csi2->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
+index 40e22400cf5e..97d660606d98 100644
+--- a/drivers/media/platform/omap3isp/isppreview.c
++++ b/drivers/media/platform/omap3isp/isppreview.c
+@@ -2225,6 +2225,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ prev->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &prev->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
+index 21ca6954df72..78d9dd7ea2da 100644
+--- a/drivers/media/platform/omap3isp/ispresizer.c
++++ b/drivers/media/platform/omap3isp/ispresizer.c
+@@ -1681,6 +1681,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
+ int ret;
+
+ /* Register the subdev and video nodes. */
++ res->subdev.dev = vdev->mdev->dev;
+ ret = v4l2_device_register_subdev(vdev, &res->subdev);
+ if (ret < 0)
+ goto error;
+diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
+index ca7bb8497c3d..cb5841f628cf 100644
+--- a/drivers/media/platform/omap3isp/ispstat.c
++++ b/drivers/media/platform/omap3isp/ispstat.c
+@@ -1026,6 +1026,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
+ int omap3isp_stat_register_entities(struct ispstat *stat,
+ struct v4l2_device *vdev)
+ {
++ stat->subdev.dev = vdev->mdev->dev;
++
+ return v4l2_device_register_subdev(vdev, &stat->subdev);
+ }
+
+diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
+index b8615a288e2b..6ca2cb15291f 100644
+--- a/drivers/media/platform/rcar_fdp1.c
++++ b/drivers/media/platform/rcar_fdp1.c
+@@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
+ fdp1->fcp = rcar_fcp_get(fcp_node);
+ of_node_put(fcp_node);
+ if (IS_ERR(fdp1->fcp)) {
+- dev_err(&pdev->dev, "FCP not found (%ld)\n",
++ dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
+ PTR_ERR(fdp1->fcp));
+ return PTR_ERR(fdp1->fcp);
+ }
+diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
+index f8006a30c12f..96d85cd8839f 100644
+--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
++++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
+@@ -232,8 +232,8 @@ static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf,
+ return vbuf;
+ }
+
+-static int vivid_copy_buffer(struct vivid_dev *dev, unsigned p, u8 *vcapbuf,
+- struct vivid_buffer *vid_cap_buf)
++static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, unsigned p,
++ u8 *vcapbuf, struct vivid_buffer *vid_cap_buf)
+ {
+ bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index];
+ struct tpg_data *tpg = &dev->tpg;
+@@ -658,6 +658,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
+ u64 f_period;
+
+ f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
++ if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0))
++ dev->timeperframe_vid_cap.denominator = 1;
+ do_div(f_period, dev->timeperframe_vid_cap.denominator);
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ f_period >>= 1;
+@@ -670,7 +672,8 @@ static void vivid_cap_update_frame_period(struct vivid_dev *dev)
+ dev->cap_frame_period = f_period;
+ }
+
+-static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
++static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev,
++ int dropped_bufs)
+ {
+ struct vivid_buffer *vid_cap_buf = NULL;
+ struct vivid_buffer *vbi_cap_buf = NULL;
+diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
+index 104b6f514536..d7b43037e500 100644
+--- a/drivers/media/platform/vsp1/vsp1_dl.c
++++ b/drivers/media/platform/vsp1/vsp1_dl.c
+@@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
+
+ /* Get a default body for our list. */
+ dl->body0 = vsp1_dl_body_get(dlm->pool);
+- if (!dl->body0)
++ if (!dl->body0) {
++ kfree(dl);
+ return NULL;
++ }
+
+ header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
+
+diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
+index 58e622d57373..3dccce398113 100644
+--- a/drivers/media/radio/si470x/radio-si470x-usb.c
++++ b/drivers/media/radio/si470x/radio-si470x-usb.c
+@@ -734,7 +734,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+ /* start radio */
+ retval = si470x_start_usb(radio);
+ if (retval < 0)
+- goto err_all;
++ goto err_buf;
+
+ /* set initial frequency */
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+@@ -749,6 +749,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
+
+ return 0;
+ err_all:
++ usb_kill_urb(radio->int_in_urb);
++err_buf:
+ kfree(radio->buffer);
+ err_ctrl:
+ v4l2_ctrl_handler_free(&radio->hdl);
+@@ -822,6 +824,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
+ mutex_lock(&radio->lock);
+ v4l2_device_disconnect(&radio->v4l2_dev);
+ video_unregister_device(&radio->videodev);
++ usb_kill_urb(radio->int_in_urb);
+ usb_set_intfdata(intf, NULL);
+ mutex_unlock(&radio->lock);
+ v4l2_device_put(&radio->v4l2_dev);
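The probe fix follows the staged-unwind rule: each goto label may only undo what was acquired before any jump to it. A failure in si470x_start_usb() now lands on the new err_buf label, while err_all, reachable only once the interrupt URB is live, gains the usb_kill_urb() it was missing. Skeleton of the layout:

    retval = si470x_start_usb(radio);
    if (retval < 0)
            goto err_buf;           /* int_in_urb not submitted yet */
    /* ... later failures use err_all ... */
    err_all:
            usb_kill_urb(radio->int_in_urb);
    err_buf:
            kfree(radio->buffer);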
+diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
+index ea05e125016a..872d6441e512 100644
+--- a/drivers/media/rc/iguanair.c
++++ b/drivers/media/rc/iguanair.c
+@@ -413,6 +413,10 @@ static int iguanair_probe(struct usb_interface *intf,
+ int ret, pipein, pipeout;
+ struct usb_host_interface *idesc;
+
++ idesc = intf->altsetting;
++ if (idesc->desc.bNumEndpoints < 2)
++ return -ENODEV;
++
+ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ rc = rc_allocate_device(RC_DRIVER_IR_RAW);
+ if (!ir || !rc) {
+@@ -427,18 +431,13 @@ static int iguanair_probe(struct usb_interface *intf,
+ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
+ ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
+
+- if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
++ if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
++ !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
++ !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- idesc = intf->altsetting;
+-
+- if (idesc->desc.bNumEndpoints < 2) {
+- ret = -ENODEV;
+- goto out;
+- }
+-
+ ir->rc = rc;
+ ir->dev = &intf->dev;
+ ir->udev = udev;
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 7bee72108b0e..37a850421fbb 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1826,12 +1826,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
+ break;
+ /* iMON VFD, MCE IR */
+ case 0x46:
+- case 0x7e:
+ case 0x9e:
+ dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
+ detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ allowed_protos = RC_PROTO_BIT_RC6_MCE;
+ break;
++ /* iMON VFD, iMON or MCE IR */
++ case 0x7e:
++ dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
++ detected_display_type = IMON_DISPLAY_TYPE_VFD;
++ allowed_protos |= RC_PROTO_BIT_RC6_MCE;
++ break;
+ /* iMON LCD, MCE IR */
+ case 0x9f:
+ dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index 72862e4bec62..a69e94092015 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -31,21 +31,22 @@
+ #include <linux/pm_wakeup.h>
+ #include <media/rc-core.h>
+
+-#define DRIVER_VERSION "1.94"
++#define DRIVER_VERSION "1.95"
+ #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
+ #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
+ "device driver"
+ #define DRIVER_NAME "mceusb"
+
++#define USB_TX_TIMEOUT 1000 /* in milliseconds */
+ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
+ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
+
+ /* MCE constants */
+-#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
++#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
+ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */
+-#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
+-#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
+-#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
++#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
++#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
++ /* Actual format is 0x80 + num_bytes */
+ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
+ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
+ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
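A worked example of the sample format these constants imply: each IR sample is one byte, 7 bits of duration counted in MCE_TIME_UNIT (50us) steps, with the top bit (MCE_PULSE_BIT, 0x80 elsewhere in this driver) distinguishing pulse from space; a data packet is one MCE_IRDATA_HEADER byte followed by up to 30 such samples. So a 1000us pulse encodes as 0x80 | (1000 / 50) = 0x94:

    #include <stdio.h>

    #define MCE_TIME_UNIT 50   /* usec per count, from the driver   */
    #define MCE_PULSE_BIT 0x80 /* pulse/space flag, from the driver */

    int main(void)
    {
            unsigned int usec = 1000;
            unsigned char pulse = MCE_PULSE_BIT | (usec / MCE_TIME_UNIT);
            unsigned char space = usec / MCE_TIME_UNIT;

            printf("pulse %uus -> 0x%02x, space -> 0x%02x\n",
                   usec, pulse, space);
            return 0;
    }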
+@@ -607,9 +608,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ if (len <= skip)
+ return;
+
+- dev_dbg(dev, "%cx data: %*ph (length=%d)",
+- (out ? 't' : 'r'),
+- min(len, buf_len - offset), buf + offset, len);
++ dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
++ (out ? 't' : 'r'), offset,
++ min(len, buf_len - offset), buf + offset, len, buf_len);
+
+ inout = out ? "Request" : "Got";
+
+@@ -731,6 +732,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ case MCE_RSP_CMD_ILLEGAL:
+ dev_dbg(dev, "Illegal PORT_IR command");
+ break;
++ case MCE_RSP_TX_TIMEOUT:
++ dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
++ break;
+ default:
+ dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
+ cmd, subcmd);
+@@ -745,13 +749,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
+ dev_dbg(dev, "End of raw IR data");
+ else if ((cmd != MCE_CMD_PORT_IR) &&
+ ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
+- dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
++ dev_dbg(dev, "Raw IR data, %d pulse/space samples",
++ cmd & MCE_PACKET_LENGTH_MASK);
+ #endif
+ }
+
+ /*
+ * Schedule work that can't be done in interrupt handlers
+- * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
++ * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
+ * Invokes mceusb_deferred_kevent() for recovering from
+ * error events specified by the kevent bit field.
+ */
+@@ -764,23 +769,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
+ dev_dbg(ir->dev, "kevent %d scheduled", kevent);
+ }
+
+-static void mce_async_callback(struct urb *urb)
++static void mce_write_callback(struct urb *urb)
+ {
+- struct mceusb_dev *ir;
+- int len;
+-
+ if (!urb)
+ return;
+
+- ir = urb->context;
++ complete(urb->context);
++}
++
++/*
++ * Write (TX/send) data to MCE device USB endpoint out.
++ * Used for IR blaster TX and MCE device commands.
++ *
++ * Return: The number of bytes written (> 0) or errno (< 0).
++ */
++static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
++{
++ int ret;
++ struct urb *urb;
++ struct device *dev = ir->dev;
++ unsigned char *buf_out;
++ struct completion tx_done;
++ unsigned long expire;
++ unsigned long ret_wait;
++
++ mceusb_dev_printdata(ir, data, size, 0, size, true);
++
++ urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (unlikely(!urb)) {
++ dev_err(dev, "Error: mce write couldn't allocate urb");
++ return -ENOMEM;
++ }
++
++ buf_out = kmalloc(size, GFP_KERNEL);
++ if (!buf_out) {
++ usb_free_urb(urb);
++ return -ENOMEM;
++ }
++
++ init_completion(&tx_done);
++
++ /* outbound data */
++ if (usb_endpoint_xfer_int(ir->usb_ep_out))
++ usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
++ buf_out, size, mce_write_callback, &tx_done,
++ ir->usb_ep_out->bInterval);
++ else
++ usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
++ buf_out, size, mce_write_callback, &tx_done);
++ memcpy(buf_out, data, size);
++
++ ret = usb_submit_urb(urb, GFP_KERNEL);
++ if (ret) {
++ dev_err(dev, "Error: mce write submit urb error = %d", ret);
++ kfree(buf_out);
++ usb_free_urb(urb);
++ return ret;
++ }
++
++ expire = msecs_to_jiffies(USB_TX_TIMEOUT);
++ ret_wait = wait_for_completion_timeout(&tx_done, expire);
++ if (!ret_wait) {
++ dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
++ expire, USB_TX_TIMEOUT);
++ usb_kill_urb(urb);
++ ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
++ } else {
++ ret = urb->status;
++ }
++ if (ret >= 0)
++ ret = urb->actual_length; /* bytes written */
+
+ switch (urb->status) {
+ /* success */
+ case 0:
+- len = urb->actual_length;
+-
+- mceusb_dev_printdata(ir, urb->transfer_buffer, len,
+- 0, len, true);
+ break;
+
+ case -ECONNRESET:
+@@ -790,140 +852,135 @@ static void mce_async_callback(struct urb *urb)
+ break;
+
+ case -EPIPE:
+- dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
++ dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
+ urb->status);
+ mceusb_defer_kevent(ir, EVENT_TX_HALT);
+ break;
+
+ default:
+- dev_err(ir->dev, "Error: request urb status = %d", urb->status);
++ dev_err(ir->dev, "Error: mce write urb status = %d",
++ urb->status);
+ break;
+ }
+
+- /* the transfer buffer and urb were allocated in mce_request_packet */
+- kfree(urb->transfer_buffer);
+- usb_free_urb(urb);
+-}
+-
+-/* request outgoing (send) usb packet - used to initialize remote */
+-static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
+- int size)
+-{
+- int res;
+- struct urb *async_urb;
+- struct device *dev = ir->dev;
+- unsigned char *async_buf;
++ dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
++ ret, ret_wait, expire, USB_TX_TIMEOUT,
++ urb->actual_length, urb->status);
+
+- async_urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (unlikely(!async_urb)) {
+- dev_err(dev, "Error, couldn't allocate urb!");
+- return;
+- }
+-
+- async_buf = kmalloc(size, GFP_KERNEL);
+- if (!async_buf) {
+- usb_free_urb(async_urb);
+- return;
+- }
+-
+- /* outbound data */
+- if (usb_endpoint_xfer_int(ir->usb_ep_out))
+- usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
+- async_buf, size, mce_async_callback, ir,
+- ir->usb_ep_out->bInterval);
+- else
+- usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
+- async_buf, size, mce_async_callback, ir);
+-
+- memcpy(async_buf, data, size);
+-
+- dev_dbg(dev, "send request called (size=%#x)", size);
++ kfree(buf_out);
++ usb_free_urb(urb);
+
+- res = usb_submit_urb(async_urb, GFP_ATOMIC);
+- if (res) {
+- dev_err(dev, "send request FAILED! (res=%d)", res);
+- kfree(async_buf);
+- usb_free_urb(async_urb);
+- return;
+- }
+- dev_dbg(dev, "send request complete (res=%d)", res);
++ return ret;
+ }
+
+-static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
++static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
+ {
+ int rsize = sizeof(DEVICE_RESUME);
+
+ if (ir->need_reset) {
+ ir->need_reset = false;
+- mce_request_packet(ir, DEVICE_RESUME, rsize);
++ mce_write(ir, DEVICE_RESUME, rsize);
+ msleep(10);
+ }
+
+- mce_request_packet(ir, data, size);
++ mce_write(ir, data, size);
+ msleep(10);
+ }
+
+-/* Send data out the IR blaster port(s) */
++/*
++ * Transmit IR out the MCE device IR blaster port(s).
++ *
++ * Convert IR pulse/space sequence from LIRC to MCE format.
++ * Break up a long IR sequence into multiple parts (MCE IR data packets).
++ *
++ * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
++ * Pulses and spaces are implicit by their position.
++ * The first IR sample, txbuf[0], is always a pulse.
++ *
++ * u8 irbuf[] consists of multiple IR data packets for the MCE device.
++ * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
++ * An IR sample is 1-bit pulse/space flag with 7-bit time
++ * in MCE time units (50usec).
++ *
++ * Return: The number of IR samples sent (> 0) or errno (< 0).
++ */
+ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
+ {
+ struct mceusb_dev *ir = dev->priv;
+- int i, length, ret = 0;
+- int cmdcount = 0;
+- unsigned char cmdbuf[MCE_CMDBUF_SIZE];
+-
+- /* MCE tx init header */
+- cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
+- cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
+- cmdbuf[cmdcount++] = ir->tx_mask;
++ u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
++ u8 irbuf[MCE_IRBUF_SIZE];
++ int ircount = 0;
++ unsigned int irsample;
++ int i, length, ret;
+
+ /* Send the set TX ports command */
+- mce_async_out(ir, cmdbuf, cmdcount);
+- cmdcount = 0;
+-
+- /* Generate mce packet data */
+- for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
+- txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
+-
+- do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
+-
+- /* Insert mce packet header every 4th entry */
+- if ((cmdcount < MCE_CMDBUF_SIZE) &&
+- (cmdcount % MCE_CODE_LENGTH) == 0)
+- cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
+-
+- /* Insert mce packet data */
+- if (cmdcount < MCE_CMDBUF_SIZE)
+- cmdbuf[cmdcount++] =
+- (txbuf[i] < MCE_PULSE_BIT ?
+- txbuf[i] : MCE_MAX_PULSE_LENGTH) |
+- (i & 1 ? 0x00 : MCE_PULSE_BIT);
+- else {
+- ret = -EINVAL;
+- goto out;
++ cmdbuf[2] = ir->tx_mask;
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
++
++ /* Generate mce IR data packet */
++ for (i = 0; i < count; i++) {
++ irsample = txbuf[i] / MCE_TIME_UNIT;
++
++ /* loop to support long pulses/spaces > 6350us (127*50us) */
++ while (irsample > 0) {
++ /* Insert IR header every 30th entry */
++ if (ircount % MCE_PACKET_SIZE == 0) {
++ /* Room for IR header and one IR sample? */
++ if (ircount >= MCE_IRBUF_SIZE - 1) {
++ /* Send near full buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
++ ircount = 0;
++ }
++ irbuf[ircount++] = MCE_IRDATA_HEADER;
+ }
+
+- } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
+- (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
+- }
+-
+- /* Check if we have room for the empty packet at the end */
+- if (cmdcount >= MCE_CMDBUF_SIZE) {
+- ret = -EINVAL;
+- goto out;
+- }
++ /* Insert IR sample */
++ if (irsample <= MCE_MAX_PULSE_LENGTH) {
++ irbuf[ircount] = irsample;
++ irsample = 0;
++ } else {
++ irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
++ irsample -= MCE_MAX_PULSE_LENGTH;
++ }
++ /*
++ * Even i = IR pulse
++ * Odd i = IR space
++ */
++ irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
++ ircount++;
++
++ /* IR buffer full? */
++ if (ircount >= MCE_IRBUF_SIZE) {
++ /* Fix packet length in last header */
++ length = ircount % MCE_PACKET_SIZE;
++ if (length > 0)
++ irbuf[ircount - length] -=
++ MCE_PACKET_SIZE - length;
++ /* Send full buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
++ ircount = 0;
++ }
++ }
++ } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
+
+ /* Fix packet length in last header */
+- length = cmdcount % MCE_CODE_LENGTH;
+- cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
++ length = ircount % MCE_PACKET_SIZE;
++ if (length > 0)
++ irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
+
+- /* All mce commands end with an empty packet (0x80) */
+- cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
++ /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
++ irbuf[ircount++] = MCE_IRDATA_TRAILER;
+
+- /* Transmit the command to the mce device */
+- mce_async_out(ir, cmdbuf, cmdcount);
++ /* Send final buffer */
++ ret = mce_write(ir, irbuf, ircount);
++ if (ret < 0)
++ return ret;
+
+-out:
+- return ret ? ret : count;
++ return count;
+ }
+
+ /* Sets active IR outputs -- mce devices typically have two */
+@@ -963,7 +1020,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+ cmdbuf[2] = MCE_CMD_SIG_END;
+ cmdbuf[3] = MCE_IRDATA_TRAILER;
+ dev_dbg(ir->dev, "disabling carrier modulation");
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ return 0;
+ }
+
+@@ -977,7 +1034,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
+ carrier);
+
+ /* Transmit new carrier to mce device */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ return 0;
+ }
+ }
+@@ -1000,10 +1057,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
+ cmdbuf[2] = units >> 8;
+ cmdbuf[3] = units;
+
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+
+ /* get receiver timeout value */
+- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
++ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+
+ return 0;
+ }
+@@ -1028,7 +1085,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
+ ir->wideband_rx_enabled = false;
+ cmdbuf[2] = 1; /* port 1 is long range receiver */
+ }
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ /* response from device sets ir->learning_active */
+
+ return 0;
+@@ -1051,7 +1108,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
+ ir->carrier_report_enabled = true;
+ if (!ir->learning_active) {
+ cmdbuf[2] = 2; /* port 2 is short range receiver */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ } else {
+ ir->carrier_report_enabled = false;
+@@ -1062,7 +1119,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
+ */
+ if (ir->learning_active && !ir->wideband_rx_enabled) {
+ cmdbuf[2] = 1; /* port 1 is long range receiver */
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ }
+
+@@ -1141,6 +1198,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
+ }
+ break;
+ case MCE_RSP_CMD_ILLEGAL:
++ case MCE_RSP_TX_TIMEOUT:
+ ir->need_reset = true;
+ break;
+ default:
+@@ -1279,7 +1337,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
+ {
+ /* If we get no reply or an illegal command reply, its ver 1, says MS */
+ ir->emver = 1;
+- mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
++ mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
+ }
+
+ static void mceusb_gen1_init(struct mceusb_dev *ir)
+@@ -1325,10 +1383,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
+ dev_dbg(dev, "set handshake - retC = %d", ret);
+
+ /* device resume */
+- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+
+ /* get hw/sw revision? */
+- mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
++ mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
+
+ kfree(data);
+ }
+@@ -1336,13 +1394,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
+ static void mceusb_gen2_init(struct mceusb_dev *ir)
+ {
+ /* device resume */
+- mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
++ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
+
+ /* get wake version (protocol, key, address) */
+- mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
++ mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
+
+ /* unknown what this one actually returns... */
+- mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
++ mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
+ }
+
+ static void mceusb_get_parameters(struct mceusb_dev *ir)
+@@ -1356,24 +1414,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
+ ir->num_rxports = 2;
+
+ /* get number of tx and rx ports */
+- mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
++ mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
+
+ /* get the carrier and frequency */
+- mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
++ mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
+
+ if (ir->num_txports && !ir->flags.no_tx)
+ /* get the transmitter bitmask */
+- mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
++ mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
+
+ /* get receiver timeout value */
+- mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
++ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
+
+ /* get receiver sensor setting */
+- mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
++ mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
+
+ for (i = 0; i < ir->num_txports; i++) {
+ cmdbuf[2] = i;
+- mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
++ mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
+ }
+ }
+
+@@ -1382,7 +1440,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
+ if (ir->emver < 2)
+ return;
+
+- mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
++ mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
+ }
+
+ /*
+diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
+index 46101efe017b..da457bf85130 100644
+--- a/drivers/media/rc/mtk-cir.c
++++ b/drivers/media/rc/mtk-cir.c
+@@ -35,6 +35,11 @@
+ /* Fields containing pulse width data */
+ #define MTK_WIDTH_MASK (GENMASK(7, 0))
+
++/* IR threshold */
++#define MTK_IRTHD 0x14
++#define MTK_DG_CNT_MASK (GENMASK(12, 8))
++#define MTK_DG_CNT(x) ((x) << 8)
++
+ /* Bit to enable interrupt */
+ #define MTK_IRINT_EN BIT(0)
+
+@@ -400,6 +405,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
+ mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
+ ir->data->fields[MTK_HW_PERIOD].reg);
+
++ /* Set de-glitch counter */
++ mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
++
+ /* Enable IR and PWM */
+ val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
+ val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
+diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
+index 17468f7d78ed..3ab80a7b4498 100644
+--- a/drivers/media/usb/cpia2/cpia2_usb.c
++++ b/drivers/media/usb/cpia2/cpia2_usb.c
+@@ -676,6 +676,10 @@ static int submit_urbs(struct camera_data *cam)
+ if (!urb) {
+ for (j = 0; j < i; j++)
+ usb_free_urb(cam->sbuf[j].urb);
++ for (j = 0; j < NUM_SBUF; j++) {
++ kfree(cam->sbuf[j].data);
++ cam->sbuf[j].data = NULL;
++ }
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 66d685065e06..ab7a100ec84f 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -2439,9 +2439,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
+ 8, 0x0486,
+ };
+
++ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
++ return -ENODEV;
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
++ if (!i2c)
++ return -ENODEV;
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
+ return -ENODEV;
+ dib0700_set_i2c_speed(adap->dev, 1500);
+@@ -2517,10 +2521,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
+ 0, 0x00ef,
+ 8, 0x0406,
+ };
++ if (!IS_ENABLED(CONFIG_DVB_DIB9000))
++ return -ENODEV;
+ i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
+ if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
+ return -ENODEV;
+ i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
++ if (!i2c)
++ return -ENODEV;
+ if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
+ return -ENODEV;
+
+diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
+index d6b36e4f33d2..441d878fc22c 100644
+--- a/drivers/media/usb/dvb-usb/pctv452e.c
++++ b/drivers/media/usb/dvb-usb/pctv452e.c
+@@ -909,14 +909,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
+ &a->dev->i2c_adap);
+ if (!a->fe_adap[0].fe)
+ return -ENODEV;
+-
+- /*
+- * dvb_frontend will call dvb_detach for both stb0899_detach
+- * and stb0899_release but we only do dvb_attach(stb0899_attach).
+- * Increment the module refcount instead.
+- */
+- symbol_get(stb0899_attach);
+-
+ if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
+ &a->dev->i2c_adap)) == NULL)
+ err("Cannot attach lnbp22\n");
+diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
+index 1283c7ca9ad5..1de835a591a0 100644
+--- a/drivers/media/usb/em28xx/em28xx-cards.c
++++ b/drivers/media/usb/em28xx/em28xx-cards.c
+@@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
+ dev->dev_next->disconnected = 1;
+ dev_info(&dev->intf->dev, "Disconnecting %s\n",
+ dev->dev_next->name);
+- flush_request_modules(dev->dev_next);
+ }
+
+ dev->disconnected = 1;
+diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
+index d8e40137a204..53db9a2895ea 100644
+--- a/drivers/media/usb/gspca/konica.c
++++ b/drivers/media/usb/gspca/konica.c
+@@ -114,6 +114,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, 2);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
+index 59649704beba..880f569bda30 100644
+--- a/drivers/media/usb/gspca/nw80x.c
++++ b/drivers/media/usb/gspca/nw80x.c
+@@ -1572,6 +1572,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ return;
+ }
+ if (len == 1)
+diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
+index cfb1f53bc17e..f417dfc0b872 100644
+--- a/drivers/media/usb/gspca/ov519.c
++++ b/drivers/media/usb/gspca/ov519.c
+@@ -2073,6 +2073,11 @@ static int reg_r(struct sd *sd, u16 index)
+ } else {
+ gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+
+ return ret;
+@@ -2101,6 +2106,11 @@ static int reg_r8(struct sd *sd,
+ } else {
+ gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, 8);
+ }
+
+ return ret;
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 56521c991db4..185c1f10fb30 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -693,6 +693,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
+ if (ret < 0) {
+ pr_err("read failed %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+ return gspca_dev->usb_buf[0];
+ }
+diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
+index 867f860a9650..91efc650cf76 100644
+--- a/drivers/media/usb/gspca/ov534_9.c
++++ b/drivers/media/usb/gspca/ov534_9.c
+@@ -1145,6 +1145,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ return 0;
+ }
+ return gspca_dev->usb_buf[0];
+ }
+diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
+index 061deee138c3..e087cfb5980b 100644
+--- a/drivers/media/usb/gspca/se401.c
++++ b/drivers/media/usb/gspca/se401.c
+@@ -101,6 +101,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
+ pr_err("read req failed req %#04x error %d\n",
+ req, err);
+ gspca_dev->usb_err = err;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
+index b43f89fee6c1..2a6d0a1265a7 100644
+--- a/drivers/media/usb/gspca/sn9c20x.c
++++ b/drivers/media/usb/gspca/sn9c20x.c
+@@ -123,6 +123,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
+ }
+ },
++ {
++ .ident = "MSI MS-1039",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
++ }
++ },
+ {
+ .ident = "MSI MS-1632",
+ .matches = {
+@@ -909,6 +916,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
+ if (unlikely(result < 0 || result != length)) {
+ pr_err("Read register %02x failed %d\n", reg, result);
+ gspca_dev->usb_err = result;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
+index 046fc2c2a135..4d655e2da9cb 100644
+--- a/drivers/media/usb/gspca/sonixb.c
++++ b/drivers/media/usb/gspca/sonixb.c
+@@ -453,6 +453,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ dev_err(gspca_dev->v4l2_dev.dev,
+ "Error reading register %02x: %d\n", value, res);
+ gspca_dev->usb_err = res;
++ /*
++ * Make sure the result is zeroed to avoid uninitialized
++ * values.
++ */
++ gspca_dev->usb_buf[0] = 0;
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
+index 50a6c8425827..2e1bd2df8304 100644
+--- a/drivers/media/usb/gspca/sonixj.c
++++ b/drivers/media/usb/gspca/sonixj.c
+@@ -1162,6 +1162,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
+index 2ae03b60163f..ccc477944ef8 100644
+--- a/drivers/media/usb/gspca/spca1528.c
++++ b/drivers/media/usb/gspca/spca1528.c
+@@ -71,6 +71,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
+index d1ba0888d798..c3610247a90e 100644
+--- a/drivers/media/usb/gspca/sq930x.c
++++ b/drivers/media/usb/gspca/sq930x.c
+@@ -425,6 +425,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r %04x failed %d\n", value, ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
+index d0ddfa957ca9..f4a4222f0d2e 100644
+--- a/drivers/media/usb/gspca/sunplus.c
++++ b/drivers/media/usb/gspca/sunplus.c
+@@ -255,6 +255,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+
+diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
+index 588a847ea483..4cb7c92ea132 100644
+--- a/drivers/media/usb/gspca/vc032x.c
++++ b/drivers/media/usb/gspca/vc032x.c
+@@ -2906,6 +2906,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
+ if (ret < 0) {
+ pr_err("reg_r err %d\n", ret);
+ gspca_dev->usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
+ }
+ }
+ static void reg_r(struct gspca_dev *gspca_dev,
+diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
+index 16b679c2de21..a8350ee9712f 100644
+--- a/drivers/media/usb/gspca/w996Xcf.c
++++ b/drivers/media/usb/gspca/w996Xcf.c
+@@ -133,6 +133,11 @@ static int w9968cf_read_sb(struct sd *sd)
+ } else {
+ pr_err("Read SB reg [01] failed\n");
+ sd->gspca_dev.usb_err = ret;
++ /*
++ * Make sure the buffer is zeroed to avoid uninitialized
++ * values.
++ */
++ memset(sd->gspca_dev.usb_buf, 0, 2);
+ }
+
+ udelay(W9968CF_I2C_BUS_DELAY);
+diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
+index 9b9d894d29bc..b75c18a012a7 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-core.c
++++ b/drivers/media/usb/hdpvr/hdpvr-core.c
+@@ -137,6 +137,7 @@ static int device_authorization(struct hdpvr_device *dev)
+
+ dev->fw_ver = dev->usbc_buf[1];
+
++ dev->usbc_buf[46] = '\0';
+ v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
+ dev->fw_ver, &dev->usbc_buf[2]);
+
+@@ -271,6 +272,7 @@ static int hdpvr_probe(struct usb_interface *interface,
+ #endif
+ size_t buffer_size;
+ int i;
++ int dev_num;
+ int retval = -ENOMEM;
+
+ /* allocate memory for our device state and initialize it */
+@@ -368,8 +370,17 @@ static int hdpvr_probe(struct usb_interface *interface,
+ }
+ #endif
+
++ dev_num = atomic_inc_return(&dev_nr);
++ if (dev_num >= HDPVR_MAX) {
++ v4l2_err(&dev->v4l2_dev,
++ "max device number reached, device register failed\n");
++ atomic_dec(&dev_nr);
++ retval = -ENODEV;
++ goto reg_fail;
++ }
++
+ retval = hdpvr_register_videodev(dev, &interface->dev,
+- video_nr[atomic_inc_return(&dev_nr)]);
++ video_nr[dev_num]);
+ if (retval < 0) {
+ v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
+ goto reg_fail;
+diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+index 1d0afa340f47..3198f9624b7c 100644
+--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
++++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
+@@ -319,7 +319,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
+
+ dprintk("%s\n", __func__);
+
+- b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
++ b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
+index 7ef3e4d22bf6..939fc11cf080 100644
+--- a/drivers/media/v4l2-core/videobuf-core.c
++++ b/drivers/media/v4l2-core/videobuf-core.c
+@@ -1123,7 +1123,6 @@ __poll_t videobuf_poll_stream(struct file *file,
+ struct videobuf_buffer *buf = NULL;
+ __poll_t rc = 0;
+
+- poll_wait(file, &buf->done, wait);
+ videobuf_queue_lock(q);
+ if (q->streaming) {
+ if (!list_empty(&q->stream))
+@@ -1143,7 +1142,9 @@ __poll_t videobuf_poll_stream(struct file *file,
+ }
+ buf = q->read_buf;
+ }
+- if (!buf)
++ if (buf)
++ poll_wait(file, &buf->done, wait);
++ else
+ rc = EPOLLERR;
+
+ if (0 == rc) {
+diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
+index 9f54a259a1b3..e4823ef0a0de 100644
+--- a/drivers/mmc/core/sdio_irq.c
++++ b/drivers/mmc/core/sdio_irq.c
+@@ -31,6 +31,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
+ {
+ struct mmc_card *card = host->card;
+ int i, ret, count;
++ bool sdio_irq_pending = host->sdio_irq_pending;
+ unsigned char pending;
+ struct sdio_func *func;
+
+@@ -38,13 +39,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
+ if (mmc_card_suspended(card))
+ return 0;
+
++ /* Clear the flag to indicate that we have processed the IRQ. */
++ host->sdio_irq_pending = false;
++
+ /*
+ * Optimization, if there is only 1 function interrupt registered
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
+ */
+ func = card->sdio_single_irq;
+- if (func && host->sdio_irq_pending) {
++ if (func && sdio_irq_pending) {
+ func->irq_handler(func);
+ return 1;
+ }
+@@ -96,7 +100,6 @@ void sdio_run_irqs(struct mmc_host *host)
+ {
+ mmc_claim_host(host);
+ if (host->sdio_irqs) {
+- host->sdio_irq_pending = true;
+ process_sdio_pending_irqs(host);
+ if (host->ops->ack_sdio_irq)
+ host->ops->ack_sdio_irq(host);
+@@ -115,6 +118,7 @@ void sdio_irq_work(struct work_struct *work)
+
+ void sdio_signal_irq(struct mmc_host *host)
+ {
++ host->sdio_irq_pending = true;
+ queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
+ }
+ EXPORT_SYMBOL_GPL(sdio_signal_irq);
+@@ -160,7 +164,6 @@ static int sdio_irq_thread(void *_host)
+ if (ret)
+ break;
+ ret = process_sdio_pending_irqs(host);
+- host->sdio_irq_pending = false;
+ mmc_release_host(host);
+
+ /*
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 60c3a06e3469..45c349054683 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -3482,6 +3482,10 @@ int dw_mci_runtime_resume(struct device *dev)
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(host->slot, true);
+
++ /* Re-enable SDIO interrupts. */
++ if (sdio_irq_claimed(host->slot->mmc))
++ __dw_mci_enable_sdio_irq(host->slot, 1);
++
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+
+diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
+index 33f4b6387ef7..978c8ccce7e3 100644
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -2421,6 +2421,9 @@ static void msdc_restore_reg(struct msdc_host *host)
+ } else {
+ writel(host->save_para.pad_tune, host->base + tune_reg);
+ }
++
++ if (sdio_irq_claimed(host->mmc))
++ __msdc_enable_sdio_irq(host, 1);
+ }
+
+ static int msdc_runtime_suspend(struct device *dev)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index a5dc5aae973e..c66e66fbaeb4 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1849,7 +1849,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (timing == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+- else if (timing == MMC_TIMING_UHS_SDR25)
++ else if (timing == MMC_TIMING_SD_HS ||
++ timing == MMC_TIMING_MMC_HS ||
++ timing == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (timing == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 999ca6a66036..76609b908b5e 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -1424,21 +1424,16 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(fmc2->clk);
+ unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000);
+- int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att;
+-
+- tar = hclkp;
+- if (tar < sdrt->tAR_min)
+- tar = sdrt->tAR_min;
+- tims->tar = DIV_ROUND_UP(tar, hclkp) - 1;
+- if (tims->tar > FMC2_PCR_TIMING_MASK)
+- tims->tar = FMC2_PCR_TIMING_MASK;
+-
+- tclr = hclkp;
+- if (tclr < sdrt->tCLR_min)
+- tclr = sdrt->tCLR_min;
+- tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1;
+- if (tims->tclr > FMC2_PCR_TIMING_MASK)
+- tims->tclr = FMC2_PCR_TIMING_MASK;
++ unsigned long timing, tar, tclr, thiz, twait;
++ unsigned long tset_mem, tset_att, thold_mem, thold_att;
++
++ tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
++ timing = DIV_ROUND_UP(tar, hclkp) - 1;
++ tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
++
++ tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
++ timing = DIV_ROUND_UP(tclr, hclkp) - 1;
++ tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+@@ -1448,18 +1443,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+- twait = hclkp;
+- if (twait < sdrt->tRP_min)
+- twait = sdrt->tRP_min;
+- if (twait < sdrt->tWP_min)
+- twait = sdrt->tWP_min;
+- if (twait < sdrt->tREA_max + FMC2_TIO)
+- twait = sdrt->tREA_max + FMC2_TIO;
+- tims->twait = DIV_ROUND_UP(twait, hclkp);
+- if (tims->twait == 0)
+- tims->twait = 1;
+- else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->twait = FMC2_PMEM_PATT_TIMING_MASK;
++ twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
++ twait = max_t(unsigned long, twait, sdrt->tWP_min);
++ twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
++ timing = DIV_ROUND_UP(twait, hclkp);
++ tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+@@ -1474,20 +1462,15 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+- tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp);
+- if (tims->tset_mem == 0)
+- tims->tset_mem = 1;
+- else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(tset_mem, hclkp);
++ tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+- thold_mem = hclkp;
+- if (thold_mem < sdrt->tCH_min)
+- thold_mem = sdrt->tCH_min;
++ thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+@@ -1497,11 +1480,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+- tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp);
+- if (tims->thold_mem == 0)
+- tims->thold_mem = 1;
+- else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(thold_mem, hclkp);
++ tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+@@ -1523,11 +1503,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+- tims->tset_att = DIV_ROUND_UP(tset_att, hclkp);
+- if (tims->tset_att == 0)
+- tims->tset_att = 1;
+- else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(tset_att, hclkp);
++ tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_ATT > tALH
+@@ -1542,17 +1519,11 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+- thold_att = hclkp;
+- if (thold_att < sdrt->tALH_min)
+- thold_att = sdrt->tALH_min;
+- if (thold_att < sdrt->tCH_min)
+- thold_att = sdrt->tCH_min;
+- if (thold_att < sdrt->tCLH_min)
+- thold_att = sdrt->tCLH_min;
+- if (thold_att < sdrt->tCOH_min)
+- thold_att = sdrt->tCOH_min;
+- if (thold_att < sdrt->tDH_min)
+- thold_att = sdrt->tDH_min;
++ thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
++ thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+@@ -1571,11 +1542,8 @@ static void stm32_fmc2_calc_timings(struct nand_chip *chip,
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+- tims->thold_att = DIV_ROUND_UP(thold_att, hclkp);
+- if (tims->thold_att == 0)
+- tims->thold_att = 1;
+- else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK)
+- tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK;
++ timing = DIV_ROUND_UP(thold_att, hclkp);
++ tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+ }
+
+ static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr,
+diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
+index 8459115d9d4e..553776cc1d29 100644
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
+ static void arcnet_rx(struct net_device *dev, int bufnum)
+ {
+ struct arcnet_local *lp = netdev_priv(dev);
+- struct archdr pkt;
++ union {
++ struct archdr pkt;
++ char buf[512];
++ } rxdata;
+ struct arc_rfc1201 *soft;
+ int length, ofs;
+
+- soft = &pkt.soft.rfc1201;
++ soft = &rxdata.pkt.soft.rfc1201;
+
+- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
+- if (pkt.hard.offset[0]) {
+- ofs = pkt.hard.offset[0];
++ lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
++ if (rxdata.pkt.hard.offset[0]) {
++ ofs = rxdata.pkt.hard.offset[0];
+ length = 256 - ofs;
+ } else {
+- ofs = pkt.hard.offset[1];
++ ofs = rxdata.pkt.hard.offset[1];
+ length = 512 - ofs;
+ }
+
+ /* get the full header, if possible */
+- if (sizeof(pkt.soft) <= length) {
+- lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
++ if (sizeof(rxdata.pkt.soft) <= length) {
++ lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
+ } else {
+- memset(&pkt.soft, 0, sizeof(pkt.soft));
++ memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
+ lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
+ }
+
+ arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
+- bufnum, pkt.hard.source, pkt.hard.dest, length);
++ bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += length + ARC_HDR_SIZE;
+@@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+ if (arc_proto_map[soft->proto]->is_ip) {
+ if (BUGLVL(D_PROTO)) {
+ struct ArcProto
+- *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
++ *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
+ *newp = arc_proto_map[soft->proto];
+
+ if (oldp != newp) {
+ arc_printk(D_PROTO, dev,
+ "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
+- soft->proto, pkt.hard.source,
++ soft->proto, rxdata.pkt.hard.source,
+ newp->suffix, oldp->suffix);
+ }
+ }
+@@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
+ lp->default_proto[0] = soft->proto;
+
+ /* in striking contrast, the following isn't a hack. */
+- lp->default_proto[pkt.hard.source] = soft->proto;
++ lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
+ }
+ /* call the protocol-specific receiver. */
+- arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
++ arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
+ }
+
+ static void null_rx(struct net_device *dev, int bufnum,
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index cdae0efde8e6..7998a73b6a0f 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+ else
+ phy_reg |= 0xFA;
+ e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
++
++ if (speed == SPEED_1000) {
++ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
++ &phy_reg);
++
++ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
++
++ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
++ phy_reg);
++ }
+ }
+ hw->phy.ops.release(hw);
+
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+index eb09c755fa17..1502895eb45d 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -210,7 +210,7 @@
+
+ /* PHY Power Management Control */
+ #define HV_PM_CTRL PHY_REG(770, 17)
+-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
++#define HV_PM_CTRL_K1_CLK_REQ 0x200
+ #define HV_PM_CTRL_K1_ENABLE 0x4000
+
+ #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 320562b39686..f2906479f5a6 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2586,6 +2586,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ return;
++ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
++ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
++ return;
++ }
+
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
+ if (pf->vsi[v] &&
+@@ -2600,6 +2604,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ }
+ }
+ }
++ clear_bit(__I40E_VF_DISABLE, pf->state);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
+index 35a92fd2cf39..59bbb5a8401c 100644
+--- a/drivers/net/ethernet/marvell/skge.c
++++ b/drivers/net/ethernet/marvell/skge.c
+@@ -3110,7 +3110,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
+ skb_put(skb, len);
+
+ if (dev->features & NETIF_F_RXCSUM) {
+- skb->csum = csum;
++ skb->csum = le16_to_cpu(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+index 4421c10f58ae..baed9e93794f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+@@ -397,10 +397,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct ethtool_rx_flow_spec *fs)
+ {
++ struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
+ struct mlx5_flow_destination *dst = NULL;
+- struct mlx5_flow_act flow_act = {0};
+- struct mlx5_flow_spec *spec;
+ struct mlx5_flow_handle *rule;
++ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 23d53163ce15..41a8e44cc09c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -1525,6 +1525,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
++ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+ { 0, }
+ };
+
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
+index eb846133943b..acb02e1513f2 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
+@@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
++ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+
+@@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ port = nfp_port_alloc(app, port_type, repr);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
++ kfree(repr_priv);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+@@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
+ err = nfp_repr_init(app, repr,
+ port_id, port, priv->nn->dp.netdev);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+@@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
+ if (!repr_priv) {
+ err = -ENOMEM;
++ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+
+@@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
+ if (IS_ERR(port)) {
+ err = PTR_ERR(port);
++ kfree(repr_priv);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+ }
+ err = nfp_port_init_phy_port(app->pf, app, port, i);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+@@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
+ err = nfp_repr_init(app, repr,
+ cmsg_port_id, port, priv->nn->dp.netdev);
+ if (err) {
++ kfree(repr_priv);
+ nfp_port_free(port);
+ nfp_repr_free(repr);
+ goto err_reprs_clean;
+diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
+index f7e11f1b0426..b0c8be127bee 100644
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1344,13 +1344,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
+ pldat->dma_buff_base_p = dma_handle;
+
+ netdev_dbg(ndev, "IO address space :%pR\n", res);
+- netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
++ netdev_dbg(ndev, "IO address size :%zd\n",
++ (size_t)resource_size(res));
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
+ pldat->net_base);
+ netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
+- netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
+- netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
+- pldat->dma_buff_base_p);
++ netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
++ netdev_dbg(ndev, "DMA buffer P address :%pad\n",
++ &pldat->dma_buff_base_p);
+ netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
+ pldat->dma_buff_base_v);
+
+@@ -1397,8 +1398,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_out_unregister_netdev;
+
+- netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
+- res->start, ndev->irq);
++ netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
++ (unsigned long)res->start, ndev->irq);
+
+ device_init_wakeup(dev, 1);
+ device_set_wakeup_enable(dev, 0);
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 8f46aa1ddec0..cb7637364b40 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1235,6 +1235,7 @@ deliver:
+ macsec_rxsa_put(rx_sa);
+ macsec_rxsc_put(rx_sc);
+
++ skb_orphan(skb);
+ ret = gro_cells_receive(&macsec->gro_cells, skb);
+ if (ret == NET_RX_SUCCESS)
+ count_rx(dev, skb->len);
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 3c8186f269f9..2fea5541c35a 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -763,6 +763,8 @@ static int ksz9031_get_features(struct phy_device *phydev)
+ * Whenever the device's Asymmetric Pause capability is set to 1,
+ * link-up may fail after a link-up to link-down transition.
+ *
++ * The Errata Sheet is for ksz9031, but ksz9021 has the same issue
++ *
+ * Workaround:
+ * Do not enable the Asymmetric Pause capability bit.
+ */
+@@ -1076,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = {
+ /* PHY_GBIT_FEATURES */
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
++ .get_features = ksz9031_get_features,
+ .config_init = ksz9021_config_init,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
+index a221dd552c3c..a5bf0874c7d8 100644
+--- a/drivers/net/phy/national.c
++++ b/drivers/net/phy/national.c
+@@ -105,14 +105,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
+
+ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
+ {
++ u16 lb_dis = BIT(1);
++
+ if (disable)
+- ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
++ ns_exp_write(phydev, 0x1c0,
++ ns_exp_read(phydev, 0x1c0) | lb_dis);
+ else
+ ns_exp_write(phydev, 0x1c0,
+- ns_exp_read(phydev, 0x1c0) & 0xfffe);
++ ns_exp_read(phydev, 0x1c0) & ~lb_dis);
+
+ pr_debug("10BASE-T HDX loopback %s\n",
+- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
++ (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
+ }
+
+ static int ns_config_init(struct phy_device *phydev)
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index a30e41a56085..9a1b006904a7 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1415,6 +1415,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
+ netif_wake_queue(ppp->dev);
+ else
+ netif_stop_queue(ppp->dev);
++ } else {
++ kfree_skb(skb);
+ }
+ ppp_xmit_unlock(ppp);
+ }
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 50c05d0f44cb..00cab3f43a4c 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
+ u8 ep;
+
+ for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+-
+ e = intf->cur_altsetting->endpoint + ep;
++
++ /* ignore endpoints which cannot transfer data */
++ if (!usb_endpoint_maxp(&e->desc))
++ continue;
++
+ switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+ case USB_ENDPOINT_XFER_INT:
+ if (usb_endpoint_dir_in(&e->desc)) {
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 72514c46b478..ef1d667b0108 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -100,6 +100,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
+ int intr = 0;
+
+ e = alt->endpoint + ep;
++
++ /* ignore endpoints which cannot transfer data */
++ if (!usb_endpoint_maxp(&e->desc))
++ continue;
++
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_INT:
+ if (!usb_endpoint_dir_in(&e->desc))
+@@ -339,6 +344,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
+ {
+ enum usb_device_speed speed = dev->udev->speed;
+
++ if (!dev->rx_urb_size || !dev->hard_mtu)
++ goto insanity;
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+@@ -355,6 +362,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
+ dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ default:
++insanity:
+ dev->rx_qlen = dev->tx_qlen = 4;
+ }
+ }
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 97fb0cb1b97a..1a22165afb39 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -1153,7 +1153,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
+ struct sk_buff *skb;
+ int err;
+
+- if (family == AF_INET6 && !ipv6_mod_enabled())
++ if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
++ !ipv6_mod_enabled())
+ return 0;
+
+ skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 02709fc99034..9deca6d33ecd 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -810,7 +810,7 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+ struct wmi_ch_info_ev_arg *arg)
+ {
+ const void **tb;
+- const struct wmi_chan_info_event *ev;
++ const struct wmi_tlv_chan_info_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+index 65e6aa520b06..4bdd65515ad4 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+@@ -1607,6 +1607,22 @@ struct chan_info_params {
+
+ #define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9)
+
++struct wmi_tlv_chan_info_event {
++ __le32 err_code;
++ __le32 freq;
++ __le32 cmd_flags;
++ __le32 noise_floor;
++ __le32 rx_clear_count;
++ __le32 cycle_count;
++ __le32 chan_tx_pwr_range;
++ __le32 chan_tx_pwr_tp;
++ __le32 rx_frame_count;
++ __le32 my_bss_rx_cycle_count;
++ __le32 rx_11b_mode_data_duration;
++ __le32 tx_frame_cnt;
++ __le32 mac_clk_mhz;
++} __packed;
++
+ struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
+index 12f57f9adbba..aaa3f587fa24 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -6524,14 +6524,6 @@ struct wmi_chan_info_event {
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+- __le32 chan_tx_pwr_range;
+- __le32 chan_tx_pwr_tp;
+- __le32 rx_frame_count;
+- __le32 my_bss_rx_cycle_count;
+- __le32 rx_11b_mode_data_duration;
+- __le32 tx_frame_cnt;
+- __le32 mac_clk_mhz;
+-
+ } __packed;
+
+ struct wmi_10_4_chan_info_event {
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 8892707050d5..f04e470bd13a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -882,11 +882,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+- * initially on version 38 and then backported to 36, 29 and
+- * 17.
++ * initially on version 38 and then backported to29 and 17.
++ * The intention was to have it in 36 as well, but not all
++ * 8000 family got this feature enabled. The 8000 family is
++ * the only one using version 36, so skip this version
++ * entirely.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+ }
+diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
+index f1622f0ff8c9..fe3142d85d1e 100644
+--- a/drivers/net/wireless/marvell/libertas/if_usb.c
++++ b/drivers/net/wireless/marvell/libertas/if_usb.c
+@@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = {
+ { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
+ { MODEL_8388, "libertas/usb8388.bin", NULL },
+ { MODEL_8388, "usb8388.bin", NULL },
+- { MODEL_8682, "libertas/usb8682.bin", NULL }
++ { MODEL_8682, "libertas/usb8682.bin", NULL },
++ { 0, NULL, NULL }
+ };
+
+ static const struct usb_device_id if_usb_table[] = {
+diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
+index 38368d19aa6f..83c96a47914f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mmio.c
++++ b/drivers/net/wireless/mediatek/mt76/mmio.c
+@@ -43,7 +43,7 @@ static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+ static void mt76_mmio_copy(struct mt76_dev *dev, u32 offset, const void *data,
+ int len)
+ {
+- __iowrite32_copy(dev->mmio.regs + offset, data, len >> 2);
++ __iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
+ }
+
+ static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index dc1301effa24..f877e3862f8d 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -289,9 +289,8 @@ static int mt7615_driver_own(struct mt7615_dev *dev)
+
+ static int mt7615_load_patch(struct mt7615_dev *dev)
+ {
+- const struct firmware *fw;
+ const struct mt7615_patch_hdr *hdr;
+- const char *firmware = MT7615_ROM_PATCH;
++ const struct firmware *fw = NULL;
+ int len, ret, sem;
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+@@ -305,9 +304,9 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
+ return -EAGAIN;
+ }
+
+- ret = request_firmware(&fw, firmware, dev->mt76.dev);
++ ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
+ if (ret)
+- return ret;
++ goto out;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+@@ -371,14 +370,12 @@ static u32 gen_dl_mode(u8 feature_set, bool is_cr4)
+
+ static int mt7615_load_ram(struct mt7615_dev *dev)
+ {
+- const struct firmware *fw;
+ const struct mt7615_fw_trailer *hdr;
+- const char *n9_firmware = MT7615_FIRMWARE_N9;
+- const char *cr4_firmware = MT7615_FIRMWARE_CR4;
+ u32 n9_ilm_addr, offset;
+ int i, ret;
++ const struct firmware *fw;
+
+- ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
++ ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+@@ -426,7 +423,7 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
+
+ release_firmware(fw);
+
+- ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
++ ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+index 895c2904d7eb..929b39fa57c3 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+@@ -25,9 +25,9 @@
+ #define MT7615_RX_RING_SIZE 1024
+ #define MT7615_RX_MCU_RING_SIZE 512
+
+-#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
+-#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
+-#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
++#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin"
++#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
++#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
+
+ #define MT7615_EEPROM_SIZE 1024
+ #define MT7615_TOKEN_SIZE 4096
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index dd90427b2d67..28e3af7332bf 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -164,7 +164,7 @@ static void mt76u_copy(struct mt76_dev *dev, u32 offset,
+ int i, ret;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+- for (i = 0; i < (len / 4); i++) {
++ for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
+ put_unaligned_le32(val[i], usb->data);
+ ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
+index cfe05ba7280d..68fae52151dd 100644
+--- a/drivers/net/wireless/realtek/rtw88/pci.c
++++ b/drivers/net/wireless/realtek/rtw88/pci.c
+@@ -206,6 +206,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ return 0;
+ }
+
++static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
++ struct rtw_pci_rx_ring *rx_ring,
++ u32 idx, u32 desc_sz)
++{
++ struct device *dev = rtwdev->dev;
++ struct rtw_pci_rx_buffer_desc *buf_desc;
++ int buf_sz = RTK_PCI_RX_BUF_SIZE;
++
++ dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
++
++ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
++ idx * desc_sz);
++ memset(buf_desc, 0, sizeof(*buf_desc));
++ buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
++ buf_desc->dma = cpu_to_le32(dma);
++}
++
+ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring,
+ u8 desc_size, u32 len)
+@@ -763,6 +780,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u32 pkt_offset;
+ u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
+ u32 buf_desc_sz = chip->rx_buf_desc_sz;
++ u32 new_len;
+ u8 *rx_desc;
+ dma_addr_t dma;
+
+@@ -781,8 +799,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ rtw_pci_dma_check(rtwdev, ring, cur_rp);
+ skb = ring->buf[cur_rp];
+ dma = *((dma_addr_t *)skb->cb);
+- pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
+- PCI_DMA_FROMDEVICE);
++ dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
+ rx_desc = skb->data;
+ chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
+
+@@ -790,40 +808,35 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
+ pkt_stat.shift;
+
+- if (pkt_stat.is_c2h) {
+- /* keep rx_desc, halmac needs it */
+- skb_put(skb, pkt_stat.pkt_len + pkt_offset);
++ /* allocate a new skb for this frame,
++ * discard the frame if none available
++ */
++ new_len = pkt_stat.pkt_len + pkt_offset;
++ new = dev_alloc_skb(new_len);
++ if (WARN_ONCE(!new, "rx routine starvation\n"))
++ goto next_rp;
+
+- /* pass offset for further operation */
+- *((u32 *)skb->cb) = pkt_offset;
+- skb_queue_tail(&rtwdev->c2h_queue, skb);
++ /* put the DMA data including rx_desc from phy to new skb */
++ skb_put_data(new, skb->data, new_len);
++
++ if (pkt_stat.is_c2h) {
++ /* pass rx_desc & offset for further operation */
++ *((u32 *)new->cb) = pkt_offset;
++ skb_queue_tail(&rtwdev->c2h_queue, new);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ } else {
+- /* remove rx_desc, maybe use skb_pull? */
+- skb_put(skb, pkt_stat.pkt_len);
+- skb_reserve(skb, pkt_offset);
+-
+- /* alloc a smaller skb to mac80211 */
+- new = dev_alloc_skb(pkt_stat.pkt_len);
+- if (!new) {
+- new = skb;
+- } else {
+- skb_put_data(new, skb->data, skb->len);
+- dev_kfree_skb_any(skb);
+- }
+- /* TODO: merge into rx.c */
+- rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
++ /* remove rx_desc */
++ skb_pull(new, pkt_offset);
++
++ rtw_rx_stats(rtwdev, pkt_stat.vif, new);
+ memcpy(new->cb, &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(rtwdev->hw, new);
+ }
+
+- /* skb delivered to mac80211, alloc a new one in rx ring */
+- new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
+- if (WARN(!new, "rx routine starvation\n"))
+- return;
+-
+- ring->buf[cur_rp] = new;
+- rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
++next_rp:
++ /* new skb delivered to mac80211, re-enable original skb DMA */
++ rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
++ buf_desc_sz);
+
+ /* host read next element in ring */
+ if (++cur_rp >= ring->r.len)
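The rework above replaces unmap-and-reallocate with copy-and-recycle: the ring buffer stays DMA-mapped for its whole lifetime, dma_sync_single_for_cpu() hands it to the CPU, the frame (rx descriptor included) is copied into a freshly allocated skb, and dma_sync_single_for_device() gives the same buffer back to the hardware. A userspace sketch of that shape, with ordinary memory standing in for the mapped DMA buffers (toy model, not the driver):

/* Copy-and-recycle ring consumer: the slot's buffer is never replaced,
 * only copied out and handed straight back. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RING_LEN 4
#define BUF_SZ   64

static unsigned char ring[RING_LEN][BUF_SZ]; /* stand-in for DMA buffers */

int main(void)
{
	unsigned int cur_rp = 0;

	/* pretend the device wrote a frame into the current slot */
	strcpy((char *)ring[cur_rp], "frame");
	size_t frame_len = strlen((char *)ring[cur_rp]) + 1;

	/* copy the frame into a fresh buffer (dev_alloc_skb in the driver) */
	unsigned char *new = malloc(frame_len);
	if (!new)
		return 1;              /* "starvation": drop, keep the slot */
	memcpy(new, ring[cur_rp], frame_len);

	/* the original slot is immediately reusable by the producer */
	memset(ring[cur_rp], 0, BUF_SZ);
	cur_rp = (cur_rp + 1) % RING_LEN;

	printf("delivered: %s, next slot: %u\n", (char *)new, cur_rp);
	free(new);
	return 0;
}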
+diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+index da7e63fca9f5..a9999d10ae81 100644
+--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
++++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+@@ -223,7 +223,6 @@ void zd_mac_clear(struct zd_mac *mac)
+ {
+ flush_workqueue(zd_workqueue);
+ zd_chip_clear(&mac->chip);
+- lockdep_assert_held(&mac->lock);
+ ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
+ }
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 304aa8a65f2f..f928bcfc57b5 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -501,14 +501,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
+
+ down_write(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+- if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
++ unsigned nsid = le32_to_cpu(desc->nsids[n]);
++
++ if (ns->head->ns_id < nsid)
+ continue;
+- nvme_update_ns_ana_state(desc, ns);
++ if (ns->head->ns_id == nsid)
++ nvme_update_ns_ana_state(desc, ns);
+ if (++n == nr_nsids)
+ break;
+ }
+ up_write(&ctrl->namespaces_rwsem);
+- WARN_ON_ONCE(n < nr_nsids);
+ return 0;
+ }
+
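The loop above relies on both sequences being sorted ascending: the controller's namespace list is walked merge-style against the descriptor's nsids array, local namespaces below the current nsid are skipped with `continue`, and a nsid that has no matching local namespace is simply passed over, which is why the WARN_ON_ONCE could be dropped. A sketch of the walk with invented IDs (plain arrays, not the driver structures):

/* Merge-style walk over two sorted ID lists. */
#include <stdio.h>

int main(void)
{
	unsigned int ns_ids[]     = { 1, 2, 4, 5 }; /* local namespaces, sorted */
	unsigned int desc_nsids[] = { 2, 3, 5 };    /* ANA group members, sorted */
	size_t nr_ns = sizeof(ns_ids) / sizeof(ns_ids[0]);
	size_t nr_nsids = sizeof(desc_nsids) / sizeof(desc_nsids[0]);
	size_t n = 0;

	for (size_t i = 0; i < nr_ns; i++) {
		unsigned int nsid = desc_nsids[n];

		if (ns_ids[i] < nsid)
			continue;      /* local-only namespace, keep scanning */
		if (ns_ids[i] == nsid)
			printf("update ANA state of nsid %u\n", nsid);
		if (++n == nr_nsids)
			break;         /* nsid 3 was passed over, no warning */
	}
	return 0;
}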
+diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
+index 9f72d515fc4b..4099093a1734 100644
+--- a/drivers/nvme/target/admin-cmd.c
++++ b/drivers/nvme/target/admin-cmd.c
+@@ -81,9 +81,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
+ goto out;
+
+ host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
+- data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
++ data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
++ sectors[READ]), 1000);
+ host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+- data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
++ data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
++ sectors[WRITE]), 1000);
+
+ put_unaligned_le64(host_reads, &slog->host_reads[0]);
+ put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
+@@ -111,11 +113,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
+ if (!ns->bdev)
+ continue;
+ host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+- data_units_read +=
+- part_stat_read(ns->bdev->bd_part, sectors[READ]);
++ data_units_read += DIV_ROUND_UP(
++ part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
+ host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+- data_units_written +=
+- part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
++ data_units_written += DIV_ROUND_UP(
++ part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+
+ }
+ rcu_read_unlock();
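The DIV_ROUND_UP conversions above follow the NVMe definition of the SMART data-unit counters: one unit is 1000 sectors of 512 bytes, and any partial unit counts as a whole one. A sketch with an invented sector count:

/* NVMe SMART data units: thousands of 512-byte sectors, rounded up. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long sectors = 1234567; /* made-up sector count */

	printf("sectors=%llu data_units=%llu\n",
	       sectors, DIV_ROUND_UP(sectors, 1000)); /* 1235, not 1234 */
	return 0;
}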
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 3c730103e637..14be463e25b0 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -156,6 +156,15 @@ static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba)
+ return container_of(hba, struct dino_device, hba);
+ }
+
++/* Check if PCI device is behind a Card-mode Dino. */
++static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
++{
++ struct dino_device *dino_dev;
++
++ dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
++ return is_card_dino(&dino_dev->hba.dev->id);
++}
++
+ /*
+ * Dino Configuration Space Accessor Functions
+ */
+@@ -437,6 +446,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
+
++#ifdef CONFIG_TULIP
++static void pci_fixup_tulip(struct pci_dev *dev)
++{
++ if (!pci_dev_is_behind_card_dino(dev))
++ return;
++ if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
++ return;
++ pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
++ pci_name(dev));
++ /* Disable this card by zeroing the PCI resources */
++ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
++ memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
++#endif /* CONFIG_TULIP */
+
+ static void __init
+ dino_bios_init(void)
+diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
+index 5d3fb2abad1d..bec19d4814ab 100644
+--- a/drivers/platform/chrome/cros_ec_rpmsg.c
++++ b/drivers/platform/chrome/cros_ec_rpmsg.c
+@@ -41,6 +41,7 @@ struct cros_ec_rpmsg {
+ struct rpmsg_device *rpdev;
+ struct completion xfer_ack;
+ struct work_struct host_event_work;
++ struct rpmsg_endpoint *ept;
+ };
+
+ /**
+@@ -72,7 +73,6 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+ {
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+- struct rpmsg_device *rpdev = ec_rpmsg->rpdev;
+ struct ec_host_response *response;
+ unsigned long timeout;
+ int len;
+@@ -85,7 +85,7 @@ static int cros_ec_pkt_xfer_rpmsg(struct cros_ec_device *ec_dev,
+ dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
+
+ reinit_completion(&ec_rpmsg->xfer_ack);
+- ret = rpmsg_send(rpdev->ept, ec_dev->dout, len);
++ ret = rpmsg_send(ec_rpmsg->ept, ec_dev->dout, len);
+ if (ret) {
+ dev_err(ec_dev->dev, "rpmsg send failed\n");
+ return ret;
+@@ -196,11 +196,24 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
+ return 0;
+ }
+
++static struct rpmsg_endpoint *
++cros_ec_rpmsg_create_ept(struct rpmsg_device *rpdev)
++{
++ struct rpmsg_channel_info chinfo = {};
++
++ strscpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
++ chinfo.src = rpdev->src;
++ chinfo.dst = RPMSG_ADDR_ANY;
++
++ return rpmsg_create_ept(rpdev, cros_ec_rpmsg_callback, NULL, chinfo);
++}
++
+ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+ {
+ struct device *dev = &rpdev->dev;
+ struct cros_ec_rpmsg *ec_rpmsg;
+ struct cros_ec_device *ec_dev;
++ int ret;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+@@ -225,7 +238,18 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
+ INIT_WORK(&ec_rpmsg->host_event_work,
+ cros_ec_rpmsg_host_event_function);
+
+- return cros_ec_register(ec_dev);
++ ec_rpmsg->ept = cros_ec_rpmsg_create_ept(rpdev);
++ if (!ec_rpmsg->ept)
++ return -ENOMEM;
++
++ ret = cros_ec_register(ec_dev);
++ if (ret < 0) {
++ rpmsg_destroy_ept(ec_rpmsg->ept);
++ cancel_work_sync(&ec_rpmsg->host_event_work);
++ return ret;
++ }
++
++ return 0;
+ }
+
+ static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
+@@ -233,6 +257,7 @@ static void cros_ec_rpmsg_remove(struct rpmsg_device *rpdev)
+ struct cros_ec_device *ec_dev = dev_get_drvdata(&rpdev->dev);
+ struct cros_ec_rpmsg *ec_rpmsg = ec_dev->priv;
+
++ rpmsg_destroy_ept(ec_rpmsg->ept);
+ cancel_work_sync(&ec_rpmsg->host_event_work);
+ }
+
+@@ -249,7 +274,6 @@ static struct rpmsg_driver cros_ec_driver_rpmsg = {
+ },
+ .probe = cros_ec_rpmsg_probe,
+ .remove = cros_ec_rpmsg_remove,
+- .callback = cros_ec_rpmsg_callback,
+ };
+
+ module_rpmsg_driver(cros_ec_driver_rpmsg);
+diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
+index 1694a9aec77c..9b7af885e314 100644
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -155,6 +155,7 @@ static struct irq_chip int0002_cht_irqchip = {
+ * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
+ * and we don't want to mess with the ACPI SCI irq settings.
+ */
++ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+ static int int0002_probe(struct platform_device *pdev)
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index be6cda89dcf5..01a530e2f801 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -882,10 +882,14 @@ static int pmc_core_probe(struct platform_device *pdev)
+ if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
+ pmcdev->map = &cnp_reg_map;
+
+- if (lpit_read_residency_count_address(&slp_s0_addr))
++ if (lpit_read_residency_count_address(&slp_s0_addr)) {
+ pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
+- else
++
++ if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
++ return -ENODEV;
++ } else {
+ pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
++ }
+
+ pmcdev->regbase = ioremap(pmcdev->base_addr,
+ pmcdev->map->regmap_length);
+diff --git a/drivers/ras/Makefile b/drivers/ras/Makefile
+index ef6777e14d3d..6f0404f50107 100644
+--- a/drivers/ras/Makefile
++++ b/drivers/ras/Makefile
+@@ -1,3 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+-obj-$(CONFIG_RAS) += ras.o debugfs.o
++obj-$(CONFIG_RAS) += ras.o
++obj-$(CONFIG_DEBUG_FS) += debugfs.o
+ obj-$(CONFIG_RAS_CEC) += cec.o
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index c894cf0d8a28..81bd93eef21f 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -5486,7 +5486,7 @@ static int __init regulator_init(void)
+ /* init early to allow our consumers to complete system booting */
+ core_initcall(regulator_init);
+
+-static int __init regulator_late_cleanup(struct device *dev, void *data)
++static int regulator_late_cleanup(struct device *dev, void *data)
+ {
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ const struct regulator_ops *ops = rdev->desc->ops;
+@@ -5535,17 +5535,8 @@ unlock:
+ return 0;
+ }
+
+-static int __init regulator_init_complete(void)
++static void regulator_init_complete_work_function(struct work_struct *work)
+ {
+- /*
+- * Since DT doesn't provide an idiomatic mechanism for
+- * enabling full constraints and since it's much more natural
+- * with DT to provide them just assume that a DT enabled
+- * system has full constraints.
+- */
+- if (of_have_populated_dt())
+- has_full_constraints = true;
+-
+ /*
+	 * Regulators may have failed to resolve their input supplies
+	 * when they were registered, either because the input supply was
+@@ -5563,6 +5554,35 @@ static int __init regulator_init_complete(void)
+ */
+	class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_late_cleanup);
++}
++
++static DECLARE_DELAYED_WORK(regulator_init_complete_work,
++ regulator_init_complete_work_function);
++
++static int __init regulator_init_complete(void)
++{
++ /*
++ * Since DT doesn't provide an idiomatic mechanism for
++ * enabling full constraints and since it's much more natural
++ * with DT to provide them just assume that a DT enabled
++ * system has full constraints.
++ */
++ if (of_have_populated_dt())
++ has_full_constraints = true;
++
++ /*
++ * We punt completion for an arbitrary amount of time since
++ * systems like distros will load many drivers from userspace
++	 * so consumers might not always be ready yet; this is
++ * particularly an issue with laptops where this might bounce
++ * the display off then on. Ideally we'd get a notification
++ * from userspace when this happens but we don't so just wait
++ * a bit and hope we waited long enough. It'd be better if
++ * we'd only do this on systems that need it, and a kernel
++ * command line option might be useful.
++ */
++	schedule_delayed_work(&regulator_init_complete_work,
++ msecs_to_jiffies(30000));
+
+ return 0;
+ }
+diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
+index 60f15a722760..7e2ea8c76f6e 100644
+--- a/drivers/regulator/lm363x-regulator.c
++++ b/drivers/regulator/lm363x-regulator.c
+@@ -30,7 +30,7 @@
+
+ /* LM3632 */
+ #define LM3632_BOOST_VSEL_MAX 0x26
+-#define LM3632_LDO_VSEL_MAX 0x29
++#define LM3632_LDO_VSEL_MAX 0x28
+ #define LM3632_VBOOST_MIN 4500000
+ #define LM3632_VLDO_MIN 4000000
+
+diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
+index 65f1fe343c64..5efc959493ec 100644
+--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
+@@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
+ spin_unlock(&ctlr->ms_lock);
+
+ retry:
++ memset(cdb, 0, sizeof(cdb));
++
+ data_size = rdac_failover_get(ctlr, &list, cdb);
+
+ RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 6a4c719497ca..a51a6301f11b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -289,8 +289,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct srb_iocb *lio;
+ int rval = QLA_FUNCTION_FAILED;
+
+- if (!vha->flags.online)
+- goto done;
++ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
++ fcport->loop_id == FC_NO_LOOP_ID) {
++ ql_log(ql_log_warn, vha, 0xffff,
++ "%s: %8phC - not sending command.\n",
++ __func__, fcport->port_name);
++ return rval;
++ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+@@ -1262,8 +1267,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
++ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
++ fcport->loop_id == FC_NO_LOOP_ID) {
++ ql_log(ql_log_warn, vha, 0xffff,
++ "%s: %8phC - not sending command.\n",
++ __func__, fcport->port_name);
+ return rval;
++ }
+
+ fcport->disc_state = DSC_GPDB;
+
+@@ -1953,8 +1963,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+ return;
+ }
+
+- if (fcport->disc_state == DSC_DELETE_PEND)
++ if ((fcport->disc_state == DSC_DELETE_PEND) ||
++ (fcport->disc_state == DSC_DELETED)) {
++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
++ }
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+@@ -6699,8 +6712,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
+ }
+
+ /* Clear all async request states across all VPs. */
+- list_for_each_entry(fcport, &vha->vp_fcports, list)
++ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
++ fcport->scan_state = 0;
++ }
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 794478e5f7ec..b5ef1148eea8 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -5087,6 +5087,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+ if (fcport) {
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
++ fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 2fd5c09b42d4..e614415e0d72 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1209,7 +1209,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
+ sess->logout_on_delete = 0;
+ sess->logo_ack_needed = 0;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+- sess->scan_state = 0;
+ }
+ }
+
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 40f392569664..506062081d8e 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1073,6 +1073,18 @@ static void scsi_initialize_rq(struct request *rq)
+ cmd->retries = 0;
+ }
+
++/*
++ * Only called when the request has been neither completed nor freed by
++ * SCSI
++ */
++static void scsi_cleanup_rq(struct request *rq)
++{
++ if (rq->rq_flags & RQF_DONTPREP) {
++ scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
++ rq->rq_flags &= ~RQF_DONTPREP;
++ }
++}
++
+ /* Add a command to the list used by the aacraid and dpt_i2o drivers */
+ void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
+ {
+@@ -1800,6 +1812,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
+ .init_request = scsi_mq_init_request,
+ .exit_request = scsi_mq_exit_request,
+ .initialize_rq_fn = scsi_initialize_rq,
++ .cleanup_rq = scsi_cleanup_rq,
+ .busy = scsi_mq_lld_busy,
+ .map_queues = scsi_map_queues,
+ };
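scsi_cleanup_rq() above is a check-and-clear guard: per-command resources are released only if the request was actually prepared (RQF_DONTPREP set), and clearing the flag makes any second cleanup a no-op. The idiom in isolation (toy types, not the SCSI structures):

/* Flag-guarded teardown: cleanup runs at most once. */
#include <stdio.h>

#define RQF_DONTPREP (1u << 0) /* "request was prepared" marker */

struct request {
	unsigned int rq_flags;
};

static void uninit_cmd(struct request *rq)
{
	(void)rq;
	printf("releasing per-command resources\n");
}

static void cleanup_rq(struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP) {
		uninit_cmd(rq);
		rq->rq_flags &= ~RQF_DONTPREP; /* second call is a no-op */
	}
}

int main(void)
{
	struct request rq = { .rq_flags = RQF_DONTPREP };

	cleanup_rq(&rq); /* frees once */
	cleanup_rq(&rq); /* no-op */
	return 0;
}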
+diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
+index 19d4cbc93a17..c470e24f1dfa 100644
+--- a/drivers/soc/amlogic/meson-clk-measure.c
++++ b/drivers/soc/amlogic/meson-clk-measure.c
+@@ -11,6 +11,8 @@
+ #include <linux/debugfs.h>
+ #include <linux/regmap.h>
+
++static DEFINE_MUTEX(measure_lock);
++
+ #define MSR_CLK_DUTY 0x0
+ #define MSR_CLK_REG0 0x4
+ #define MSR_CLK_REG1 0x8
+@@ -360,6 +362,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
+ unsigned int val;
+ int ret;
+
++ ret = mutex_lock_interruptible(&measure_lock);
++ if (ret)
++ return ret;
++
+ regmap_write(priv->regmap, MSR_CLK_REG0, 0);
+
+ /* Set measurement duration */
+@@ -377,8 +383,10 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
+
+ ret = regmap_read_poll_timeout(priv->regmap, MSR_CLK_REG0,
+ val, !(val & MSR_BUSY), 10, 10000);
+- if (ret)
++ if (ret) {
++ mutex_unlock(&measure_lock);
+ return ret;
++ }
+
+ /* Disable */
+ regmap_update_bits(priv->regmap, MSR_CLK_REG0, MSR_ENABLE, 0);
+@@ -386,6 +394,8 @@ static int meson_measure_id(struct meson_msr_id *clk_msr_id,
+ /* Get the value in multiple of gate time counts */
+ regmap_read(priv->regmap, MSR_CLK_REG2, &val);
+
++ mutex_unlock(&measure_lock);
++
+ if (val >= MSR_VAL_MASK)
+ return -EINVAL;
+
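The point of the new measure_lock above is not just serializing hardware access but releasing the lock on every exit path; note the regmap_read_poll_timeout() error path gained its own mutex_unlock(). The same shape in a runnable pthread sketch (invented helper, not the driver):

/* The measure routine must drop the lock on the timeout path too,
 * or the next caller deadlocks. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t measure_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_counter_or_timeout(void)
{
	return 0; /* pretend success; nonzero would mean a poll timeout */
}

static int measure_id(unsigned int *out)
{
	int ret;

	pthread_mutex_lock(&measure_lock);

	ret = read_counter_or_timeout();
	if (ret) {
		pthread_mutex_unlock(&measure_lock); /* unlock here too */
		return ret;
	}

	*out = 42; /* stand-in measurement */
	pthread_mutex_unlock(&measure_lock);
	return 0;
}

int main(void)
{
	unsigned int val;

	if (!measure_id(&val))
		printf("measured %u\n", val);
	return 0;
}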
+diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
+index 68bfca6f20dd..2040caa6c808 100644
+--- a/drivers/soc/renesas/Kconfig
++++ b/drivers/soc/renesas/Kconfig
+@@ -55,6 +55,7 @@ config ARCH_EMEV2
+
+ config ARCH_R7S72100
+ bool "RZ/A1H (R7S72100)"
++ select ARM_ERRATA_754322
+ select PM
+ select PM_GENERIC_DOMAINS
+ select SYS_SUPPORTS_SH_MTU2
+@@ -76,6 +77,7 @@ config ARCH_R8A73A4
+ config ARCH_R8A7740
+ bool "R-Mobile A1 (R8A77400)"
+ select ARCH_RMOBILE
++ select ARM_ERRATA_754322
+ select RENESAS_INTC_IRQPIN
+
+ config ARCH_R8A7743
+@@ -103,10 +105,12 @@ config ARCH_R8A77470
+ config ARCH_R8A7778
+ bool "R-Car M1A (R8A77781)"
+ select ARCH_RCAR_GEN1
++ select ARM_ERRATA_754322
+
+ config ARCH_R8A7779
+ bool "R-Car H1 (R8A77790)"
+ select ARCH_RCAR_GEN1
++ select ARM_ERRATA_754322
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select SYSC_R8A7779
+@@ -150,6 +154,7 @@ config ARCH_R9A06G032
+ config ARCH_SH73A0
+ bool "SH-Mobile AG5 (R8A73A00)"
+ select ARCH_RMOBILE
++ select ARM_ERRATA_754322
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if SMP
+ select RENESAS_INTC_IRQPIN
+diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
+index 421ae1c887d8..54b616ad4a62 100644
+--- a/drivers/soc/renesas/rmobile-sysc.c
++++ b/drivers/soc/renesas/rmobile-sysc.c
+@@ -48,12 +48,8 @@ struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
+ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
+ {
+ struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
+- unsigned int mask;
++ unsigned int mask = BIT(rmobile_pd->bit_shift);
+
+- if (rmobile_pd->bit_shift == ~0)
+- return -EBUSY;
+-
+- mask = BIT(rmobile_pd->bit_shift);
+ if (rmobile_pd->suspend) {
+ int ret = rmobile_pd->suspend();
+
+@@ -80,14 +76,10 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
+
+ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
+ {
+- unsigned int mask;
++ unsigned int mask = BIT(rmobile_pd->bit_shift);
+ unsigned int retry_count;
+ int ret = 0;
+
+- if (rmobile_pd->bit_shift == ~0)
+- return 0;
+-
+- mask = BIT(rmobile_pd->bit_shift);
+ if (__raw_readl(rmobile_pd->base + PSTR) & mask)
+ return ret;
+
+@@ -122,11 +114,15 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+ struct dev_power_governor *gov = rmobile_pd->gov;
+
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+- genpd->power_off = rmobile_pd_power_down;
+- genpd->power_on = rmobile_pd_power_up;
+- genpd->attach_dev = cpg_mstp_attach_dev;
+- genpd->detach_dev = cpg_mstp_detach_dev;
+- __rmobile_pd_power_up(rmobile_pd);
++ genpd->attach_dev = cpg_mstp_attach_dev;
++ genpd->detach_dev = cpg_mstp_detach_dev;
++
++ if (!(genpd->flags & GENPD_FLAG_ALWAYS_ON)) {
++ genpd->power_off = rmobile_pd_power_down;
++ genpd->power_on = rmobile_pd_power_up;
++ __rmobile_pd_power_up(rmobile_pd);
++ }
++
+ pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
+ }
+
+@@ -270,6 +266,11 @@ static void __init rmobile_setup_pm_domain(struct device_node *np,
+ break;
+
+ case PD_NORMAL:
++ if (pd->bit_shift == ~0) {
++ /* Top-level always-on domain */
++ pr_debug("PM domain %s is always-on domain\n", name);
++ pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
++ }
+ break;
+ }
+
+diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
+index 18c06568805e..86789dbaf577 100644
+--- a/drivers/spi/spi-dw-mmio.c
++++ b/drivers/spi/spi-dw-mmio.c
+@@ -172,8 +172,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
+
+ /* Optional clock needed to access the registers */
+ dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+- if (IS_ERR(dwsmmio->pclk))
+- return PTR_ERR(dwsmmio->pclk);
++ if (IS_ERR(dwsmmio->pclk)) {
++ ret = PTR_ERR(dwsmmio->pclk);
++ goto out_clk;
++ }
+ ret = clk_prepare_enable(dwsmmio->pclk);
+ if (ret)
+ goto out_clk;
+diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
+index 53335ccc98f6..545fc8189fb0 100644
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -886,9 +886,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+ trans_mode);
+ }
+ }
++
++ return IRQ_HANDLED;
+ }
+
+- return IRQ_HANDLED;
++ return IRQ_NONE;
+ }
+
+ static const struct of_device_id fsl_dspi_dt_ids[] = {
+diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
+index f29e28df36ed..bfa4b254c4e4 100644
+--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
++++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
+@@ -243,7 +243,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
+ }
+
+ /* Waits for low-power LP-11 state on data and clock lanes. */
+-static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
++static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+ {
+ u32 mask, reg;
+ int ret;
+@@ -254,11 +254,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & mask) == mask, 0, 500000);
+ if (ret) {
+- v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
+- return ret;
++ v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
++ v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
+ }
+-
+- return 0;
+ }
+
+ /* Wait for active clock on the clock lane. */
+@@ -316,9 +314,7 @@ static int csi2_start(struct csi2_dev *csi2)
+ csi2_enable(csi2, true);
+
+ /* Step 5 */
+- ret = csi2_dphy_wait_stopstate(csi2);
+- if (ret)
+- goto err_assert_reset;
++ csi2_dphy_wait_stopstate(csi2);
+
+ /* Step 6 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index 9f39f0c360e0..cc1006375cac 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
+ */
+ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
+ {
+- static const int default_resolutions[][2] = {
+- { 800, 600 },
+- { 1024, 768 },
+- { 1280, 1024 },
+- };
+- u32 i, right_margin;
+-
+- for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
+- if (default_resolutions[i][0] == si->lfb_width &&
+- default_resolutions[i][1] == si->lfb_height)
+- break;
+- }
+- /* If not a default resolution used for textmode, this should be fine */
+- if (i >= ARRAY_SIZE(default_resolutions))
+- return true;
+-
+- /* If the right margin is 5 times smaller then the left one, reject */
+- right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
+- if (right_margin < (bgrt_tab.image_offset_x / 5))
+- return false;
++ /*
++ * All x86 firmwares horizontally center the image (the yoffset
++ * calculations differ between boards, but xoffset is predictable).
++ */
++ u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
+
+- return true;
++ return bgrt_tab.image_offset_x == expected_xoffset;
+ }
+ #else
+ static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
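The replacement check above encodes a single invariant: x86 firmware centers the BGRT boot logo horizontally, so a valid image_offset_x equals (lfb_width - bmp_width) / 2. A sketch of the test with made-up geometry:

/* Centering invariant: an image of width w centered on a screen of
 * width W starts at (W - w) / 2. */
#include <stdio.h>
#include <stdbool.h>

static bool bgrt_xoffset_ok(unsigned int lfb_width, unsigned int bmp_width,
			    unsigned int image_offset_x)
{
	return image_offset_x == (lfb_width - bmp_width) / 2;
}

int main(void)
{
	printf("%d\n", bgrt_xoffset_ok(1920, 480, 720)); /* 1: centered */
	printf("%d\n", bgrt_xoffset_ok(1920, 480, 0));   /* 0: stale offset */
	return 0;
}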
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 8264b468f283..36d172ccb085 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -1142,7 +1142,8 @@ out_free_interp:
+ * (since it grows up, and may collide early with the stack
+ * growing down), and into the unused ELF_ET_DYN_BASE region.
+ */
+- if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
++ if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
++ loc->elf_ex.e_type == ET_DYN && !interpreter)
+ current->mm->brk = current->mm->start_brk =
+ ELF_ET_DYN_BASE;
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 5df76c17775a..322ec4b839ed 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1343,6 +1343,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ struct tree_mod_elem *tm;
+ struct extent_buffer *eb = NULL;
+ struct extent_buffer *eb_root;
++ u64 eb_root_owner = 0;
+ struct extent_buffer *old;
+ struct tree_mod_root *old_root = NULL;
+ u64 old_generation = 0;
+@@ -1380,6 +1381,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ free_extent_buffer(old);
+ }
+ } else if (old_root) {
++ eb_root_owner = btrfs_header_owner(eb_root);
+ btrfs_tree_read_unlock(eb_root);
+ free_extent_buffer(eb_root);
+ eb = alloc_dummy_extent_buffer(fs_info, logical);
+@@ -1396,7 +1398,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
+ if (old_root) {
+ btrfs_set_header_bytenr(eb, eb->start);
+ btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
+- btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
++ btrfs_set_header_owner(eb, eb_root_owner);
+ btrfs_set_header_level(eb, old_root->level);
+ btrfs_set_header_generation(eb, old_generation);
+ }
+@@ -5475,6 +5477,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
+ advance_left = advance_right = 0;
+
+ while (1) {
++ cond_resched();
+ if (advance_left && !left_end_reached) {
+ ret = tree_advance(left_path, &left_level,
+ left_root_level,
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 0a61dff27f57..ae784d7bf5f3 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -40,6 +40,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
+ extern struct kmem_cache *btrfs_bit_radix_cachep;
+ extern struct kmem_cache *btrfs_path_cachep;
+ extern struct kmem_cache *btrfs_free_space_cachep;
++extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
+ struct btrfs_ordered_sum;
+ struct btrfs_ref;
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 43fdb2992956..6858a05606dd 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -474,6 +474,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+ struct rb_root_cached *root;
+ struct btrfs_delayed_root *delayed_root;
+
++ /* Not associated with any delayed_node */
++ if (!delayed_item->delayed_node)
++ return;
+ delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+
+ BUG_ON(!delayed_root);
+@@ -1525,7 +1528,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ * we have reserved enough space when we start a new transaction,
+ * so reserving metadata failure is impossible.
+ */
+- BUG_ON(ret);
++ if (ret < 0) {
++ btrfs_err(trans->fs_info,
++"metadata reservation failed for delayed dir item deltiona, should have been reserved");
++ btrfs_release_delayed_item(item);
++ goto end;
++ }
+
+ mutex_lock(&node->mutex);
+ ret = __btrfs_add_delayed_deletion_item(node, item);
+@@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+ "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
+ index, node->root->root_key.objectid,
+ node->inode_id, ret);
+- BUG();
++ btrfs_delayed_item_release_metadata(dir->root, item);
++ btrfs_release_delayed_item(item);
+ }
+ mutex_unlock(&node->mutex);
+ end:
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index deb74a8c191a..6d26d38a2e12 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -423,6 +423,16 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
+ */
+ if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
+ return 0;
++
++ /* We have @first_key, so this @eb must have at least one item */
++ if (btrfs_header_nritems(eb) == 0) {
++ btrfs_err(fs_info,
++ "invalid tree nritems, bytenr=%llu nritems=0 expect >0",
++ eb->start);
++ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
++ return -EUCLEAN;
++ }
++
+ if (found_level)
+ btrfs_node_key_to_cpu(eb, &found_key, 0);
+ else
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b8f472087902..37865929fdc2 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7875,6 +7875,14 @@ search:
+ */
+ if ((flags & extra) && !(block_group->flags & extra))
+ goto loop;
++
++ /*
++ * This block group has different flags than we want.
++			 * It's possible that we have the MIXED_GROUP flag but no
++			 * block group is mixed.  Just skip such a block group.
++ */
++ btrfs_release_block_group(block_group, delalloc);
++ continue;
+ }
+
+ have_block_group:
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 93900ff87df7..0d16ed19e84b 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3708,11 +3708,20 @@ err_unlock:
+ static void set_btree_ioerr(struct page *page)
+ {
+ struct extent_buffer *eb = (struct extent_buffer *)page->private;
++ struct btrfs_fs_info *fs_info;
+
+ SetPageError(page);
+ if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
+ return;
+
++ /*
++ * If we error out, we should add back the dirty_metadata_bytes
++ * to make it consistent.
++ */
++ fs_info = eb->fs_info;
++ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
++ eb->len, fs_info->dirty_metadata_batch);
++
+ /*
+ * If writeback for a btree extent that doesn't belong to a log tree
+ * failed, increment the counter transaction->eb_write_errors.
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index f74dc259307b..232546190656 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -764,7 +764,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ } else {
+ ASSERT(num_bitmaps);
+ num_bitmaps--;
+- e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
++ e->bitmap = kmem_cache_zalloc(
++ btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!e->bitmap) {
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
+@@ -1881,7 +1882,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *bitmap_info)
+ {
+ unlink_free_space(ctl, bitmap_info);
+- kfree(bitmap_info->bitmap);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
+@@ -2135,7 +2136,8 @@ new_bitmap:
+ }
+
+ /* allocate the bitmap */
+- info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
++ info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
++ GFP_NOFS);
+ spin_lock(&ctl->tree_lock);
+ if (!info->bitmap) {
+ ret = -ENOMEM;
+@@ -2146,7 +2148,9 @@ new_bitmap:
+
+ out:
+ if (info) {
+- kfree(info->bitmap);
++ if (info->bitmap)
++ kmem_cache_free(btrfs_free_space_bitmap_cachep,
++ info->bitmap);
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ }
+
+@@ -2802,7 +2806,8 @@ out:
+ if (entry->bytes == 0) {
+ ctl->free_extents--;
+ if (entry->bitmap) {
+- kfree(entry->bitmap);
++ kmem_cache_free(btrfs_free_space_bitmap_cachep,
++ entry->bitmap);
+ ctl->total_bitmaps--;
+ ctl->op->recalc_thresholds(ctl);
+ }
+@@ -3606,7 +3611,7 @@ again:
+ }
+
+ if (!map) {
+- map = kzalloc(PAGE_SIZE, GFP_NOFS);
++ map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!map) {
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ return -ENOMEM;
+@@ -3635,7 +3640,8 @@ again:
+
+ if (info)
+ kmem_cache_free(btrfs_free_space_cachep, info);
+- kfree(map);
++ if (map)
++ kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
+ return 0;
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8c9c7d76c900..4bdc43e1e53c 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -73,6 +73,7 @@ static struct kmem_cache *btrfs_inode_cachep;
+ struct kmem_cache *btrfs_trans_handle_cachep;
+ struct kmem_cache *btrfs_path_cachep;
+ struct kmem_cache *btrfs_free_space_cachep;
++struct kmem_cache *btrfs_free_space_bitmap_cachep;
+
+ static int btrfs_setsize(struct inode *inode, struct iattr *attr);
+ static int btrfs_truncate(struct inode *inode, bool skip_writeback);
+@@ -9361,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
+ kmem_cache_destroy(btrfs_trans_handle_cachep);
+ kmem_cache_destroy(btrfs_path_cachep);
+ kmem_cache_destroy(btrfs_free_space_cachep);
++ kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
+ }
+
+ int __init btrfs_init_cachep(void)
+@@ -9390,6 +9392,12 @@ int __init btrfs_init_cachep(void)
+ if (!btrfs_free_space_cachep)
+ goto fail;
+
++ btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
++ PAGE_SIZE, PAGE_SIZE,
++ SLAB_RED_ZONE, NULL);
++ if (!btrfs_free_space_bitmap_cachep)
++ goto fail;
++
+ return 0;
+ fail:
+ btrfs_destroy_cachep();
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index f8a3c1b0a15a..001efc9ba1e7 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -3154,9 +3154,6 @@ out:
+ btrfs_free_path(path);
+
+ mutex_lock(&fs_info->qgroup_rescan_lock);
+- if (!btrfs_fs_closing(fs_info))
+- fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+-
+ if (err > 0 &&
+ fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+@@ -3172,16 +3169,30 @@ out:
+ trans = btrfs_start_transaction(fs_info->quota_root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
++ trans = NULL;
+ btrfs_err(fs_info,
+ "fail to start transaction for status update: %d",
+ err);
+- goto done;
+ }
+- ret = update_qgroup_status_item(trans);
+- if (ret < 0) {
+- err = ret;
+- btrfs_err(fs_info, "fail to update qgroup status: %d", err);
++
++ mutex_lock(&fs_info->qgroup_rescan_lock);
++ if (!btrfs_fs_closing(fs_info))
++ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
++ if (trans) {
++ ret = update_qgroup_status_item(trans);
++ if (ret < 0) {
++ err = ret;
++ btrfs_err(fs_info, "fail to update qgroup status: %d",
++ err);
++ }
+ }
++ fs_info->qgroup_rescan_running = false;
++ complete_all(&fs_info->qgroup_rescan_completion);
++ mutex_unlock(&fs_info->qgroup_rescan_lock);
++
++ if (!trans)
++ return;
++
+ btrfs_end_transaction(trans);
+
+ if (btrfs_fs_closing(fs_info)) {
+@@ -3192,12 +3203,6 @@ out:
+ } else {
+ btrfs_err(fs_info, "qgroup scan failed with %d", err);
+ }
+-
+-done:
+- mutex_lock(&fs_info->qgroup_rescan_lock);
+- fs_info->qgroup_rescan_running = false;
+- mutex_unlock(&fs_info->qgroup_rescan_lock);
+- complete_all(&fs_info->qgroup_rescan_completion);
+ }
+
+ /*
+@@ -3425,6 +3430,9 @@ cleanup:
+ while ((unode = ulist_next(&reserved->range_changed, &uiter)))
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
+ unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
++ /* Also free data bytes of already reserved one */
++ btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
++ orig_reserved, BTRFS_QGROUP_RSV_DATA);
+ extent_changeset_release(reserved);
+ return ret;
+ }
+@@ -3469,7 +3477,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
+ * EXTENT_QGROUP_RESERVED, we won't double free.
+ * So not need to rush.
+ */
+- ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
++ ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
+ free_start, free_start + free_len - 1,
+ EXTENT_QGROUP_RESERVED, &changeset);
+ if (ret < 0)
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index ccd5706199d7..9634cae1e1b1 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -821,6 +821,95 @@ static int check_inode_item(struct extent_buffer *leaf,
+ return 0;
+ }
+
++static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
++ int slot)
++{
++ struct btrfs_fs_info *fs_info = leaf->fs_info;
++ struct btrfs_root_item ri;
++ const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
++ BTRFS_ROOT_SUBVOL_DEAD;
++
++ /* No such tree id */
++ if (key->objectid == 0) {
++ generic_err(leaf, slot, "invalid root id 0");
++ return -EUCLEAN;
++ }
++
++ /*
++ * Some older kernel may create ROOT_ITEM with non-zero offset, so here
++ * we only check offset for reloc tree whose key->offset must be a
++ * valid tree.
++ */
++ if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
++ generic_err(leaf, slot, "invalid root id 0 for reloc tree");
++ return -EUCLEAN;
++ }
++
++ if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
++ generic_err(leaf, slot,
++ "invalid root item size, have %u expect %zu",
++ btrfs_item_size_nr(leaf, slot), sizeof(ri));
++ }
++
++ read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
++ sizeof(ri));
++
++ /* Generation related */
++ if (btrfs_root_generation(&ri) >
++ btrfs_super_generation(fs_info->super_copy) + 1) {
++ generic_err(leaf, slot,
++ "invalid root generation, have %llu expect (0, %llu]",
++ btrfs_root_generation(&ri),
++ btrfs_super_generation(fs_info->super_copy) + 1);
++ return -EUCLEAN;
++ }
++ if (btrfs_root_generation_v2(&ri) >
++ btrfs_super_generation(fs_info->super_copy) + 1) {
++ generic_err(leaf, slot,
++ "invalid root v2 generation, have %llu expect (0, %llu]",
++ btrfs_root_generation_v2(&ri),
++ btrfs_super_generation(fs_info->super_copy) + 1);
++ return -EUCLEAN;
++ }
++ if (btrfs_root_last_snapshot(&ri) >
++ btrfs_super_generation(fs_info->super_copy) + 1) {
++ generic_err(leaf, slot,
++ "invalid root last_snapshot, have %llu expect (0, %llu]",
++ btrfs_root_last_snapshot(&ri),
++ btrfs_super_generation(fs_info->super_copy) + 1);
++ return -EUCLEAN;
++ }
++
++ /* Alignment and level check */
++ if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
++ generic_err(leaf, slot,
++ "invalid root bytenr, have %llu expect to be aligned to %u",
++ btrfs_root_bytenr(&ri), fs_info->sectorsize);
++ return -EUCLEAN;
++ }
++ if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
++ generic_err(leaf, slot,
++ "invalid root level, have %u expect [0, %u]",
++ btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
++ return -EUCLEAN;
++ }
++ if (ri.drop_level >= BTRFS_MAX_LEVEL) {
++ generic_err(leaf, slot,
++ "invalid root level, have %u expect [0, %u]",
++ ri.drop_level, BTRFS_MAX_LEVEL - 1);
++ return -EUCLEAN;
++ }
++
++ /* Flags check */
++ if (btrfs_root_flags(&ri) & ~valid_root_flags) {
++ generic_err(leaf, slot,
++ "invalid root flags, have 0x%llx expect mask 0x%llx",
++ btrfs_root_flags(&ri), valid_root_flags);
++ return -EUCLEAN;
++ }
++ return 0;
++}
++
+ /*
+ * Common point to switch the item-specific validation.
+ */
+@@ -856,6 +945,9 @@ static int check_leaf_item(struct extent_buffer *leaf,
+ case BTRFS_INODE_ITEM_KEY:
+ ret = check_inode_item(leaf, key, slot);
+ break;
++ case BTRFS_ROOT_ITEM_KEY:
++ ret = check_root_item(leaf, key, slot);
++ break;
+ }
+ return ret;
+ }
+@@ -899,6 +991,12 @@ static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
+ owner);
+ return -EUCLEAN;
+ }
++ /* Unknown tree */
++ if (owner == 0) {
++ generic_err(leaf, 0,
++ "invalid owner, root 0 is not defined");
++ return -EUCLEAN;
++ }
+ return 0;
+ }
+
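check_root_item() above is a sequence of independent range checks, each rejecting one impossible on-disk value with -EUCLEAN: nonzero object id, generations no later than the superblock's, a sector-aligned tree root, a level below BTRFS_MAX_LEVEL, and no unknown flag bits. A toy version of the style (invented struct, limits, and flag mask, not the btrfs disk format):

/* Field-by-field validation of an untrusted record. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVEL   8
#define VALID_FLAGS 0x3ULL  /* assumed mask of known flag bits */
#define SECTORSIZE  4096

struct root_item {
	uint64_t objectid, generation, bytenr, flags;
	uint8_t level;
};

static int check_root_item(const struct root_item *ri, uint64_t super_gen)
{
	if (ri->objectid == 0)
		return -1;                  /* no such tree id */
	if (ri->generation > super_gen + 1)
		return -1;                  /* from the future */
	if (ri->bytenr % SECTORSIZE)
		return -1;                  /* misaligned tree root */
	if (ri->level >= MAX_LEVEL)
		return -1;                  /* impossible depth */
	if (ri->flags & ~VALID_FLAGS)
		return -1;                  /* unknown flag bits */
	return 0;
}

int main(void)
{
	struct root_item ri = { .objectid = 5, .generation = 100,
				.bytenr = 4096 * 3, .flags = 0, .level = 1 };

	printf("valid: %s\n", check_root_item(&ri, 100) ? "no" : "yes");
	return 0;
}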
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 65d9771e49f9..bd34ea0d27e9 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -433,6 +433,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ cifs_show_security(s, tcon->ses);
+ cifs_show_cache_flavor(s, cifs_sb);
+
++ if (tcon->no_lease)
++ seq_puts(s, ",nolease");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
+ seq_puts(s, ",multiuser");
+ else if (tcon->ses->user_name)
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 4777b3c4a92c..85aa1bc930f1 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -575,6 +575,7 @@ struct smb_vol {
+ bool noblocksnd:1;
+ bool noautotune:1;
+ bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
++ bool no_lease:1; /* disable requesting leases */
+ bool fsc:1; /* enable fscache */
+ bool mfsymlinks:1; /* use Minshall+French Symlinks */
+ bool multiuser:1;
+@@ -1079,6 +1080,7 @@ struct cifs_tcon {
+ bool need_reopen_files:1; /* need to reopen tcon file handles */
+ bool use_resilient:1; /* use resilient instead of durable handles */
+ bool use_persistent:1; /* use persistent instead of durable handles */
++ bool no_lease:1; /* Do not request leases on files or directories */
+ __le32 capabilities;
+ __u32 share_flags;
+ __u32 maximal_access;
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 85b2107e8a3d..bd8c00635ea4 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -74,7 +74,7 @@ enum {
+ Opt_user_xattr, Opt_nouser_xattr,
+ Opt_forceuid, Opt_noforceuid,
+ Opt_forcegid, Opt_noforcegid,
+- Opt_noblocksend, Opt_noautotune,
++ Opt_noblocksend, Opt_noautotune, Opt_nolease,
+ Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+ Opt_mapposix, Opt_nomapposix,
+ Opt_mapchars, Opt_nomapchars, Opt_sfu,
+@@ -133,6 +133,7 @@ static const match_table_t cifs_mount_option_tokens = {
+ { Opt_noforcegid, "noforcegid" },
+ { Opt_noblocksend, "noblocksend" },
+ { Opt_noautotune, "noautotune" },
++ { Opt_nolease, "nolease" },
+ { Opt_hard, "hard" },
+ { Opt_soft, "soft" },
+ { Opt_perm, "perm" },
+@@ -1709,6 +1710,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ case Opt_noautotune:
+ vol->noautotune = 1;
+ break;
++ case Opt_nolease:
++ vol->no_lease = 1;
++ break;
+ case Opt_hard:
+ vol->retry = 1;
+ break;
+@@ -3230,6 +3234,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+ return 0;
+ if (tcon->handle_timeout != volume_info->handle_timeout)
+ return 0;
++ if (tcon->no_lease != volume_info->no_lease)
++ return 0;
+ return 1;
+ }
+
+@@ -3444,6 +3450,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
+ tcon->nocase = volume_info->nocase;
+ tcon->nohandlecache = volume_info->nohandlecache;
+ tcon->local_lease = volume_info->local_lease;
++ tcon->no_lease = volume_info->no_lease;
+ INIT_LIST_HEAD(&tcon->pending_opens);
+
+ spin_lock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 8ae8ef526b4a..0011e6bdaa9a 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -743,6 +743,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+ if (rc)
+ goto oshr_exit;
+
++ atomic_inc(&tcon->num_remote_opens);
++
+ o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
+ oparms.fid->persistent_fid = o_rsp->PersistentFileId;
+ oparms.fid->volatile_fid = o_rsp->VolatileFileId;
+@@ -1167,6 +1169,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+
+ rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ resp_buftype, rsp_iov);
++ /* no need to bump num_remote_opens because handle immediately closed */
+
+ sea_exit:
+ kfree(ea);
+@@ -1488,6 +1491,8 @@ smb2_ioctl_query_info(const unsigned int xid,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto iqinf_exit;
++
++ /* No need to bump num_remote_opens since handle immediately closed */
+ if (qi.flags & PASSTHRU_FSCTL) {
+ pqi = (struct smb_query_info __user *)arg;
+ io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
+@@ -3249,6 +3254,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
+ return;
+
++ /* Check if the server granted an oplock rather than a lease */
++ if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
++ return smb2_set_oplock_level(cinode, oplock, epoch,
++ purge_cache);
++
+ if (oplock & SMB2_LEASE_READ_CACHING_HE) {
+ new_oplock |= CIFS_CACHE_READ_FLG;
+ strcat(message, "R");
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index c3c8de5513db..a221536db0de 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2263,6 +2263,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
++ /* no need to inc num_remote_opens because we close it just below */
+ trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
+ /* resource #4: response buffer */
+@@ -2370,7 +2371,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+ iov[1].iov_len = uni_path_len;
+ iov[1].iov_base = path;
+
+- if (!server->oplocks)
++ if ((!server->oplocks) || (tcon->no_lease))
+ *oplock = SMB2_OPLOCK_LEVEL_NONE;
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
+diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
+index 50ddb795aaeb..a2db401a58ed 100644
+--- a/fs/cifs/xattr.c
++++ b/fs/cifs/xattr.c
+@@ -31,7 +31,7 @@
+ #include "cifs_fs_sb.h"
+ #include "cifs_unicode.h"
+
+-#define MAX_EA_VALUE_SIZE 65535
++#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
+ #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
+ #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
+ #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index d40ed940001e..7a72cde366f7 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -3813,8 +3813,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
+ * illegal.
+ */
+ if (ee_block != map->m_lblk || ee_len > map->m_len) {
+-#ifdef EXT4_DEBUG
+- ext4_warning("Inode (%ld) finished: extent logical block %llu,"
++#ifdef CONFIG_EXT4_DEBUG
++ ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
+ " len %u; IO logical block %llu, len %u",
+ inode->i_ino, (unsigned long long)ee_block, ee_len,
+ (unsigned long long)map->m_lblk, map->m_len);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 85c648289b57..a084e84610f8 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4288,6 +4288,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+
+ trace_ext4_punch_hole(inode, offset, length, 0);
+
++ ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
++ if (ext4_has_inline_data(inode)) {
++ down_write(&EXT4_I(inode)->i_mmap_sem);
++ ret = ext4_convert_inline_data(inode);
++ up_write(&EXT4_I(inode)->i_mmap_sem);
++ if (ret)
++ return ret;
++ }
++
+ /*
+ * Write out all dirty pages to avoid race conditions
+ * Then release them.
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index ea8237513dfa..186468fba82e 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -377,7 +377,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ list_add_tail(&req->list, &fiq->pending);
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ }
+
+@@ -389,16 +389,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
+ forget->forget_one.nodeid = nodeid;
+ forget->forget_one.nlookup = nlookup;
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ fiq->forget_list_tail->next = forget;
+ fiq->forget_list_tail = forget;
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ } else {
+ kfree(forget);
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+
+ static void flush_bg_queue(struct fuse_conn *fc)
+@@ -412,10 +412,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
+ req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
+ list_del(&req->list);
+ fc->active_background++;
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ req->in.h.unique = fuse_get_unique(fiq);
+ queue_request(fiq, req);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+ }
+
+@@ -439,9 +439,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+ * smp_mb() from queue_interrupt().
+ */
+ if (!list_empty(&req->intr_entry)) {
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ list_del_init(&req->intr_entry);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+ WARN_ON(test_bit(FR_PENDING, &req->flags));
+ WARN_ON(test_bit(FR_SENT, &req->flags));
+@@ -483,10 +483,10 @@ put_request:
+
+ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+ {
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ 	/* Check whether we've sent a request to interrupt this req */
+ if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return -EINVAL;
+ }
+
+@@ -499,13 +499,13 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
+ smp_mb();
+ if (test_bit(FR_FINISHED, &req->flags)) {
+ list_del_init(&req->intr_entry);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return 0;
+ }
+- wake_up_locked(&fiq->waitq);
++ wake_up(&fiq->waitq);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return 0;
+ }
+
+@@ -535,16 +535,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+ if (!err)
+ return;
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ /* Request is not yet in userspace, bail out */
+ if (test_bit(FR_PENDING, &req->flags)) {
+ list_del(&req->list);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return;
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ }
+
+ /*
+@@ -559,9 +559,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
+ struct fuse_iqueue *fiq = &fc->iq;
+
+ BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (!fiq->connected) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ req->out.h.error = -ENOTCONN;
+ } else {
+ req->in.h.unique = fuse_get_unique(fiq);
+@@ -569,7 +569,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
+ /* acquire extra reference, since request is still needed
+ after request_end() */
+ __fuse_get_request(req);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ request_wait_answer(fc, req);
+ /* Pairs with smp_wmb() in request_end() */
+@@ -700,12 +700,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
+
+ __clear_bit(FR_ISREPLY, &req->flags);
+ req->in.h.unique = unique;
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (fiq->connected) {
+ queue_request(fiq, req);
+ err = 0;
+ }
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ return err;
+ }
+@@ -1149,12 +1149,12 @@ static int request_pending(struct fuse_iqueue *fiq)
+ * Unlike other requests this is assembled on demand, without a need
+ * to allocate a separate fuse_req structure.
+ *
+- * Called with fiq->waitq.lock held, releases it
++ * Called with fiq->lock held, releases it
+ */
+ static int fuse_read_interrupt(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes, struct fuse_req *req)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ struct fuse_in_header ih;
+ struct fuse_interrupt_in arg;
+@@ -1169,7 +1169,7 @@ __releases(fiq->waitq.lock)
+ ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
+ arg.unique = req->in.h.unique;
+
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ if (nbytes < reqsize)
+ return -EINVAL;
+
+@@ -1206,7 +1206,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
+ static int fuse_read_single_forget(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ int err;
+ struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
+@@ -1220,7 +1220,7 @@ __releases(fiq->waitq.lock)
+ .len = sizeof(ih) + sizeof(arg),
+ };
+
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ kfree(forget);
+ if (nbytes < ih.len)
+ return -EINVAL;
+@@ -1238,7 +1238,7 @@ __releases(fiq->waitq.lock)
+
+ static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs, size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ int err;
+ unsigned max_forgets;
+@@ -1252,13 +1252,13 @@ __releases(fiq->waitq.lock)
+ };
+
+ if (nbytes < ih.len) {
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return -EINVAL;
+ }
+
+ max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
+ head = dequeue_forget(fiq, max_forgets, &count);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ arg.count = count;
+ ih.len += count * sizeof(struct fuse_forget_one);
+@@ -1288,7 +1288,7 @@ __releases(fiq->waitq.lock)
+ static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
+ struct fuse_copy_state *cs,
+ size_t nbytes)
+-__releases(fiq->waitq.lock)
++__releases(fiq->lock)
+ {
+ if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
+ return fuse_read_single_forget(fiq, cs, nbytes);
+@@ -1318,16 +1318,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ unsigned int hash;
+
+ restart:
+- spin_lock(&fiq->waitq.lock);
+- err = -EAGAIN;
+- if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
+- !request_pending(fiq))
+- goto err_unlock;
++ for (;;) {
++ spin_lock(&fiq->lock);
++ if (!fiq->connected || request_pending(fiq))
++ break;
++ spin_unlock(&fiq->lock);
+
+- err = wait_event_interruptible_exclusive_locked(fiq->waitq,
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ err = wait_event_interruptible_exclusive(fiq->waitq,
+ !fiq->connected || request_pending(fiq));
+- if (err)
+- goto err_unlock;
++ if (err)
++ return err;
++ }
+
+ if (!fiq->connected) {
+ err = fc->aborted ? -ECONNABORTED : -ENODEV;
+@@ -1351,7 +1354,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
+ req = list_entry(fiq->pending.next, struct fuse_req, list);
+ clear_bit(FR_PENDING, &req->flags);
+ list_del_init(&req->list);
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ in = &req->in;
+ reqsize = in->h.len;
+@@ -1409,7 +1412,7 @@ out_end:
+ return err;
+
+ err_unlock:
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+ return err;
+ }
+
+@@ -2121,12 +2124,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
+ fiq = &fud->fc->iq;
+ poll_wait(file, &fiq->waitq, wait);
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ if (!fiq->connected)
+ mask = EPOLLERR;
+ else if (request_pending(fiq))
+ mask |= EPOLLIN | EPOLLRDNORM;
+- spin_unlock(&fiq->waitq.lock);
++ spin_unlock(&fiq->lock);
+
+ return mask;
+ }
+@@ -2221,15 +2224,15 @@ void fuse_abort_conn(struct fuse_conn *fc)
+ flush_bg_queue(fc);
+ spin_unlock(&fc->bg_lock);
+
+- spin_lock(&fiq->waitq.lock);
++ spin_lock(&fiq->lock);
+ fiq->connected = 0;
+ list_for_each_entry(req, &fiq->pending, list)
+ clear_bit(FR_PENDING, &req->flags);
+ list_splice_tail_init(&fiq->pending, &to_end);
+ while (forget_pending(fiq))
+ kfree(dequeue_forget(fiq, 1, NULL));
+- wake_up_all_locked(&fiq->waitq);
+- spin_unlock(&fiq->waitq.lock);
++ wake_up_all(&fiq->waitq);
++ spin_unlock(&fiq->lock);
+ kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+ end_polls(fc);
+ wake_up_all(&fc->blocked_waitq);
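
The fs/fuse/dev.c hunks above are all one logical change: the input queue gets a dedicated spinlock (fiq->lock, added to struct fuse_iqueue below) instead of reusing the waitqueue's embedded lock, so fuse_dev_do_read() can no longer use wait_event_interruptible_exclusive_locked() and becomes an open-coded retry loop that re-checks the wake condition under the new lock. A minimal userspace sketch of the same discipline, with a pthread mutex and condition variable standing in for the spinlock and waitqueue (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>

struct iqueue {
    pthread_mutex_t lock;   /* dedicated lock, the fiq->lock analogue */
    pthread_cond_t waitq;   /* the fiq->waitq analogue */
    bool connected;
    int pending;
};

/* Block until disconnected or work arrives; returns the remaining
 * work count, or -1 once the queue is disconnected. */
static int iqueue_read(struct iqueue *q)
{
    int ret;

    pthread_mutex_lock(&q->lock);
    while (q->connected && q->pending == 0)
        pthread_cond_wait(&q->waitq, &q->lock); /* drops lock while asleep */
    ret = q->connected ? --q->pending : -1;
    pthread_mutex_unlock(&q->lock);
    return ret;
}

The predicate must be re-tested after every wakeup (pthread_cond_wait() may return spuriously); the kernel's wait_event helpers re-evaluate their condition for the same reason.
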
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index b8f9c83835d5..fdff7bf4fa4f 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1767,6 +1767,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
+ WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+ redirty_page_for_writepage(wbc, page);
++ unlock_page(page);
+ return 0;
+ }
+
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 24dbca777775..89bdc41e0d86 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -450,6 +450,9 @@ struct fuse_iqueue {
+ /** Connection established */
+ unsigned connected;
+
++ /** Lock protecting accesses to members of this structure */
++ spinlock_t lock;
++
+ /** Readers of the connection are waiting on this */
+ wait_queue_head_t waitq;
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 4bb885b0f032..987877860c01 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -582,6 +582,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
+ static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+ {
+ memset(fiq, 0, sizeof(struct fuse_iqueue));
++ spin_lock_init(&fiq->lock);
+ init_waitqueue_head(&fiq->waitq);
+ INIT_LIST_HEAD(&fiq->pending);
+ INIT_LIST_HEAD(&fiq->interrupts);
+diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
+index 574d03f8a573..b2da3de6a78e 100644
+--- a/fs/fuse/readdir.c
++++ b/fs/fuse/readdir.c
+@@ -372,11 +372,13 @@ static enum fuse_parse_result fuse_parse_cache(struct fuse_file *ff,
+ for (;;) {
+ struct fuse_dirent *dirent = addr + offset;
+ unsigned int nbytes = size - offset;
+- size_t reclen = FUSE_DIRENT_SIZE(dirent);
++ size_t reclen;
+
+ if (nbytes < FUSE_NAME_OFFSET || !dirent->namelen)
+ break;
+
++ reclen = FUSE_DIRENT_SIZE(dirent); /* derefs ->namelen */
++
+ if (WARN_ON(dirent->namelen > FUSE_NAME_MAX))
+ return FOUND_ERR;
+ if (WARN_ON(reclen > nbytes))
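
The fs/fuse/readdir.c reorder above is a validate-before-dereference fix: FUSE_DIRENT_SIZE(dirent) reads dirent->namelen, so it may only run after the nbytes < FUSE_NAME_OFFSET check has proven the header lies inside the buffer. The same shape for a generic length-prefixed record, as a sketch (the wire format here is invented for illustration):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Invented format: a u32 name length followed by that many name bytes. */
static int count_records(const char *buf, size_t size)
{
    size_t off = 0;
    int n = 0;

    while (size - off >= sizeof(uint32_t)) {   /* header fully in bounds? */
        uint32_t namelen;
        size_t reclen;

        memcpy(&namelen, buf + off, sizeof(namelen)); /* safe: checked above */
        reclen = sizeof(uint32_t) + namelen;
        if (reclen < namelen || reclen > size - off)  /* wrap or truncation */
            return -1;
        off += reclen;
        n++;
    }
    return n;
}
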
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 253e2f939d5f..b47939900932 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1670,6 +1670,7 @@ out_unlock:
+ brelse(dibh);
+ up_write(&ip->i_rw_mutex);
+ gfs2_trans_end(sdp);
++ buf_in_tr = false;
+ }
+ gfs2_glock_dq_uninit(rd_gh);
+ cond_resched();
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 03cd8f5bba85..701936f2bde3 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -288,6 +288,7 @@ struct io_ring_ctx {
+ struct sqe_submit {
+ const struct io_uring_sqe *sqe;
+ unsigned short index;
++ u32 sequence;
+ bool has_user;
+ bool needs_lock;
+ bool needs_fixed_file;
+@@ -1894,7 +1895,7 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
+
+ if (flags & IOSQE_IO_DRAIN) {
+ req->flags |= REQ_F_IO_DRAIN;
+- req->sequence = ctx->cached_sq_head - 1;
++ req->sequence = s->sequence;
+ }
+
+ if (!io_op_needs_file(s->sqe))
+@@ -2050,6 +2051,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
+ if (head < ctx->sq_entries) {
+ s->index = head;
+ s->sqe = &ctx->sq_sqes[head];
++ s->sequence = ctx->cached_sq_head;
+ ctx->cached_sq_head++;
+ return true;
+ }
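
The fs/io_uring.c hunks record the submission-queue sequence in struct sqe_submit at the moment the entry is fetched, rather than reconstructing it later as ctx->cached_sq_head - 1, which is stale once further fetches have advanced the head. Snapshot-at-fetch in miniature (hypothetical names, not the real io_uring structures):

#include <stdint.h>

struct ring_ctx { uint32_t cached_sq_head; };

struct submit {
    uint32_t index;
    uint32_t sequence;   /* snapshot taken when the entry is fetched */
};

/* Capture the ring position before advancing the head; any later
 * recomputation would see other submissions' increments. */
static void fetch_entry(struct ring_ctx *ctx, struct submit *s, uint32_t head)
{
    s->index = head;
    s->sequence = ctx->cached_sq_head;
    ctx->cached_sq_head++;
}
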
+diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
+index cb8ec1f65c03..73c9775215b3 100644
+--- a/fs/overlayfs/export.c
++++ b/fs/overlayfs/export.c
+@@ -227,9 +227,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
+ /* Encode an upper or lower file handle */
+ fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
+ ovl_dentry_upper(dentry), !enc_lower);
+- err = PTR_ERR(fh);
+ if (IS_ERR(fh))
+- goto fail;
++ return PTR_ERR(fh);
+
+ err = -EOVERFLOW;
+ if (fh->len > buflen)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 7663aeb85fa3..bc14781886bf 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -383,7 +383,8 @@ static bool ovl_can_list(const char *s)
+ return true;
+
+ /* Never list trusted.overlay, list other trusted for superuser only */
+- return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
++ return !ovl_is_private_xattr(s) &&
++ ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ }
+
+ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 76748255f843..6eba7f2492f6 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -33,6 +33,7 @@
+ #include <linux/pagevec.h>
+ #include <linux/backing-dev.h>
+ #include <linux/mman.h>
++#include <linux/fadvise.h>
+
+ static const struct vm_operations_struct xfs_file_vm_ops;
+
+@@ -939,6 +940,30 @@ out_unlock:
+ return error;
+ }
+
++STATIC int
++xfs_file_fadvise(
++ struct file *file,
++ loff_t start,
++ loff_t end,
++ int advice)
++{
++ struct xfs_inode *ip = XFS_I(file_inode(file));
++ int ret;
++ int lockflags = 0;
++
++ /*
++ * Operations creating pages in page cache need protection from hole
++ * punching and similar ops
++ */
++ if (advice == POSIX_FADV_WILLNEED) {
++ lockflags = XFS_IOLOCK_SHARED;
++ xfs_ilock(ip, lockflags);
++ }
++ ret = generic_fadvise(file, start, end, advice);
++ if (lockflags)
++ xfs_iunlock(ip, lockflags);
++ return ret;
++}
+
+ STATIC loff_t
+ xfs_file_remap_range(
+@@ -1235,6 +1260,7 @@ const struct file_operations xfs_file_operations = {
+ .fsync = xfs_file_fsync,
+ .get_unmapped_area = thp_get_unmapped_area,
+ .fallocate = xfs_file_fallocate,
++ .fadvise = xfs_file_fadvise,
+ .remap_file_range = xfs_file_remap_range,
+ };
+
+diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
+index 15d1aa53d96c..a5a99b43f68e 100644
+--- a/include/linux/blk-mq.h
++++ b/include/linux/blk-mq.h
+@@ -140,6 +140,7 @@ typedef int (poll_fn)(struct blk_mq_hw_ctx *);
+ typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+ typedef bool (busy_fn)(struct request_queue *);
+ typedef void (complete_fn)(struct request *);
++typedef void (cleanup_rq_fn)(struct request *);
+
+
+ struct blk_mq_ops {
+@@ -200,6 +201,12 @@ struct blk_mq_ops {
+ /* Called from inside blk_get_request() */
+ void (*initialize_rq_fn)(struct request *rq);
+
++ /*
++ * Called before freeing a request that has not completed yet,
++ * usually to free the driver's private data
++ */
++ cleanup_rq_fn *cleanup_rq;
++
+ /*
+ * If set, returns whether or not this queue currently is busy
+ */
+@@ -366,4 +373,10 @@ static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+ BLK_QC_T_INTERNAL;
+ }
+
++static inline void blk_mq_cleanup_rq(struct request *rq)
++{
++ if (rq->q->mq_ops->cleanup_rq)
++ rq->q->mq_ops->cleanup_rq(rq);
++}
++
+ #endif
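
blk_mq_cleanup_rq() above is the usual optional-callback idiom: a single inline wrapper owns the NULL check, so call sites stay unconditional and drivers that do not implement the hook pay nothing. The idiom in a self-contained sketch (names invented):

#include <stddef.h>

struct request;

struct queue_ops {
    void (*complete)(struct request *rq);    /* mandatory */
    void (*cleanup_rq)(struct request *rq);  /* optional, may be NULL */
};

struct request {
    const struct queue_ops *ops;
    void *driver_data;
};

/* One wrapper hides the NULL check from every call site. */
static inline void request_cleanup(struct request *rq)
{
    if (rq->ops->cleanup_rq)
        rq->ops->cleanup_rq(rq);
}
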
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 93baef66b942..4c6754e53672 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -202,9 +202,12 @@ struct request {
+ #ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+ #endif
+-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+- unsigned short throtl_size;
+-#endif
++ /*
++ * rq sectors used for blk stats. It has the same value
++ * as blk_rq_sectors(rq), except that it is never zeroed
++ * by completion.
++ */
++ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+@@ -902,6 +905,7 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+ * blk_rq_err_bytes() : bytes left till the next error boundary
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
++ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+ static inline sector_t blk_rq_pos(const struct request *rq)
+ {
+@@ -930,6 +934,11 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+ }
+
++static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
++{
++ return rq->stats_sectors;
++}
++
+ #ifdef CONFIG_BLK_DEV_ZONED
+ static inline unsigned int blk_rq_zone_no(struct request *rq)
+ {
+diff --git a/include/linux/bug.h b/include/linux/bug.h
+index fe5916550da8..f639bd0122f3 100644
+--- a/include/linux/bug.h
++++ b/include/linux/bug.h
+@@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
+
+ #else /* !CONFIG_GENERIC_BUG */
+
++static inline void *find_bug(unsigned long bugaddr)
++{
++ return NULL;
++}
++
+ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
+ struct pt_regs *regs)
+ {
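
The find_bug() stub completes the !CONFIG_GENERIC_BUG half of the header so the kernel/kprobes.c hunk further down can call it unconditionally: a compiled-out feature exports a no-op static inline rather than forcing #ifdefs onto every caller. Sketched generically (FEATURE_TRACE and trace_event() are made up):

#include <stdio.h>

/* Define FEATURE_TRACE at build time to use the real implementation. */
#ifdef FEATURE_TRACE
void trace_event(const char *msg);   /* provided elsewhere when enabled */
#else
static inline void trace_event(const char *msg) { (void)msg; }
#endif

/* Callers need no #ifdef; the disabled stub compiles away entirely. */
static void do_work(void)
{
    trace_event("do_work entered");
    puts("working");
}
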
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 5186ac5b2a29..3b84dd5523a9 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -3544,6 +3544,8 @@ extern void inode_nohighmem(struct inode *inode);
+ /* mm/fadvise.c */
+ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
++extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
++ int advice);
+
+ #if defined(CONFIG_IO_URING)
+ extern struct sock *io_uring_get_socket(struct file *file);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 7ac3755444d3..56a8ad506072 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -493,6 +493,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
+
+ void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
+
++/*
++ * May be called from host driver's system/runtime suspend/resume callbacks,
++ * to know whether SDIO IRQs have been claimed.
++ */
++static inline bool sdio_irq_claimed(struct mmc_host *host)
++{
++ return host->sdio_irqs > 0;
++}
++
+ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
+ {
+ host->ops->enable_sdio_irq(host, 0);
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 70e86148cb1e..862556761bbf 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -548,6 +548,7 @@
+ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+ #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
++#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+ #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
+ #define PCI_DEVICE_ID_AMD_LANCE 0x2000
+ #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
+index dc905a4ff8d7..185d94829701 100644
+--- a/include/linux/quotaops.h
++++ b/include/linux/quotaops.h
+@@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
+ /* i_mutex must being held */
+ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+ {
+- return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
++ return (ia->ia_valid & ATTR_SIZE) ||
+ (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
+ (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ }
+diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
+index a6d9fce7f20e..4e84472c9be8 100644
+--- a/include/linux/sunrpc/xprt.h
++++ b/include/linux/sunrpc/xprt.h
+@@ -346,6 +346,7 @@ bool xprt_prepare_transmit(struct rpc_task *task);
+ void xprt_request_enqueue_transmit(struct rpc_task *task);
+ void xprt_request_enqueue_receive(struct rpc_task *task);
+ void xprt_request_wait_receive(struct rpc_task *task);
++void xprt_request_dequeue_xprt(struct rpc_task *task);
+ bool xprt_request_need_retransmit(struct rpc_task *task);
+ void xprt_transmit(struct rpc_task *task);
+ void xprt_end_transmit(struct rpc_task *task);
+diff --git a/include/net/route.h b/include/net/route.h
+index 55ff71ffb796..2b34a3a4386f 100644
+--- a/include/net/route.h
++++ b/include/net/route.h
+@@ -53,10 +53,11 @@ struct rtable {
+ unsigned int rt_flags;
+ __u16 rt_type;
+ __u8 rt_is_input;
+- u8 rt_gw_family;
++ __u8 rt_uses_gateway;
+
+ int rt_iif;
+
++ u8 rt_gw_family;
+ /* Info on neighbour */
+ union {
+ __be32 rt_gw4;
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 2504c269e658..1010bde1146b 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1514,7 +1514,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
+ /* Ensure it is not in reserved area nor out of text */
+ if (!kernel_text_address((unsigned long) p->addr) ||
+ within_kprobe_blacklist((unsigned long) p->addr) ||
+- jump_label_text_reserved(p->addr, p->addr)) {
++ jump_label_text_reserved(p->addr, p->addr) ||
++ find_bug((unsigned long)p->addr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 1888f6a3b694..424abf802f02 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3274,7 +3274,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ /* move first record forward until length fits into the buffer */
+ seq = dumper->cur_seq;
+ idx = dumper->cur_idx;
+- while (l > size && seq < dumper->next_seq) {
++ while (l >= size && seq < dumper->next_seq) {
+ struct printk_log *msg = log_from_idx(idx);
+
+ l -= msg_print_text(msg, true, time, NULL, 0);
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 980ca3ca643f..affa7aae758f 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -3123,13 +3123,13 @@ static int __init rcu_spawn_gp_kthread(void)
+ t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
+- rnp = rcu_get_root();
+- raw_spin_lock_irqsave_rcu_node(rnp, flags);
+- rcu_state.gp_kthread = t;
+ if (kthread_prio) {
+ sp.sched_priority = kthread_prio;
+ sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+ }
++ rnp = rcu_get_root();
++ raw_spin_lock_irqsave_rcu_node(rnp, flags);
++ rcu_state.gp_kthread = t;
+ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ wake_up_process(t);
+ rcu_spawn_nocb_kthreads();
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 42bc2986520d..b78986ce1f6b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3051,8 +3051,36 @@ void scheduler_tick(void)
+
+ struct tick_work {
+ int cpu;
++ atomic_t state;
+ struct delayed_work work;
+ };
++/* Values for ->state, see diagram below. */
++#define TICK_SCHED_REMOTE_OFFLINE 0
++#define TICK_SCHED_REMOTE_OFFLINING 1
++#define TICK_SCHED_REMOTE_RUNNING 2
++
++/*
++ * State diagram for ->state:
++ *
++ *
++ * TICK_SCHED_REMOTE_OFFLINE
++ * | ^
++ * | |
++ * | | sched_tick_remote()
++ * | |
++ * | |
++ * +--TICK_SCHED_REMOTE_OFFLINING
++ * | ^
++ * | |
++ * sched_tick_start() | | sched_tick_stop()
++ * | |
++ * V |
++ * TICK_SCHED_REMOTE_RUNNING
++ *
++ *
++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
++ * and sched_tick_start() are happy to leave the state in RUNNING.
++ */
+
+ static struct tick_work __percpu *tick_work_cpu;
+
+@@ -3065,6 +3093,7 @@ static void sched_tick_remote(struct work_struct *work)
+ struct task_struct *curr;
+ struct rq_flags rf;
+ u64 delta;
++ int os;
+
+ /*
+ * Handle the tick only if it appears the remote CPU is running in full
+@@ -3078,7 +3107,7 @@ static void sched_tick_remote(struct work_struct *work)
+
+ rq_lock_irq(rq, &rf);
+ curr = rq->curr;
+- if (is_idle_task(curr))
++ if (is_idle_task(curr) || cpu_is_offline(cpu))
+ goto out_unlock;
+
+ update_rq_clock(rq);
+@@ -3098,13 +3127,18 @@ out_requeue:
+ /*
+ * Run the remote tick once per second (1Hz). This arbitrary
+ * frequency is large enough to avoid overload but short enough
+- * to keep scheduler internal stats reasonably up to date.
++ * to keep scheduler internal stats reasonably up to date. But
++ * first update state to reflect hotplug activity if required.
+ */
+- queue_delayed_work(system_unbound_wq, dwork, HZ);
++ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
++ if (os == TICK_SCHED_REMOTE_RUNNING)
++ queue_delayed_work(system_unbound_wq, dwork, HZ);
+ }
+
+ static void sched_tick_start(int cpu)
+ {
++ int os;
+ struct tick_work *twork;
+
+ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+@@ -3113,15 +3147,20 @@ static void sched_tick_start(int cpu)
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+- twork->cpu = cpu;
+- INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+- queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
++ if (os == TICK_SCHED_REMOTE_OFFLINE) {
++ twork->cpu = cpu;
++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
++ queue_delayed_work(system_unbound_wq, &twork->work, HZ);
++ }
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ static void sched_tick_stop(int cpu)
+ {
+ struct tick_work *twork;
++ int os;
+
+ if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+ return;
+@@ -3129,7 +3168,10 @@ static void sched_tick_stop(int cpu)
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+- cancel_delayed_work_sync(&twork->work);
++ /* There cannot be competing actions, but don't rely on stop-machine. */
++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
++ WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
++ /* Don't cancel, as this would mess up the state machine. */
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+@@ -3137,7 +3179,6 @@ int __init sched_tick_offload_init(void)
+ {
+ tick_work_cpu = alloc_percpu(struct tick_work);
+ BUG_ON(!tick_work_cpu);
+-
+ return 0;
+ }
+
+@@ -6472,10 +6513,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
+ #ifdef CONFIG_RT_GROUP_SCHED
+ if (!sched_rt_can_attach(css_tg(css), task))
+ return -EINVAL;
+-#else
+- /* We don't support RT-tasks being in separate groups */
+- if (task->sched_class != &fair_sched_class)
+- return -EINVAL;
+ #endif
+ /*
+ * Serialize against wake_up_new_task() such that if its
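
The kernel/sched/core.c hunks replace cancel_delayed_work_sync() in the hotplug path with the three-state machine drawn in the comment: sched_tick_stop() only flips RUNNING to OFFLINING, and the work item itself completes the OFFLINING-to-OFFLINE transition (atomic_fetch_add_unless() decrements the state unless it is still RUNNING) and declines to requeue, so hotplug never blocks on a possibly-running work item. A self-contained C11 sketch of the same machine (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

enum { REMOTE_OFFLINE, REMOTE_OFFLINING, REMOTE_RUNNING };

static atomic_int tick_state = REMOTE_OFFLINE;

/* Only the OFFLINE -> RUNNING transition arms the periodic work. */
static bool tick_start(void)
{
    return atomic_exchange(&tick_state, REMOTE_RUNNING) == REMOTE_OFFLINE;
}

/* Stop never cancels the work; it only requests shutdown. */
static void tick_stop(void)
{
    atomic_exchange(&tick_state, REMOTE_OFFLINING);
}

/* Worker body: returns true if it should re-arm for the next period. */
static bool tick_work(void)
{
    int expected = REMOTE_OFFLINING;

    /* OFFLINING -> OFFLINE, leaving RUNNING untouched: the same effect
     * as atomic_fetch_add_unless(&state, -1, RUNNING) above. */
    if (atomic_compare_exchange_strong(&tick_state, &expected, REMOTE_OFFLINE))
        return false;   /* we finished the shutdown: do not requeue */
    return true;        /* still RUNNING: requeue */
}
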
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index ae3ec77bb92f..e139b54716b4 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -117,6 +117,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+ unsigned int next_freq)
+ {
+ struct cpufreq_policy *policy = sg_policy->policy;
++ int cpu;
+
+ if (!sugov_update_next_freq(sg_policy, time, next_freq))
+ return;
+@@ -126,7 +127,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+ return;
+
+ policy->cur = next_freq;
+- trace_cpu_frequency(next_freq, smp_processor_id());
++
++ if (trace_cpu_frequency_enabled()) {
++ for_each_cpu(cpu, policy->cpus)
++ trace_cpu_frequency(next_freq, cpu);
++ }
+ }
+
+ static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 1c66480afda8..fcdafdcb129c 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
+ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
+ {
+ struct rq *later_rq = NULL;
++ struct dl_bw *dl_b;
+
+ later_rq = find_lock_later_rq(p, rq);
+ if (!later_rq) {
+@@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
+ double_lock_balance(rq, later_rq);
+ }
+
++ if (p->dl.dl_non_contending || p->dl.dl_throttled) {
++ /*
++ * Inactive timer is armed (or callback is running, but
++ * waiting for us to release rq locks). In any case, when it
++ * fires (or continues), it will see the running_bw of this
++ * task migrated to later_rq (and handle it correctly).
++ */
++ sub_running_bw(&p->dl, &rq->dl);
++ sub_rq_bw(&p->dl, &rq->dl);
++
++ add_rq_bw(&p->dl, &later_rq->dl);
++ add_running_bw(&p->dl, &later_rq->dl);
++ } else {
++ sub_rq_bw(&p->dl, &rq->dl);
++ add_rq_bw(&p->dl, &later_rq->dl);
++ }
++
++ /*
++ * And we finally need to fixup root_domain(s) bandwidth accounting,
++ * since p is still hanging out in the old (now moved to default) root
++ * domain.
++ */
++ dl_b = &rq->rd->dl_bw;
++ raw_spin_lock(&dl_b->lock);
++ __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
++ raw_spin_unlock(&dl_b->lock);
++
++ dl_b = &later_rq->rd->dl_bw;
++ raw_spin_lock(&dl_b->lock);
++ __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
++ raw_spin_unlock(&dl_b->lock);
++
+ set_task_cpu(p, later_rq->cpu);
+ double_unlock_balance(later_rq, rq);
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index b07672e793a8..5a312c030b8d 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -9319,9 +9319,10 @@ more_balance:
+ out_balanced:
+ /*
+ * We reach balance although we may have faced some affinity
+- * constraints. Clear the imbalance flag if it was set.
++ * constraints. Clear the imbalance flag only if other tasks got
++ * a chance to move and fix the imbalance.
+ */
+- if (sd_parent) {
++ if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
+ int *group_imbalance = &sd_parent->groups->sgc->imbalance;
+
+ if (*group_imbalance)
+@@ -10568,18 +10569,18 @@ err:
+ void online_fair_sched_group(struct task_group *tg)
+ {
+ struct sched_entity *se;
++ struct rq_flags rf;
+ struct rq *rq;
+ int i;
+
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+ se = tg->se[i];
+-
+- raw_spin_lock_irq(&rq->lock);
++ rq_lock_irq(rq, &rf);
+ update_rq_clock(rq);
+ attach_entity_cfs_rq(se);
+ sync_throttle(tg, i);
+- raw_spin_unlock_irq(&rq->lock);
++ rq_unlock_irq(rq, &rf);
+ }
+ }
+
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 80940939b733..e4bc4aa739b8 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -241,13 +241,14 @@ static void do_idle(void)
+ check_pgt_cache();
+ rmb();
+
++ local_irq_disable();
++
+ if (cpu_is_offline(cpu)) {
+- tick_nohz_idle_stop_tick_protected();
++ tick_nohz_idle_stop_tick();
+ cpuhp_report_idle_dead();
+ arch_cpu_idle_dead();
+ }
+
+- local_irq_disable();
+ arch_cpu_idle_enter();
+
+ /*
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 6e52b67b420e..517e3719027e 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -1198,7 +1198,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
+ if (static_branch_likely(&psi_disabled))
+ return -EOPNOTSUPP;
+
+- buf_size = min(nbytes, (sizeof(buf) - 1));
++ buf_size = min(nbytes, sizeof(buf));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
+index 0519a8805aab..bfe0e0656f02 100644
+--- a/kernel/time/alarmtimer.c
++++ b/kernel/time/alarmtimer.c
+@@ -673,7 +673,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
+ enum alarmtimer_type type;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ if (!capable(CAP_WAKE_ALARM))
+ return -EPERM;
+@@ -791,7 +791,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
+ int ret = 0;
+
+ if (!alarmtimer_get_rtcdev())
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ if (flags & ~TIMER_ABSTIME)
+ return -EINVAL;
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 0a426f4e3125..5bbad147a90c 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
+ struct sighand_struct *sighand;
+ struct task_struct *p = timer->it.cpu.task;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return -EINVAL;
+
+ /*
+ * Protect against sighand release/switch in exit/exec and process/
+@@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+ u64 old_expires, new_expires, old_incr, val;
+ int ret;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return -EINVAL;
+
+ /*
+ * Use the to_ktime conversion because that clamps the maximum
+@@ -715,10 +717,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
+
+ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
+ {
+- u64 now;
+ struct task_struct *p = timer->it.cpu.task;
++ u64 now;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return;
+
+ /*
+ * Easy part: convert the reload time.
+@@ -1000,12 +1003,13 @@ static void check_process_timers(struct task_struct *tsk,
+ */
+ static void posix_cpu_timer_rearm(struct k_itimer *timer)
+ {
++ struct task_struct *p = timer->it.cpu.task;
+ struct sighand_struct *sighand;
+ unsigned long flags;
+- struct task_struct *p = timer->it.cpu.task;
+ u64 now;
+
+- WARN_ON_ONCE(p == NULL);
++ if (WARN_ON_ONCE(!p))
++ return;
+
+ /*
+ * Fetch the current sample and update the timer's expiry time.
+@@ -1202,7 +1206,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
+ u64 now;
+ int ret;
+
+- WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
++ if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
++ return;
++
+ ret = cpu_timer_sample_group(clock_idx, tsk, &now);
+
+ if (oldval && ret != -EINVAL) {
+diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
+index ba16c08e8cb9..717c940112f9 100644
+--- a/lib/lzo/lzo1x_compress.c
++++ b/lib/lzo/lzo1x_compress.c
+@@ -83,17 +83,19 @@ next:
+ ALIGN((uintptr_t)ir, 4)) &&
+ (ir < limit) && (*ir == 0))
+ ir++;
+- for (; (ir + 4) <= limit; ir += 4) {
+- dv = *((u32 *)ir);
+- if (dv) {
++ if (IS_ALIGNED((uintptr_t)ir, 4)) {
++ for (; (ir + 4) <= limit; ir += 4) {
++ dv = *((u32 *)ir);
++ if (dv) {
+ # if defined(__LITTLE_ENDIAN)
+- ir += __builtin_ctz(dv) >> 3;
++ ir += __builtin_ctz(dv) >> 3;
+ # elif defined(__BIG_ENDIAN)
+- ir += __builtin_clz(dv) >> 3;
++ ir += __builtin_clz(dv) >> 3;
+ # else
+ # error "missing endian definition"
+ # endif
+- break;
++ break;
++ }
+ }
+ }
+ #endif
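
The lib/lzo fix only enters the word-at-a-time zero scan once the cursor is 4-byte aligned: the preceding byte loop stops at the first nonzero byte, which can leave ir unaligned, and the old code then issued misaligned u32 loads. The guarded fast path as a portable sketch (memcpy stands in for the kernel's direct word load, which is what required the alignment guarantee):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define ALIGNED4(p) (((uintptr_t)(p) & 3) == 0)

/* Count leading zero bytes in [p, limit); the 4-bytes-at-a-time loop
 * runs only once the cursor is word aligned. */
static size_t count_zero_run(const unsigned char *p, const unsigned char *limit)
{
    const unsigned char *start = p;

    while (p < limit && !ALIGNED4(p) && *p == 0)   /* align byte-wise */
        p++;

    if (ALIGNED4(p)) {                             /* the fix: guard the loop */
        while (p + 4 <= limit) {
            uint32_t v;

            memcpy(&v, p, 4);                      /* aligned 4-byte read */
            if (v != 0)
                break;
            p += 4;
        }
    }

    while (p < limit && *p == 0)                   /* finish byte-wise */
        p++;
    return (size_t)(p - start);
}
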
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 952dc2fb24e5..1e994920e6ff 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -2078,6 +2078,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
+ const bool sync = cc->mode != MIGRATE_ASYNC;
+ bool update_cached;
+
++ /*
++ * These counters track activities during zone compaction. Initialize
++ * them before compacting a new zone.
++ */
++ cc->total_migrate_scanned = 0;
++ cc->total_free_scanned = 0;
++ cc->nr_migratepages = 0;
++ cc->nr_freepages = 0;
++ INIT_LIST_HEAD(&cc->freepages);
++ INIT_LIST_HEAD(&cc->migratepages);
++
+ cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
+ ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
+ cc->classzone_idx);
+@@ -2281,10 +2292,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
+ {
+ enum compact_result ret;
+ struct compact_control cc = {
+- .nr_freepages = 0,
+- .nr_migratepages = 0,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .order = order,
+ .search_order = order,
+ .gfp_mask = gfp_mask,
+@@ -2305,8 +2312,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
+
+ if (capture)
+ current->capture_control = &capc;
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+
+ ret = compact_zone(&cc, &capc);
+
+@@ -2408,8 +2413,6 @@ static void compact_node(int nid)
+ struct zone *zone;
+ struct compact_control cc = {
+ .order = -1,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .mode = MIGRATE_SYNC,
+ .ignore_skip_hint = true,
+ .whole_zone = true,
+@@ -2423,11 +2426,7 @@ static void compact_node(int nid)
+ if (!populated_zone(zone))
+ continue;
+
+- cc.nr_freepages = 0;
+- cc.nr_migratepages = 0;
+ cc.zone = zone;
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+
+ compact_zone(&cc, NULL);
+
+@@ -2529,8 +2528,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
+ struct compact_control cc = {
+ .order = pgdat->kcompactd_max_order,
+ .search_order = pgdat->kcompactd_max_order,
+- .total_migrate_scanned = 0,
+- .total_free_scanned = 0,
+ .classzone_idx = pgdat->kcompactd_classzone_idx,
+ .mode = MIGRATE_SYNC_LIGHT,
+ .ignore_skip_hint = false,
+@@ -2554,16 +2551,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
+ COMPACT_CONTINUE)
+ continue;
+
+- cc.nr_freepages = 0;
+- cc.nr_migratepages = 0;
+- cc.total_migrate_scanned = 0;
+- cc.total_free_scanned = 0;
+- cc.zone = zone;
+- INIT_LIST_HEAD(&cc.freepages);
+- INIT_LIST_HEAD(&cc.migratepages);
+-
+ if (kthread_should_stop())
+ return;
++
++ cc.zone = zone;
+ status = compact_zone(&cc, NULL);
+
+ if (status == COMPACT_SUCCESS) {
+diff --git a/mm/fadvise.c b/mm/fadvise.c
+index 467bcd032037..4f17c83db575 100644
+--- a/mm/fadvise.c
++++ b/mm/fadvise.c
+@@ -27,8 +27,7 @@
+ * deactivate the pages and clear PG_Referenced.
+ */
+
+-static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+- int advice)
++int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+ {
+ struct inode *inode;
+ struct address_space *mapping;
+@@ -178,6 +177,7 @@ static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ }
+ return 0;
+ }
++EXPORT_SYMBOL(generic_fadvise);
+
+ int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+ {
+diff --git a/mm/madvise.c b/mm/madvise.c
+index 628022e674a7..ae56d0ef337d 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -14,6 +14,7 @@
+ #include <linux/userfaultfd_k.h>
+ #include <linux/hugetlb.h>
+ #include <linux/falloc.h>
++#include <linux/fadvise.h>
+ #include <linux/sched.h>
+ #include <linux/ksm.h>
+ #include <linux/fs.h>
+@@ -275,6 +276,7 @@ static long madvise_willneed(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+ {
+ struct file *file = vma->vm_file;
++ loff_t offset;
+
+ *prev = vma;
+ #ifdef CONFIG_SWAP
+@@ -298,12 +300,20 @@ static long madvise_willneed(struct vm_area_struct *vma,
+ return 0;
+ }
+
+- start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+- if (end > vma->vm_end)
+- end = vma->vm_end;
+- end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+-
+- force_page_cache_readahead(file->f_mapping, file, start, end - start);
++ /*
++ * Filesystem's fadvise may need to take various locks. We need to
++ * explicitly grab a reference because the vma (and hence the
++ * vma's reference to the file) can go away as soon as we drop
++ * mmap_sem.
++ */
++ *prev = NULL; /* tell sys_madvise we drop mmap_sem */
++ get_file(file);
++ up_read(&current->mm->mmap_sem);
++ offset = (loff_t)(start - vma->vm_start)
++ + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
++ vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
++ fput(file);
++ down_read(&current->mm->mmap_sem);
+ return 0;
+ }
+
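
madvise_willneed() now goes through vfs_fadvise(), which may take filesystem locks and sleep, so the hunk pins the file with get_file(), drops mmap_sem for the call, reacquires it, and tells the caller about the drop via *prev = NULL. The pin/unlock/call/relock shape in a small pthread sketch (all names hypothetical):

#include <pthread.h>
#include <stdatomic.h>

struct file_ref { atomic_int refcount; };

static void file_get(struct file_ref *f) { atomic_fetch_add(&f->refcount, 1); }
static void file_put(struct file_ref *f) { atomic_fetch_sub(&f->refcount, 1); }

static void blocking_advise(struct file_ref *f) { (void)f; /* may sleep */ }

/* Caller holds mmap_lock for reading, mirroring sys_madvise(). */
static void advise_and_drop(pthread_rwlock_t *mmap_lock, struct file_ref *f)
{
    file_get(f);                      /* pin: f must outlive the unlock */
    pthread_rwlock_unlock(mmap_lock); /* like up_read(&mm->mmap_sem) */
    blocking_advise(f);               /* free to sleep with the lock dropped */
    pthread_rwlock_rdlock(mmap_lock); /* like down_read(&mm->mmap_sem) */
    file_put(f);
}
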
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 30ebecf67527..3ef243948993 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2719,6 +2719,16 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
+
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
+ !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
++
++ /*
++ * Enforce __GFP_NOFAIL allocation because callers are not
++ * prepared to see failures and likely do not have any failure
++ * handling code.
++ */
++ if (gfp & __GFP_NOFAIL) {
++ page_counter_charge(&memcg->kmem, nr_pages);
++ return 0;
++ }
+ cancel_charge(memcg, nr_pages);
+ return -ENOMEM;
+ }
+diff --git a/mm/oom_kill.c b/mm/oom_kill.c
+index f719b64741d6..f649d49eb359 100644
+--- a/mm/oom_kill.c
++++ b/mm/oom_kill.c
+@@ -1060,9 +1060,10 @@ bool out_of_memory(struct oom_control *oc)
+ * The OOM killer does not compensate for IO-less reclaim.
+ * pagefault_out_of_memory lost its gfp context so we have to
+ * make sure exclude 0 mask - all other users should have at least
+- * ___GFP_DIRECT_RECLAIM to get here.
++ * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
++ * invoke the OOM killer even if it is a GFP_NOFS allocation.
+ */
+- if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
++ if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
+ return true;
+
+ /*
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 185c07eac0da..8f87c05a27e1 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -297,14 +297,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
+ }
+
+ /* Initializes the z3fold header of a newly allocated z3fold page */
+-static struct z3fold_header *init_z3fold_page(struct page *page,
++static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
+ struct z3fold_pool *pool, gfp_t gfp)
+ {
+ struct z3fold_header *zhdr = page_address(page);
+- struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
+-
+- if (!slots)
+- return NULL;
++ struct z3fold_buddy_slots *slots;
+
+ INIT_LIST_HEAD(&page->lru);
+ clear_bit(PAGE_HEADLESS, &page->private);
+@@ -312,6 +309,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
+ clear_bit(NEEDS_COMPACTING, &page->private);
+ clear_bit(PAGE_STALE, &page->private);
+ clear_bit(PAGE_CLAIMED, &page->private);
++ if (headless)
++ return zhdr;
++
++ slots = alloc_slots(pool, gfp);
++ if (!slots)
++ return NULL;
+
+ spin_lock_init(&zhdr->page_lock);
+ kref_init(&zhdr->refcount);
+@@ -368,9 +371,10 @@ static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
+ * Encodes the handle of a particular buddy within a z3fold page
+ * Pool lock should be held as this function accesses first_num
+ */
+-static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
++static unsigned long __encode_handle(struct z3fold_header *zhdr,
++ struct z3fold_buddy_slots *slots,
++ enum buddy bud)
+ {
+- struct z3fold_buddy_slots *slots;
+ unsigned long h = (unsigned long)zhdr;
+ int idx = 0;
+
+@@ -387,11 +391,15 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
+ if (bud == LAST)
+ h |= (zhdr->last_chunks << BUDDY_SHIFT);
+
+- slots = zhdr->slots;
+ slots->slot[idx] = h;
+ return (unsigned long)&slots->slot[idx];
+ }
+
++static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
++{
++ return __encode_handle(zhdr, zhdr->slots, bud);
++}
++
+ /* Returns the z3fold page where a given handle is stored */
+ static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
+ {
+@@ -626,6 +634,7 @@ static void do_compact_page(struct z3fold_header *zhdr, bool locked)
+ }
+
+ if (unlikely(PageIsolated(page) ||
++ test_bit(PAGE_CLAIMED, &page->private) ||
+ test_bit(PAGE_STALE, &page->private))) {
+ z3fold_page_unlock(zhdr);
+ return;
+@@ -926,7 +935,7 @@ retry:
+ if (!page)
+ return -ENOMEM;
+
+- zhdr = init_z3fold_page(page, pool, gfp);
++ zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
+ if (!zhdr) {
+ __free_page(page);
+ return -ENOMEM;
+@@ -1102,6 +1111,7 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ struct z3fold_header *zhdr = NULL;
+ struct page *page = NULL;
+ struct list_head *pos;
++ struct z3fold_buddy_slots slots;
+ unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
+
+ spin_lock(&pool->lock);
+@@ -1120,16 +1130,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+ /* this bit could have been set by free, in which case
+ * we pass over to the next page in the pool.
+ */
+- if (test_and_set_bit(PAGE_CLAIMED, &page->private))
++ if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
++ page = NULL;
+ continue;
++ }
+
+- if (unlikely(PageIsolated(page)))
++ if (unlikely(PageIsolated(page))) {
++ clear_bit(PAGE_CLAIMED, &page->private);
++ page = NULL;
+ continue;
++ }
++ zhdr = page_address(page);
+ if (test_bit(PAGE_HEADLESS, &page->private))
+ break;
+
+- zhdr = page_address(page);
+ if (!z3fold_page_trylock(zhdr)) {
++ clear_bit(PAGE_CLAIMED, &page->private);
+ zhdr = NULL;
+ continue; /* can't evict at this point */
+ }
+@@ -1147,26 +1163,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
+
+ if (!test_bit(PAGE_HEADLESS, &page->private)) {
+ /*
+- * We need encode the handles before unlocking, since
+- * we can race with free that will set
+- * (first|last)_chunks to 0
++ * We need to encode the handles before unlocking, and
++ * use our local slots structure because z3fold_free
++ * can zero out zhdr->slots and we can't do much
++ * about that
+ */
+ first_handle = 0;
+ last_handle = 0;
+ middle_handle = 0;
+ if (zhdr->first_chunks)
+- first_handle = encode_handle(zhdr, FIRST);
++ first_handle = __encode_handle(zhdr, &slots,
++ FIRST);
+ if (zhdr->middle_chunks)
+- middle_handle = encode_handle(zhdr, MIDDLE);
++ middle_handle = __encode_handle(zhdr, &slots,
++ MIDDLE);
+ if (zhdr->last_chunks)
+- last_handle = encode_handle(zhdr, LAST);
++ last_handle = __encode_handle(zhdr, &slots,
++ LAST);
+ /*
+ * it's safe to unlock here because we hold a
+ * reference to this page
+ */
+ z3fold_page_unlock(zhdr);
+ } else {
+- first_handle = encode_handle(zhdr, HEADLESS);
++ first_handle = __encode_handle(zhdr, &slots, HEADLESS);
+ last_handle = middle_handle = 0;
+ }
+
+@@ -1196,9 +1216,9 @@ next:
+ spin_lock(&pool->lock);
+ list_add(&page->lru, &pool->lru);
+ spin_unlock(&pool->lock);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ } else {
+ z3fold_page_lock(zhdr);
+- clear_bit(PAGE_CLAIMED, &page->private);
+ if (kref_put(&zhdr->refcount,
+ release_z3fold_page_locked)) {
+ atomic64_dec(&pool->pages_nr);
+@@ -1213,6 +1233,7 @@ next:
+ list_add(&page->lru, &pool->lru);
+ spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
++ clear_bit(PAGE_CLAIMED, &page->private);
+ }
+
+ /* We started off locked so we need to lock the pool back */
+@@ -1317,7 +1338,8 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+- if (test_bit(PAGE_HEADLESS, &page->private))
++ if (test_bit(PAGE_HEADLESS, &page->private) ||
++ test_bit(PAGE_CLAIMED, &page->private))
+ return false;
+
+ zhdr = page_address(page);
+diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
+index a8cb6b2e20c1..5a203acdcae5 100644
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1023,6 +1023,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
+ */
+ if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
+ goto out;
++
++ rc = -EPERM;
++ if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
++ goto out;
++
+ rc = -ENOMEM;
+ sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
+ if (!sk)
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index ca5207767dc2..bb222b882b67 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -855,6 +855,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
+ break;
+
+ case SOCK_RAW:
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
+index dacbd58e1799..0bce822b95d0 100644
+--- a/net/ieee802154/socket.c
++++ b/net/ieee802154/socket.c
+@@ -1008,6 +1008,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
+
+ switch (sock->type) {
+ case SOCK_RAW:
++ rc = -EPERM;
++ if (!capable(CAP_NET_RAW))
++ goto out;
+ proto = &ieee802154_raw_prot;
+ ops = &ieee802154_raw_ops;
+ break;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 7fd6db3fe366..e4fa9e5833e4 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -560,7 +560,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
+ rt = ip_route_output_flow(net, fl4, sk);
+ if (IS_ERR(rt))
+ goto no_route;
+- if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
++ if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ goto route_err;
+ rcu_read_unlock();
+ return &rt->dst;
+@@ -598,7 +598,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
+ rt = ip_route_output_flow(net, fl4, sk);
+ if (IS_ERR(rt))
+ goto no_route;
+- if (opt && opt->opt.is_strictroute && rt->rt_gw_family)
++ if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
+ goto route_err;
+ return &rt->dst;
+
+diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
+index 06f6f280b9ff..00ec819f949b 100644
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -123,7 +123,7 @@ int ip_forward(struct sk_buff *skb)
+
+ rt = skb_rtable(skb);
+
+- if (opt->is_strictroute && rt->rt_gw_family)
++ if (opt->is_strictroute && rt->rt_uses_gateway)
+ goto sr_failed;
+
+ IPCB(skb)->flags |= IPSKB_FORWARDED;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 8c2ec35b6512..b6f3a9f0b8be 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -482,7 +482,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ skb_dst_set_noref(skb, &rt->dst);
+
+ packet_routed:
+- if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gw_family)
++ if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
+ goto no_route;
+
+ /* OK, we know where to send it, allocate and build IP header. */
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 8ea0735a6754..f6b7b11835ee 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -634,6 +634,7 @@ static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnh
+
+ if (fnhe->fnhe_gw) {
+ rt->rt_flags |= RTCF_REDIRECTED;
++ rt->rt_uses_gateway = 1;
+ rt->rt_gw_family = AF_INET;
+ rt->rt_gw4 = fnhe->fnhe_gw;
+ }
+@@ -1312,7 +1313,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
+ mtu = READ_ONCE(dst->dev->mtu);
+
+ if (unlikely(ip_mtu_locked(dst))) {
+- if (rt->rt_gw_family && mtu > 576)
++ if (rt->rt_uses_gateway && mtu > 576)
+ mtu = 576;
+ }
+
+@@ -1569,6 +1570,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+
+ if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
++ rt->rt_uses_gateway = 1;
+ rt->rt_gw_family = nhc->nhc_gw_family;
+ /* only INET and INET6 are supported */
+ if (likely(nhc->nhc_gw_family == AF_INET))
+@@ -1634,6 +1636,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
+ rt->rt_iif = 0;
+ rt->rt_pmtu = 0;
+ rt->rt_mtu_locked = 0;
++ rt->rt_uses_gateway = 0;
+ rt->rt_gw_family = 0;
+ rt->rt_gw4 = 0;
+ INIT_LIST_HEAD(&rt->rt_uncached);
+@@ -2664,6 +2667,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
+ rt->rt_genid = rt_genid_ipv4(net);
+ rt->rt_flags = ort->rt_flags;
+ rt->rt_type = ort->rt_type;
++ rt->rt_uses_gateway = ort->rt_uses_gateway;
+ rt->rt_gw_family = ort->rt_gw_family;
+ if (rt->rt_gw_family == AF_INET)
+ rt->rt_gw4 = ort->rt_gw4;
+@@ -2747,21 +2751,23 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
+ goto nla_put_failure;
+ }
+- if (rt->rt_gw_family == AF_INET &&
+- nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
+- goto nla_put_failure;
+- } else if (rt->rt_gw_family == AF_INET6) {
+- int alen = sizeof(struct in6_addr);
+- struct nlattr *nla;
+- struct rtvia *via;
+-
+- nla = nla_reserve(skb, RTA_VIA, alen + 2);
+- if (!nla)
++ if (rt->rt_uses_gateway) {
++ if (rt->rt_gw_family == AF_INET &&
++ nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
+ goto nla_put_failure;
++ } else if (rt->rt_gw_family == AF_INET6) {
++ int alen = sizeof(struct in6_addr);
++ struct nlattr *nla;
++ struct rtvia *via;
+
+- via = nla_data(nla);
+- via->rtvia_family = AF_INET6;
+- memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
++ nla = nla_reserve(skb, RTA_VIA, alen + 2);
++ if (!nla)
++ goto nla_put_failure;
++
++ via = nla_data(nla);
++ via->rtvia_family = AF_INET6;
++ memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
++ }
+ }
+
+ expires = rt->dst.expires;
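
The route.h and route.c hunks reintroduce rt_uses_gateway as a flag separate from rt_gw_family: a redirect-learned exception (fill_route_from_fnhe) stores a gateway even on an otherwise direct route, and strict-source-route rejection, locked-MTU clamping, and the netlink dump should key off "does this route go via a gateway" rather than "is a gateway address encoded". Reduced to a sketch (simplified fields, not the real struct rtable):

#include <stdbool.h>
#include <stdint.h>

/* The flag answers "is there a next hop?"; gw_family only describes
 * how the stored address is encoded. */
struct route {
    bool uses_gateway;
    uint8_t gw_family;   /* 0, or an AF_INET/AF_INET6-style tag */
    uint32_t gw4;
};

/* Strict source routing must fail when the route goes via a gateway. */
static bool strict_route_ok(const struct route *rt)
{
    return !rt->uses_gateway;
}

/* Locked-MTU clamping likewise keys off the flag, not the family. */
static unsigned int clamp_locked_mtu(const struct route *rt, unsigned int mtu)
{
    return (rt->uses_gateway && mtu > 576) ? 576 : mtu;
}
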
+diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
+index 56be7d27f208..00ade9c185ea 100644
+--- a/net/ipv4/tcp_bbr.c
++++ b/net/ipv4/tcp_bbr.c
+@@ -386,7 +386,7 @@ static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+-static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
++static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
+ {
+ struct bbr *bbr = inet_csk_ca(sk);
+
+@@ -397,7 +397,7 @@ static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain)
+ cwnd = (cwnd + 1) & ~1U;
+
+ /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
+- if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
++ if (bbr->mode == BBR_PROBE_BW && bbr->cycle_idx == 0)
+ cwnd += 2;
+
+ return cwnd;
+@@ -409,7 +409,7 @@ static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
+ u32 inflight;
+
+ inflight = bbr_bdp(sk, bw, gain);
+- inflight = bbr_quantization_budget(sk, inflight, gain);
++ inflight = bbr_quantization_budget(sk, inflight);
+
+ return inflight;
+ }
+@@ -529,7 +529,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
+ * due to aggregation (of data and/or ACKs) visible in the ACK stream.
+ */
+ target_cwnd += bbr_ack_aggregation_cwnd(sk);
+- target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain);
++ target_cwnd = bbr_quantization_budget(sk, target_cwnd);
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index c801cd37cc2a..3e8b38c73d8c 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -210,7 +210,7 @@ static int tcp_write_timeout(struct sock *sk)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+- bool expired, do_reset;
++ bool expired = false, do_reset;
+ int retry_until;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+@@ -242,9 +242,10 @@ static int tcp_write_timeout(struct sock *sk)
+ if (tcp_out_of_resources(sk, do_reset))
+ return 1;
+ }
++ }
++ if (!expired)
+ expired = retransmits_timed_out(sk, retry_until,
+ icsk->icsk_user_timeout);
+- }
+ tcp_fastopen_active_detect_blackhole(sk, expired);
+
+ if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index cdef8f9a3b01..35b84b52b702 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -85,6 +85,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
+ RTCF_LOCAL);
+ xdst->u.rt.rt_type = rt->rt_type;
++ xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
+ xdst->u.rt.rt_gw_family = rt->rt_gw_family;
+ if (rt->rt_gw_family == AF_INET)
+ xdst->u.rt.rt_gw4 = rt->rt_gw4;
+diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
+index bcfae13409b5..67b60b1da922 100644
+--- a/net/ipv6/fib6_rules.c
++++ b/net/ipv6/fib6_rules.c
+@@ -285,7 +285,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
+ return false;
+
+ suppress_route:
+- ip6_rt_put(rt);
++ if (!(arg->flags & FIB_LOOKUP_NOREF))
++ ip6_rt_put(rt);
+ return true;
+ }
+
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 9b8742947aff..8dfea26536c9 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -1004,10 +1004,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
+ sock->type != SOCK_RAW)
+ return -ESOCKTNOSUPPORT;
+
+- if (sock->type == SOCK_RAW)
++ if (sock->type == SOCK_RAW) {
++ if (!capable(CAP_NET_RAW))
++ return -EPERM;
+ sock->ops = &llcp_rawsock_ops;
+- else
++ } else {
+ sock->ops = &llcp_sock_ops;
++ }
+
+ sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
+ if (sk == NULL)
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 6747bc57b6fa..c29e9f766bd3 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2245,7 +2245,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
+ [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+- [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
++ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
+ [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+ [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 6c8b0f6d28f9..88f98f27ad88 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -150,6 +150,7 @@ static void __qrtr_node_release(struct kref *kref)
+ list_del(&node->item);
+ mutex_unlock(&qrtr_node_lock);
+
++ cancel_work_sync(&node->work);
+ skb_queue_purge(&node->rx_queue);
+ kfree(node);
+ }
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 4e5d2e9ace5d..061e0d047f4f 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -828,6 +828,15 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
+ return c;
+ }
+
++static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
++ [TCA_ACT_KIND] = { .type = NLA_NUL_STRING,
++ .len = IFNAMSIZ - 1 },
++ [TCA_ACT_INDEX] = { .type = NLA_U32 },
++ [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
++ .len = TC_COOKIE_MAX_SIZE },
++ [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
++};
++
+ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind,
+@@ -843,8 +852,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ int err;
+
+ if (name == NULL) {
+- err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL,
+- extack);
++ err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
++ tcf_action_policy, extack);
+ if (err < 0)
+ goto err_out;
+ err = -EINVAL;
+@@ -853,18 +862,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
+ NL_SET_ERR_MSG(extack, "TC action kind must be specified");
+ goto err_out;
+ }
+- if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+- NL_SET_ERR_MSG(extack, "TC action name too long");
+- goto err_out;
+- }
+- if (tb[TCA_ACT_COOKIE]) {
+- int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+-
+- if (cklen > TC_COOKIE_MAX_SIZE) {
+- NL_SET_ERR_MSG(extack, "TC cookie size above the maximum");
+- goto err_out;
+- }
++ nla_strlcpy(act_name, kind, IFNAMSIZ);
+
++ if (tb[TCA_ACT_COOKIE]) {
+ cookie = nla_memdup_cookie(tb);
+ if (!cookie) {
+ NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
+@@ -1095,7 +1095,8 @@ static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
+ int index;
+ int err;
+
+- err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
++ err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
++ tcf_action_policy, extack);
+ if (err < 0)
+ goto err_out;
+
+@@ -1149,7 +1150,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+
+ b = skb_tail_pointer(skb);
+
+- err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla, NULL, extack);
++ err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
++ tcf_action_policy, extack);
+ if (err < 0)
+ goto err_out;
+
+@@ -1437,7 +1439,7 @@ static struct nlattr *find_dump_kind(struct nlattr **nla)
+
+ if (tb[1] == NULL)
+ return NULL;
+- if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
++ if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
+ return NULL;
+ kind = tb2[TCA_ACT_KIND];
+
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 10229124a992..86344fd2ff1f 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -146,6 +146,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
++ case ARPHRD_IP6GRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return false;
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index b67c456f26aa..526cb48e7b8a 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3031,8 +3031,10 @@ out:
+ void tcf_exts_destroy(struct tcf_exts *exts)
+ {
+ #ifdef CONFIG_NET_CLS_ACT
+- tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
+- kfree(exts->actions);
++ if (exts->actions) {
++ tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
++ kfree(exts->actions);
++ }
+ exts->nr_actions = 0;
+ #endif
+ }
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 1047825d9f48..81d58b280612 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1390,7 +1390,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
+ }
+
+ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+- [TCA_KIND] = { .type = NLA_STRING },
++ [TCA_KIND] = { .type = NLA_NUL_STRING,
++ .len = IFNAMSIZ - 1 },
+ [TCA_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct tc_estimator) },
+ [TCA_STAB] = { .type = NLA_NESTED },
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index 810645b5c086..4a403d35438f 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -392,7 +392,6 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+ {
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+- int err;
+
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
+@@ -404,6 +403,10 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+ if (!q->qdisc)
+ return -ENOMEM;
+
++ spin_lock(&cbs_list_lock);
++ list_add(&q->cbs_list, &cbs_list);
++ spin_unlock(&cbs_list_lock);
++
+ qdisc_hash_add(q->qdisc, false);
+
+ q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
+@@ -413,17 +416,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+- err = cbs_change(sch, opt, extack);
+- if (err)
+- return err;
+-
+- if (!q->offload) {
+- spin_lock(&cbs_list_lock);
+- list_add(&q->cbs_list, &cbs_list);
+- spin_unlock(&cbs_list_lock);
+- }
+-
+- return 0;
++ return cbs_change(sch, opt, extack);
+ }
+
+ static void cbs_destroy(struct Qdisc *sch)
+@@ -431,15 +424,18 @@ static void cbs_destroy(struct Qdisc *sch)
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+- spin_lock(&cbs_list_lock);
+- list_del(&q->cbs_list);
+- spin_unlock(&cbs_list_lock);
++ /* Nothing to do if we couldn't create the underlying qdisc */
++ if (!q->qdisc)
++ return;
+
+ qdisc_watchdog_cancel(&q->watchdog);
+ cbs_disable_offload(dev, q);
+
+- if (q->qdisc)
+- qdisc_put(q->qdisc);
++ spin_lock(&cbs_list_lock);
++ list_del(&q->cbs_list);
++ spin_unlock(&cbs_list_lock);
++
++ qdisc_put(q->qdisc);
+ }
+
+ static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb)
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index b17f2ed970e2..f5cb35e550f8 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -777,7 +777,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
+ struct disttable *d;
+ int i;
+
+- if (n > NETEM_DIST_MAX)
++ if (!n || n > NETEM_DIST_MAX)
+ return -EINVAL;
+
+ d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index fbb85ea24ea0..0d79321f51ee 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1785,6 +1785,7 @@ rpc_xdr_encode(struct rpc_task *task)
+ req->rq_rbuffer,
+ req->rq_rcvsize);
+
++ req->rq_reply_bytes_recvd = 0;
+ req->rq_snd_buf.head[0].iov_len = 0;
+ xdr_init_encode(&xdr, &req->rq_snd_buf,
+ req->rq_snd_buf.head[0].iov_base, req);
+@@ -1804,6 +1805,8 @@ call_encode(struct rpc_task *task)
+ if (!rpc_task_need_encode(task))
+ goto out;
+ dprint_status(task);
++ /* Dequeue task from the receive queue while we're encoding */
++ xprt_request_dequeue_xprt(task);
+ /* Encode here so that rpcsec_gss can use correct sequence number. */
+ rpc_xdr_encode(task);
+ /* Did the encode result in an error condition? */
+@@ -2437,9 +2440,6 @@ call_decode(struct rpc_task *task)
+ return;
+ case -EAGAIN:
+ task->tk_status = 0;
+- xdr_free_bvec(&req->rq_rcv_buf);
+- req->rq_reply_bytes_recvd = 0;
+- req->rq_rcv_buf.len = 0;
+ if (task->tk_client->cl_discrtry)
+ xprt_conditional_disconnect(req->rq_xprt,
+ req->rq_connect_cookie);
+diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
+index 48c93b9e525e..b256806d69cd 100644
+--- a/net/sunrpc/xdr.c
++++ b/net/sunrpc/xdr.c
+@@ -1237,16 +1237,29 @@ xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
+ EXPORT_SYMBOL_GPL(xdr_encode_word);
+
+ /* If the netobj starting offset bytes from the start of xdr_buf is contained
+- * entirely in the head or the tail, set object to point to it; otherwise
+- * try to find space for it at the end of the tail, copy it there, and
+- * set obj to point to it. */
++ * entirely in the head, pages, or tail, set object to point to it; otherwise
++ * shift the buffer until it is contained entirely within the pages or tail.
++ */
+ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
+ {
+ struct xdr_buf subbuf;
++ unsigned int boundary;
+
+ if (xdr_decode_word(buf, offset, &obj->len))
+ return -EFAULT;
+- if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
++ offset += 4;
++
++ /* Is the obj partially in the head? */
++ boundary = buf->head[0].iov_len;
++ if (offset < boundary && (offset + obj->len) > boundary)
++ xdr_shift_buf(buf, boundary - offset);
++
++ /* Is the obj partially in the pages? */
++ boundary += buf->page_len;
++ if (offset < boundary && (offset + obj->len) > boundary)
++ xdr_shrink_pagelen(buf, boundary - offset);
++
++ if (xdr_buf_subsegment(buf, &subbuf, offset, obj->len))
+ return -EFAULT;
+
+ /* Is the obj contained entirely in the head? */
+@@ -1258,11 +1271,7 @@ int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned in
+ if (subbuf.tail[0].iov_len == obj->len)
+ return 0;
+
+- /* use end of tail as storage for obj:
+- * (We don't copy to the beginning because then we'd have
+- * to worry about doing a potentially overlapping copy.
+- * This assumes the object is at most half the length of the
+- * tail.) */
++ /* Find a contiguous area in @buf to hold all of @obj */
+ if (obj->len > buf->buflen - buf->len)
+ return -ENOMEM;
+ if (buf->tail[0].iov_len != 0)
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index f7a995bd2a6c..76999de7b949 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1295,6 +1295,36 @@ xprt_request_dequeue_transmit(struct rpc_task *task)
+ spin_unlock(&xprt->queue_lock);
+ }
+
++/**
++ * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
++ * @task: pointer to rpc_task
++ *
++ * Remove a task from the transmit and receive queues, and ensure that
++ * it is not pinned by the receive work item.
++ */
++void
++xprt_request_dequeue_xprt(struct rpc_task *task)
++{
++ struct rpc_rqst *req = task->tk_rqstp;
++ struct rpc_xprt *xprt = req->rq_xprt;
++
++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
++ test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
++ xprt_is_pinned_rqst(req)) {
++ spin_lock(&xprt->queue_lock);
++ xprt_request_dequeue_transmit_locked(task);
++ xprt_request_dequeue_receive_locked(task);
++ while (xprt_is_pinned_rqst(req)) {
++ set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
++ spin_unlock(&xprt->queue_lock);
++ xprt_wait_on_pinned_rqst(req);
++ spin_lock(&xprt->queue_lock);
++ clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
++ }
++ spin_unlock(&xprt->queue_lock);
++ }
++}
++
+ /**
+ * xprt_request_prepare - prepare an encoded request for transport
+ * @req: pointer to rpc_rqst
+@@ -1719,28 +1749,6 @@ void xprt_retry_reserve(struct rpc_task *task)
+ xprt_do_reserve(xprt, task);
+ }
+
+-static void
+-xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
+-{
+- struct rpc_xprt *xprt = req->rq_xprt;
+-
+- if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
+- test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
+- xprt_is_pinned_rqst(req)) {
+- spin_lock(&xprt->queue_lock);
+- xprt_request_dequeue_transmit_locked(task);
+- xprt_request_dequeue_receive_locked(task);
+- while (xprt_is_pinned_rqst(req)) {
+- set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+- spin_unlock(&xprt->queue_lock);
+- xprt_wait_on_pinned_rqst(req);
+- spin_lock(&xprt->queue_lock);
+- clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+- }
+- spin_unlock(&xprt->queue_lock);
+- }
+-}
+-
+ /**
+ * xprt_release - release an RPC request slot
+ * @task: task which is finished with the slot
+@@ -1764,7 +1772,7 @@ void xprt_release(struct rpc_task *task)
+ task->tk_ops->rpc_count_stats(task, task->tk_calldata);
+ else if (task->tk_client)
+ rpc_count_iostats(task, task->tk_client->cl_metrics);
+- xprt_request_dequeue_all(task, req);
++ xprt_request_dequeue_xprt(task);
+ spin_lock_bh(&xprt->transport_lock);
+ xprt->ops->release_xprt(xprt, task);
+ if (xprt->ops->release_request)
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index e74837824cea..f68818dbac1a 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -960,6 +960,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ }
+
+ cfg80211_process_rdev_events(rdev);
++ cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
+ }
+
+ err = rdev_change_virtual_intf(rdev, dev, ntype, params);
+diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
+index 6410bd22fe38..03757cc60e06 100644
+--- a/scripts/Makefile.kasan
++++ b/scripts/Makefile.kasan
+@@ -1,4 +1,9 @@
+ # SPDX-License-Identifier: GPL-2.0
++ifdef CONFIG_KASAN
++CFLAGS_KASAN_NOSANITIZE := -fno-builtin
++KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
++endif
++
+ ifdef CONFIG_KASAN_GENERIC
+
+ ifdef CONFIG_KASAN_INLINE
+@@ -7,8 +12,6 @@ else
+ call_threshold := 0
+ endif
+
+-KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+-
+ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+
+ cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+@@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
+ $(instrumentation_flags)
+
+ endif # CONFIG_KASAN_SW_TAGS
+-
+-ifdef CONFIG_KASAN
+-CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+-endif
+diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
+index 6d5bbd31db7f..bd29e4e7a524 100644
+--- a/scripts/gcc-plugins/randomize_layout_plugin.c
++++ b/scripts/gcc-plugins/randomize_layout_plugin.c
+@@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
+ if (node == fieldtype)
+ continue;
+
+- if (!is_fptr(fieldtype))
+- return 0;
+-
+- if (code != RECORD_TYPE && code != UNION_TYPE)
++ if (code == RECORD_TYPE || code == UNION_TYPE) {
++ if (!is_pure_ops_struct(fieldtype))
++ return 0;
+ continue;
++ }
+
+- if (!is_pure_ops_struct(fieldtype))
++ if (!is_fptr(fieldtype))
+ return 0;
+ }
+
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index ade699131065..1fbd77816610 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1228,11 +1228,16 @@ hashalg_fail:
+
+ static int __init init_digests(void)
+ {
++ int i;
++
+ digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ GFP_KERNEL);
+ if (!digests)
+ return -ENOMEM;
+
++ for (i = 0; i < chip->nr_allocated_banks; i++)
++ digests[i].alg_id = chip->allocated_banks[i].alg_id;
++
+ return 0;
+ }
+
+diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
+index 03cda2166ea3..72908b4de77c 100644
+--- a/sound/firewire/motu/motu.c
++++ b/sound/firewire/motu/motu.c
+@@ -247,6 +247,17 @@ static const struct snd_motu_spec motu_audio_express = {
+ .analog_out_ports = 4,
+ };
+
++static const struct snd_motu_spec motu_4pre = {
++ .name = "4pre",
++ .protocol = &snd_motu_protocol_v3,
++ .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
++ SND_MOTU_SPEC_TX_MICINST_CHUNK |
++ SND_MOTU_SPEC_TX_RETURN_CHUNK |
++ SND_MOTU_SPEC_RX_SEPARETED_MAIN,
++ .analog_in_ports = 2,
++ .analog_out_ports = 2,
++};
++
+ #define SND_MOTU_DEV_ENTRY(model, data) \
+ { \
+ .match_flags = IEEE1394_MATCH_VENDOR_ID | \
+@@ -265,6 +276,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
+ SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
+ SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
+ SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
++ SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
+ { }
+ };
+ MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
+diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
+index a8cd9b156488..67d8b80b471e 100644
+--- a/sound/firewire/tascam/tascam-pcm.c
++++ b/sound/firewire/tascam/tascam-pcm.c
+@@ -56,6 +56,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
+ goto err_locked;
+
+ err = snd_tscm_stream_get_clock(tscm, &clock);
++ if (err < 0)
++ goto err_locked;
++
+ if (clock != SND_TSCM_CLOCK_INTERNAL ||
+ amdtp_stream_pcm_running(&tscm->rx_stream) ||
+ amdtp_stream_pcm_running(&tscm->tx_stream)) {
+diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
+index e6fcd9e19961..3cb0bbfa03c6 100644
+--- a/sound/firewire/tascam/tascam-stream.c
++++ b/sound/firewire/tascam/tascam-stream.c
+@@ -8,20 +8,37 @@
+ #include <linux/delay.h>
+ #include "tascam.h"
+
++#define CLOCK_STATUS_MASK 0xffff0000
++#define CLOCK_CONFIG_MASK 0x0000ffff
++
+ #define CALLBACK_TIMEOUT 500
+
+ static int get_clock(struct snd_tscm *tscm, u32 *data)
+ {
++ int trial = 0;
+ __be32 reg;
+ int err;
+
+- err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
+- TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
+- &reg, sizeof(reg), 0);
+- if (err >= 0)
++ while (trial++ < 5) {
++ err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
++ TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
++ &reg, sizeof(reg), 0);
++ if (err < 0)
++ return err;
++
+ *data = be32_to_cpu(reg);
++ if (*data & CLOCK_STATUS_MASK)
++ break;
+
+- return err;
++ // In intermediate state after changing clock status.
++ msleep(50);
++ }
++
++ // Still in the intermediate state.
++ if (trial >= 5)
++ return -EAGAIN;
++
++ return 0;
+ }
+
+ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+@@ -34,7 +51,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+ err = get_clock(tscm, &data);
+ if (err < 0)
+ return err;
+- data &= 0x0000ffff;
++ data &= CLOCK_CONFIG_MASK;
+
+ if (rate > 0) {
+ data &= 0x000000ff;
+@@ -79,17 +96,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
+
+ int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
+ {
+- u32 data = 0x0;
+- unsigned int trials = 0;
++ u32 data;
+ int err;
+
+- while (data == 0x0 || trials++ < 5) {
+- err = get_clock(tscm, &data);
+- if (err < 0)
+- return err;
++ err = get_clock(tscm, &data);
++ if (err < 0)
++ return err;
+
+- data = (data & 0xff000000) >> 24;
+- }
++ data = (data & 0xff000000) >> 24;
+
+ /* Check base rate. */
+ if ((data & 0x0f) == 0x01)
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index 812dc144fb5b..2258804c5857 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -445,6 +445,8 @@ static void azx_int_disable(struct hdac_bus *bus)
+ list_for_each_entry(azx_dev, &bus->stream_list, list)
+ snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
+
++ synchronize_irq(bus->irq);
++
+ /* disable SIE for all streams */
+ snd_hdac_chip_writeb(bus, INTCTL, 0);
+
+diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
+index 5f59316f982a..7d15093844b9 100644
+--- a/sound/i2c/other/ak4xxx-adda.c
++++ b/sound/i2c/other/ak4xxx-adda.c
+@@ -775,11 +775,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
+ return err;
+
+ memset(&knew, 0, sizeof(knew));
+- knew.name = ak->adc_info[mixer_ch].selector_name;
+- if (!knew.name) {
++ if (!ak->adc_info ||
++ !ak->adc_info[mixer_ch].selector_name) {
+ knew.name = "Capture Channel";
+ knew.index = mixer_ch + ak->idx_offset * 2;
+- }
++ } else
++ knew.name = ak->adc_info[mixer_ch].selector_name;
+
+ knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ knew.info = ak4xxx_capture_source_info;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 106328584998..c2a6554c9877 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -846,7 +846,13 @@ static void snd_hda_codec_dev_release(struct device *dev)
+ snd_hda_sysfs_clear(codec);
+ kfree(codec->modelname);
+ kfree(codec->wcaps);
+- kfree(codec);
++
++ /*
++ * In the case of ASoC HD-audio, hda_codec is device managed.
++ * It will be freed when the ASoC device is removed.
++ */
++ if (codec->core.type == HDA_DEV_LEGACY)
++ kfree(codec);
+ }
+
+ #define DEV_NAME_LEN 31
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index dd96def48a3a..1158bcf55148 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -869,10 +869,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
+ */
+ if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
+ hbus->response_reset = 1;
++ dev_err(chip->card->dev,
++ "No response from codec, resetting bus: last cmd=0x%08x\n",
++ bus->last_cmd[addr]);
+ return -EAGAIN; /* give a chance to retry */
+ }
+
+- dev_err(chip->card->dev,
++ dev_WARN(chip->card->dev,
+ "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
+ bus->last_cmd[addr]);
+ chip->single_cmd = 1;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index e7da1a59884a..03dd532967bd 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1349,9 +1349,9 @@ static int azx_free(struct azx *chip)
+ }
+
+ if (bus->chip_init) {
++ azx_stop_chip(chip);
+ azx_clear_irq_pending(chip);
+ azx_stop_all_streams(chip);
+- azx_stop_chip(chip);
+ }
+
+ if (bus->irq >= 0)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index e49c1c00f5ce..ca0404edd939 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2611,6 +2611,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
+ /* precondition and allocation for Intel codecs */
+ static int alloc_intel_hdmi(struct hda_codec *codec)
+ {
++ int err;
++
+ /* requires i915 binding */
+ if (!codec->bus->core.audio_component) {
+ codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
+@@ -2619,7 +2621,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
+ return -ENODEV;
+ }
+
+- return alloc_generic_hdmi(codec);
++ err = alloc_generic_hdmi(codec);
++ if (err < 0)
++ return err;
++ /* no need to handle unsol events */
++ codec->patch_ops.unsol_event = NULL;
++ return 0;
+ }
+
+ /* parse and post-process for Intel codecs */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c1ddfd2fac52..36aee8ad2054 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1058,6 +1058,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
+ SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
+ SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
+ SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
++ /* blacklist -- no beep available */
++ SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
++ SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
+ {}
+ };
+
+@@ -3755,6 +3758,72 @@ static void alc269_x101_hp_automute_hook(struct hda_codec *codec,
+ vref);
+ }
+
++/*
++ * Magic sequence to make Huawei Matebook X right speaker working (bko#197801)
++ */
++struct hda_alc298_mbxinit {
++ unsigned char value_0x23;
++ unsigned char value_0x25;
++};
++
++static void alc298_huawei_mbx_stereo_seq(struct hda_codec *codec,
++ const struct hda_alc298_mbxinit *initval,
++ bool first)
++{
++ snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x0);
++ alc_write_coef_idx(codec, 0x26, 0xb000);
++
++ if (first)
++ snd_hda_codec_write(codec, 0x21, 0, AC_VERB_GET_PIN_SENSE, 0x0);
++
++ snd_hda_codec_write(codec, 0x6, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
++ alc_write_coef_idx(codec, 0x26, 0xf000);
++ alc_write_coef_idx(codec, 0x23, initval->value_0x23);
++
++ if (initval->value_0x23 != 0x1e)
++ alc_write_coef_idx(codec, 0x25, initval->value_0x25);
++
++ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
++ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);
++}
++
++static void alc298_fixup_huawei_mbx_stereo(struct hda_codec *codec,
++ const struct hda_fixup *fix,
++ int action)
++{
++ /* Initialization magic */
++ static const struct hda_alc298_mbxinit dac_init[] = {
++ {0x0c, 0x00}, {0x0d, 0x00}, {0x0e, 0x00}, {0x0f, 0x00},
++ {0x10, 0x00}, {0x1a, 0x40}, {0x1b, 0x82}, {0x1c, 0x00},
++ {0x1d, 0x00}, {0x1e, 0x00}, {0x1f, 0x00},
++ {0x20, 0xc2}, {0x21, 0xc8}, {0x22, 0x26}, {0x23, 0x24},
++ {0x27, 0xff}, {0x28, 0xff}, {0x29, 0xff}, {0x2a, 0x8f},
++ {0x2b, 0x02}, {0x2c, 0x48}, {0x2d, 0x34}, {0x2e, 0x00},
++ {0x2f, 0x00},
++ {0x30, 0x00}, {0x31, 0x00}, {0x32, 0x00}, {0x33, 0x00},
++ {0x34, 0x00}, {0x35, 0x01}, {0x36, 0x93}, {0x37, 0x0c},
++ {0x38, 0x00}, {0x39, 0x00}, {0x3a, 0xf8}, {0x38, 0x80},
++ {}
++ };
++ const struct hda_alc298_mbxinit *seq;
++
++ if (action != HDA_FIXUP_ACT_INIT)
++ return;
++
++ /* Start */
++ snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x00);
++ snd_hda_codec_write(codec, 0x06, 0, AC_VERB_SET_DIGI_CONVERT_3, 0x80);
++ alc_write_coef_idx(codec, 0x26, 0xf000);
++ alc_write_coef_idx(codec, 0x22, 0x31);
++ alc_write_coef_idx(codec, 0x23, 0x0b);
++ alc_write_coef_idx(codec, 0x25, 0x00);
++ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, 0x26);
++ snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_PROC_COEF, 0xb010);
++
++ for (seq = dac_init; seq->value_0x23; seq++)
++ alc298_huawei_mbx_stereo_seq(codec, seq, seq == dac_init);
++}
++
+ static void alc269_fixup_x101_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+ {
+@@ -5780,6 +5849,7 @@ enum {
+ ALC255_FIXUP_DUMMY_LINEOUT_VERB,
+ ALC255_FIXUP_DELL_HEADSET_MIC,
+ ALC256_FIXUP_HUAWEI_MACH_WX9_PINS,
++ ALC298_FIXUP_HUAWEI_MBX_STEREO,
+ ALC295_FIXUP_HP_X360,
+ ALC221_FIXUP_HP_HEADSET_MIC,
+ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+@@ -5800,6 +5870,7 @@ enum {
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
++ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6089,6 +6160,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
+ },
++ [ALC298_FIXUP_HUAWEI_MBX_STEREO] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc298_fixup_huawei_mbx_stereo,
++ .chained = true,
++ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
++ },
+ [ALC269_FIXUP_ASUS_X101_FUNC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_x101_headset_mic,
+@@ -6850,6 +6927,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
++ [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x04a11040 },
++ { 0x21, 0x04211020 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7113,6 +7200,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -7280,6 +7368,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
+ {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
+ {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
++ {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
++ {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/atmel/mchp-i2s-mcc.c b/sound/soc/atmel/mchp-i2s-mcc.c
+index 86495883ca3f..ab7d5f98e759 100644
+--- a/sound/soc/atmel/mchp-i2s-mcc.c
++++ b/sound/soc/atmel/mchp-i2s-mcc.c
+@@ -670,8 +670,13 @@ static int mchp_i2s_mcc_hw_params(struct snd_pcm_substream *substream,
+ }
+
+ ret = regmap_write(dev->regmap, MCHP_I2SMCC_MRA, mra);
+- if (ret < 0)
++ if (ret < 0) {
++ if (dev->gclk_use) {
++ clk_unprepare(dev->gclk);
++ dev->gclk_use = 0;
++ }
+ return ret;
++ }
+ return regmap_write(dev->regmap, MCHP_I2SMCC_MRB, mrb);
+ }
+
+@@ -686,31 +691,37 @@ static int mchp_i2s_mcc_hw_free(struct snd_pcm_substream *substream,
+ err = wait_event_interruptible_timeout(dev->wq_txrdy,
+ dev->tx_rdy,
+ msecs_to_jiffies(500));
++ if (err == 0) {
++ dev_warn_once(dev->dev,
++ "Timeout waiting for Tx ready\n");
++ regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
++ MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels));
++ dev->tx_rdy = 1;
++ }
+ } else {
+ err = wait_event_interruptible_timeout(dev->wq_rxrdy,
+ dev->rx_rdy,
+ msecs_to_jiffies(500));
+- }
+-
+- if (err == 0) {
+- u32 idra;
+-
+- dev_warn_once(dev->dev, "Timeout waiting for %s\n",
+- is_playback ? "Tx ready" : "Rx ready");
+- if (is_playback)
+- idra = MCHP_I2SMCC_INT_TXRDY_MASK(dev->channels);
+- else
+- idra = MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels);
+- regmap_write(dev->regmap, MCHP_I2SMCC_IDRA, idra);
++ if (err == 0) {
++ dev_warn_once(dev->dev,
++ "Timeout waiting for Rx ready\n");
++ regmap_write(dev->regmap, MCHP_I2SMCC_IDRA,
++ MCHP_I2SMCC_INT_RXRDY_MASK(dev->channels));
++ dev->rx_rdy = 1;
++ }
+ }
+
+ if (!mchp_i2s_mcc_is_running(dev)) {
+ regmap_write(dev->regmap, MCHP_I2SMCC_CR, MCHP_I2SMCC_CR_CKDIS);
+
+ if (dev->gclk_running) {
+- clk_disable_unprepare(dev->gclk);
++ clk_disable(dev->gclk);
+ dev->gclk_running = 0;
+ }
++ if (dev->gclk_use) {
++ clk_unprepare(dev->gclk);
++ dev->gclk_use = 0;
++ }
+ }
+
+ return 0;
+@@ -809,6 +820,8 @@ static int mchp_i2s_mcc_dai_probe(struct snd_soc_dai *dai)
+
+ init_waitqueue_head(&dev->wq_txrdy);
+ init_waitqueue_head(&dev->wq_rxrdy);
++ dev->tx_rdy = 1;
++ dev->rx_rdy = 1;
+
+ snd_soc_dai_init_dma_data(dai, &dev->playback, &dev->capture);
+
+diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
+index 6db002cc2058..96d04896193f 100644
+--- a/sound/soc/codecs/es8316.c
++++ b/sound/soc/codecs/es8316.c
+@@ -51,7 +51,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
+ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
+-static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
++static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
++ 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
++ 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
++);
+
+ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
+@@ -89,7 +92,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
+ SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
+ 4, 0, 3, 1, hpout_vol_tlv),
+ SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
+- 0, 4, 7, 0, hpmixer_gain_tlv),
++ 0, 4, 11, 0, hpmixer_gain_tlv),
+
+ SOC_ENUM("Playback Polarity", dacpol),
+ SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
+diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
+index 7d4940256914..91242b6f8ea7 100644
+--- a/sound/soc/codecs/hdac_hda.c
++++ b/sound/soc/codecs/hdac_hda.c
+@@ -495,6 +495,10 @@ static int hdac_hda_dev_probe(struct hdac_device *hdev)
+
+ static int hdac_hda_dev_remove(struct hdac_device *hdev)
+ {
++ struct hdac_hda_priv *hda_pvt;
++
++ hda_pvt = dev_get_drvdata(&hdev->dev);
++ cancel_delayed_work_sync(&hda_pvt->codec.jackpoll_work);
+ return 0;
+ }
+
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index a6a4748c97f9..7cbaedffa1ef 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1173,12 +1173,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
+ SGTL5000_INT_OSC_EN);
+ /* Enable VDDC charge pump */
+ ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
+- } else if (vddio >= 3100 && vdda >= 3100) {
++ } else {
+ ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
+- /* VDDC use VDDIO rail */
+- lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
+- lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
+- SGTL5000_VDDC_MAN_ASSN_SHIFT;
++ /*
++ * if vddio == vdda the source of charge pump should be
++ * assigned manually to VDDIO
++ */
++ if (vddio == vdda) {
++ lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
++ lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
++ SGTL5000_VDDC_MAN_ASSN_SHIFT;
++ }
+ }
+
+ snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
+@@ -1288,6 +1293,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
+ int ret;
+ u16 reg;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
++ unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
+
+ /* power up sgtl5000 */
+ ret = sgtl5000_set_power_regs(component);
+@@ -1315,9 +1321,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
+ 0x1f);
+ snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
+
+- snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
+- SGTL5000_HP_ZCD_EN |
+- SGTL5000_ADC_ZCD_EN);
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
++ zcd_mask, zcd_mask);
+
+ snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
+ SGTL5000_BIAS_R_MASK,
+diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
+index 9b37e98da0db..26a4f6cd3288 100644
+--- a/sound/soc/codecs/tlv320aic31xx.c
++++ b/sound/soc/codecs/tlv320aic31xx.c
+@@ -1553,7 +1553,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+ aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(aic31xx->gpio_reset)) {
+- dev_err(aic31xx->dev, "not able to acquire gpio\n");
++ if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
++ dev_err(aic31xx->dev, "not able to acquire gpio\n");
+ return PTR_ERR(aic31xx->gpio_reset);
+ }
+
+@@ -1564,7 +1565,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
+ ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
+ if (ret) {
+- dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
++ if (ret != -EPROBE_DEFER)
++ dev_err(aic31xx->dev,
++ "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
+index 09b2967befd9..d83be26d6446 100644
+--- a/sound/soc/fsl/fsl_ssi.c
++++ b/sound/soc/fsl/fsl_ssi.c
+@@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
+ u32 wl = SSI_SxCCR_WL(sample_size);
+ int ret;
+
+- /*
+- * SSI is properly configured if it is enabled and running in
+- * the synchronous mode; Note that AC97 mode is an exception
+- * that should set separate configurations for STCCR and SRCCR
+- * despite running in the synchronous mode.
+- */
+- if (ssi->streams && ssi->synchronous)
+- return 0;
+-
+ if (fsl_ssi_is_i2s_master(ssi)) {
+ ret = fsl_ssi_set_bclk(substream, dai, hw_params);
+ if (ret)
+@@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
+ }
+ }
+
++ /*
++ * SSI is properly configured if it is enabled and running in
++ * the synchronous mode; Note that AC97 mode is an exception
++ * that should set separate configurations for STCCR and SRCCR
++ * despite running in the synchronous mode.
++ */
++ if (ssi->streams && ssi->synchronous)
++ return 0;
++
+ if (!fsl_ssi_is_ac97(ssi)) {
+ /*
+ * Keep the ssi->i2s_net intact while having a local variable
+diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
+index 0e8e0a7a11df..5854868650b9 100644
+--- a/sound/soc/intel/common/sst-acpi.c
++++ b/sound/soc/intel/common/sst-acpi.c
+@@ -141,11 +141,12 @@ static int sst_acpi_probe(struct platform_device *pdev)
+ }
+
+ platform_set_drvdata(pdev, sst_acpi);
++ mach->pdata = sst_pdata;
+
+ /* register machine driver */
+ sst_acpi->pdev_mach =
+ platform_device_register_data(dev, mach->drv_name, -1,
+- sst_pdata, sizeof(*sst_pdata));
++ mach, sizeof(*mach));
+ if (IS_ERR(sst_acpi->pdev_mach))
+ return PTR_ERR(sst_acpi->pdev_mach);
+
+diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
+index b95411ed0b62..9b659c130724 100644
+--- a/sound/soc/intel/common/sst-ipc.c
++++ b/sound/soc/intel/common/sst-ipc.c
+@@ -222,6 +222,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
+
+ if (ipc->ops.reply_msg_match != NULL)
+ header = ipc->ops.reply_msg_match(header, &mask);
++ else
++ mask = (u64)-1;
+
+ if (list_empty(&ipc->rx_list)) {
+ dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
+diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
+index 69cbe9eb026b..bffc6a9619fc 100644
+--- a/sound/soc/intel/skylake/skl-debug.c
++++ b/sound/soc/intel/skylake/skl-debug.c
+@@ -188,7 +188,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
+ memset(d->fw_read_buff, 0, FW_REG_BUF);
+
+ if (w0_stat_sz > 0)
+- __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
++ __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+
+ for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
+ ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
+index 1132109cb992..e01815cec6fd 100644
+--- a/sound/soc/intel/skylake/skl-nhlt.c
++++ b/sound/soc/intel/skylake/skl-nhlt.c
+@@ -225,7 +225,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct device *dev = bus->dev;
+
+- dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
++ dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
+ nhlt->header.oem_id, nhlt->header.oem_table_id,
+ nhlt->header.oem_revision);
+
+diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
+index e821ccc70f47..141d9a030c59 100644
+--- a/sound/soc/sh/rcar/adg.c
++++ b/sound/soc/sh/rcar/adg.c
+@@ -30,6 +30,7 @@ struct rsnd_adg {
+ struct clk *clkout[CLKOUTMAX];
+ struct clk_onecell_data onecell;
+ struct rsnd_mod mod;
++ int clk_rate[CLKMAX];
+ u32 flags;
+ u32 ckr;
+ u32 rbga;
+@@ -113,9 +114,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
+ unsigned int val, en;
+ unsigned int min, diff;
+ unsigned int sel_rate[] = {
+- clk_get_rate(adg->clk[CLKA]), /* 0000: CLKA */
+- clk_get_rate(adg->clk[CLKB]), /* 0001: CLKB */
+- clk_get_rate(adg->clk[CLKC]), /* 0010: CLKC */
++ adg->clk_rate[CLKA], /* 0000: CLKA */
++ adg->clk_rate[CLKB], /* 0001: CLKB */
++ adg->clk_rate[CLKC], /* 0010: CLKC */
+ adg->rbga_rate_for_441khz, /* 0011: RBGA */
+ adg->rbgb_rate_for_48khz, /* 0100: RBGB */
+ };
+@@ -301,7 +302,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
+ * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
+ */
+ for_each_rsnd_clk(clk, adg, i) {
+- if (rate == clk_get_rate(clk))
++ if (rate == adg->clk_rate[i])
+ return sel_table[i];
+ }
+
+@@ -368,10 +369,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
+
+ for_each_rsnd_clk(clk, adg, i) {
+ ret = 0;
+- if (enable)
++ if (enable) {
+ ret = clk_prepare_enable(clk);
+- else
++
++ /*
++ * We shouldn't use clk_get_rate() under
++ * atomic context. Let's keep it when
++ * rsnd_adg_clk_enable() was called
++ */
++ adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
++ } else {
+ clk_disable_unprepare(clk);
++ }
+
+ if (ret < 0)
+ dev_warn(dev, "can't use clk %d\n", i);
+diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
+index 748f5f641002..d93db2c2b527 100644
+--- a/sound/soc/soc-generic-dmaengine-pcm.c
++++ b/sound/soc/soc-generic-dmaengine-pcm.c
+@@ -306,6 +306,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
+
+ if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
+ pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
++
++ if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
++ strncpy(rtd->pcm->streams[i].pcm->name,
++ rtd->pcm->streams[i].pcm->id,
++ sizeof(rtd->pcm->streams[i].pcm->name));
++ }
+ }
+
+ return 0;
+diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
+index b8b37f082309..0d8437b080bf 100644
+--- a/sound/soc/sof/intel/hda-codec.c
++++ b/sound/soc/sof/intel/hda-codec.c
+@@ -62,8 +62,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
+ address, resp);
+
+ #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
+- /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
+- hda_priv = kzalloc(sizeof(*hda_priv), GFP_KERNEL);
++ hda_priv = devm_kzalloc(sdev->dev, sizeof(*hda_priv), GFP_KERNEL);
+ if (!hda_priv)
+ return -ENOMEM;
+
+@@ -82,8 +81,7 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address)
+
+ return 0;
+ #else
+- /* snd_hdac_ext_bus_device_exit will use kfree to free hdev */
+- hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
++ hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+diff --git a/sound/soc/sof/sof-pci-dev.c b/sound/soc/sof/sof-pci-dev.c
+index b778dffb2d25..49daf1390dac 100644
+--- a/sound/soc/sof/sof-pci-dev.c
++++ b/sound/soc/sof/sof-pci-dev.c
+@@ -203,6 +203,9 @@ static void sof_pci_probe_complete(struct device *dev)
+ */
+ pm_runtime_allow(dev);
+
++ /* mark last_busy for pm_runtime to make sure not suspend immediately */
++ pm_runtime_mark_last_busy(dev);
++
+ /* follow recommendation in pci-driver.c to decrement usage counter */
+ pm_runtime_put_noidle(dev);
+ }
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index fd7c37596f21..1d946a192708 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -219,10 +219,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
+ };
+
+ static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
+- unsigned int oversample_rate,
++ unsigned long parent_rate,
++ unsigned int sampling_rate,
+ unsigned int word_size)
+ {
+- int div = oversample_rate / word_size / 2;
++ int div = parent_rate / sampling_rate / word_size / 2;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
+@@ -312,8 +313,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
+ return -EINVAL;
+ }
+
+- bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
+- word_size);
++ bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
++ rate, word_size);
+ if (bclk_div < 0) {
+ dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
+ return -EINVAL;
+diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
+index ee90e6c3937c..2ae582a99b63 100644
+--- a/sound/soc/uniphier/aio-cpu.c
++++ b/sound/soc/uniphier/aio-cpu.c
+@@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
+ {
+ struct uniphier_aio *aio = uniphier_priv(dai);
+
+- reset_control_assert(aio->chip->rst);
+- clk_disable_unprepare(aio->chip->clk);
++ aio->chip->num_wup_aios--;
++ if (!aio->chip->num_wup_aios) {
++ reset_control_assert(aio->chip->rst);
++ clk_disable_unprepare(aio->chip->clk);
++ }
+
+ return 0;
+ }
+@@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+ if (!aio->chip->active)
+ return 0;
+
+- ret = clk_prepare_enable(aio->chip->clk);
+- if (ret)
+- return ret;
++ if (!aio->chip->num_wup_aios) {
++ ret = clk_prepare_enable(aio->chip->clk);
++ if (ret)
++ return ret;
+
+- ret = reset_control_deassert(aio->chip->rst);
+- if (ret)
+- goto err_out_clock;
++ ret = reset_control_deassert(aio->chip->rst);
++ if (ret)
++ goto err_out_clock;
++ }
+
+ aio_iecout_set_enable(aio->chip, true);
+ aio_chip_init(aio->chip);
+@@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+
+ ret = aio_init(sub);
+ if (ret)
+- goto err_out_clock;
++ goto err_out_reset;
+
+ if (!sub->setting)
+ continue;
+@@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
+ aio_port_reset(sub);
+ aio_src_reset(sub);
+ }
++ aio->chip->num_wup_aios++;
+
+ return 0;
+
++err_out_reset:
++ if (!aio->chip->num_wup_aios)
++ reset_control_assert(aio->chip->rst);
+ err_out_clock:
+- clk_disable_unprepare(aio->chip->clk);
++ if (!aio->chip->num_wup_aios)
++ clk_disable_unprepare(aio->chip->clk);
+
+ return ret;
+ }
+@@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
+ return PTR_ERR(chip->rst);
+
+ chip->num_aios = chip->chip_spec->num_dais;
++ chip->num_wup_aios = chip->num_aios;
+ chip->aios = devm_kcalloc(dev,
+ chip->num_aios, sizeof(struct uniphier_aio),
+ GFP_KERNEL);
+diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
+index ca6ccbae0ee8..a7ff7e556429 100644
+--- a/sound/soc/uniphier/aio.h
++++ b/sound/soc/uniphier/aio.h
+@@ -285,6 +285,7 @@ struct uniphier_aio_chip {
+
+ struct uniphier_aio *aios;
+ int num_aios;
++ int num_wup_aios;
+ struct uniphier_aio_pll *plls;
+ int num_plls;
+
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index e4bbf79de956..33cd26763c0e 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -457,6 +457,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
+ }
+ ep = get_endpoint(alts, 1)->bEndpointAddress;
+ if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
++ get_endpoint(alts, 0)->bSynchAddress != 0 &&
+ ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
+ (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+ dev_err(&dev->dev,
+diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
+index 57aaeaf8e192..edba4d93e9e6 100644
+--- a/tools/include/uapi/asm/bitsperlong.h
++++ b/tools/include/uapi/asm/bitsperlong.h
+@@ -1,22 +1,22 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ #if defined(__i386__) || defined(__x86_64__)
+-#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
+ #elif defined(__aarch64__)
+-#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
+ #elif defined(__powerpc__)
+-#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
+ #elif defined(__s390__)
+-#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
+ #elif defined(__sparc__)
+-#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
+ #elif defined(__mips__)
+-#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
+ #elif defined(__ia64__)
+-#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
+ #elif defined(__riscv)
+-#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
+ #elif defined(__alpha__)
+-#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
++#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+ #else
+ #include <asm-generic/bitsperlong.h>
+ #endif
+diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
+index 3292c290654f..86ce17a1f7fb 100644
+--- a/tools/lib/traceevent/Makefile
++++ b/tools/lib/traceevent/Makefile
+@@ -62,15 +62,15 @@ set_plugin_dir := 1
+
+ # Set plugin_dir to preffered global plugin location
+ # If we install under $HOME directory we go under
+-# $(HOME)/.traceevent/plugins
++# $(HOME)/.local/lib/traceevent/plugins
+ #
+ # We dont set PLUGIN_DIR in case we install under $HOME
+ # directory, because by default the code looks under:
+-# $(HOME)/.traceevent/plugins by default.
++# $(HOME)/.local/lib/traceevent/plugins by default.
+ #
+ ifeq ($(plugin_dir),)
+ ifeq ($(prefix),$(HOME))
+-override plugin_dir = $(HOME)/.traceevent/plugins
++override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
+ set_plugin_dir := 0
+ else
+ override plugin_dir = $(libdir)/traceevent/plugins
+diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
+index 8ca28de9337a..e1f7ddd5a6cf 100644
+--- a/tools/lib/traceevent/event-plugin.c
++++ b/tools/lib/traceevent/event-plugin.c
+@@ -18,7 +18,7 @@
+ #include "event-utils.h"
+ #include "trace-seq.h"
+
+-#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
++#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
+
+ static struct registered_plugin_options {
+ struct registered_plugin_options *next;
+diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
+index 865a9762f22e..3f84403c0983 100644
+--- a/tools/perf/arch/x86/util/kvm-stat.c
++++ b/tools/perf/arch/x86/util/kvm-stat.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <errno.h>
+-#include "../../util/kvm-stat.h"
+-#include "../../util/evsel.h"
++#include "../../../util/kvm-stat.h"
++#include "../../../util/evsel.h"
+ #include <asm/svm.h>
+ #include <asm/vmx.h>
+ #include <asm/kvm.h>
+diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
+index 950539f9a4f7..b1eb963b4a6e 100644
+--- a/tools/perf/arch/x86/util/tsc.c
++++ b/tools/perf/arch/x86/util/tsc.c
+@@ -5,10 +5,10 @@
+ #include <linux/stddef.h>
+ #include <linux/perf_event.h>
+
+-#include "../../perf.h"
++#include "../../../perf.h"
+ #include <linux/types.h>
+-#include "../../util/debug.h"
+-#include "../../util/tsc.h"
++#include "../../../util/debug.h"
++#include "../../../util/tsc.h"
+
+ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+ struct perf_tsc_conversion *tc)
+diff --git a/tools/perf/perf.c b/tools/perf/perf.c
+index 72df4b6fa36f..4c45cdf38ada 100644
+--- a/tools/perf/perf.c
++++ b/tools/perf/perf.c
+@@ -440,6 +440,9 @@ int main(int argc, const char **argv)
+
+ srandom(time(NULL));
+
++ /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
++ config_exclusive_filename = getenv("PERF_CONFIG");
++
+ err = perf_config(perf_default_config, NULL);
+ if (err)
+ return err;
+diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+index 147efeb6b195..e97f55ba61c2 100755
+--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+@@ -31,6 +31,10 @@ if [ $err -ne 0 ] ; then
+ exit $err
+ fi
+
++# Do not use whatever ~/.perfconfig file, it may change the output
++# via trace.{show_timestamp,show_prefix,etc}
++export PERF_CONFIG=/dev/null
++
+ trace_open_vfs_getname
+ err=$?
+ rm -f ${file}
+diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
+index 52242fa4072b..e19eb6ea361d 100644
+--- a/tools/perf/trace/beauty/ioctl.c
++++ b/tools/perf/trace/beauty/ioctl.c
+@@ -21,7 +21,7 @@
+ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
+ {
+ static const char *ioctl_tty_cmd[] = {
+- "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
++ [_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
+ "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
+ "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
+ "TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
+diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
+index 27cf3ab88d13..f4edb18f67ec 100644
+--- a/tools/perf/ui/browsers/scripts.c
++++ b/tools/perf/ui/browsers/scripts.c
+@@ -131,8 +131,10 @@ static int list_scripts(char *script_name, bool *custom,
+ int key = ui_browser__input_window("perf script command",
+ "Enter perf script command line (without perf script prefix)",
+ script_args, "", 0);
+- if (key != K_ENTER)
+- return -1;
++ if (key != K_ENTER) {
++ ret = -1;
++ goto out;
++ }
+ sprintf(script_name, "%s script %s", perf, script_args);
+ } else if (choice < num + max_std) {
+ strcpy(script_name, paths[choice]);
+diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
+index b3c421429ed4..54bcd08df87e 100644
+--- a/tools/perf/ui/helpline.c
++++ b/tools/perf/ui/helpline.c
+@@ -3,10 +3,10 @@
+ #include <stdlib.h>
+ #include <string.h>
+
+-#include "../debug.h"
++#include "../util/debug.h"
+ #include "helpline.h"
+ #include "ui.h"
+-#include "../util.h"
++#include "../util/util.h"
+
+ char ui_helpline__current[512];
+
+diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
+index 63bf06e80ab9..9ed76e88a3e4 100644
+--- a/tools/perf/ui/util.c
++++ b/tools/perf/ui/util.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include "util.h"
+-#include "../debug.h"
++#include "../util/debug.h"
+
+
+ /*
+diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
+index a474ede17cd6..001bb444d205 100644
+--- a/tools/perf/util/evlist.c
++++ b/tools/perf/util/evlist.c
+@@ -21,6 +21,7 @@
+ #include "bpf-event.h"
+ #include <signal.h>
+ #include <unistd.h>
++#include <sched.h>
+
+ #include "parse-events.h"
+ #include <subcmd/parse-options.h>
+@@ -1870,6 +1871,14 @@ static void *perf_evlist__poll_thread(void *arg)
+ struct perf_evlist *evlist = arg;
+ bool draining = false;
+ int i, done = 0;
++ /*
++ * In order to read symbols from other namespaces perf to needs to call
++ * setns(2). This isn't permitted if the struct_fs has multiple users.
++ * unshare(2) the fs so that we may continue to setns into namespaces
++ * that we're observing when, for instance, reading the build-ids at
++ * the end of a 'perf record' session.
++ */
++ unshare(CLONE_FS);
+
+ while (!done) {
+ bool got_data = false;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index abe9af867967..1bc72fe47c2d 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -2205,8 +2205,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
+ /* On s390 the socket_id number is not related to the numbers of cpus.
+ * The socket_id number might be higher than the numbers of cpus.
+ * This depends on the configuration.
++ * AArch64 is the same.
+ */
+- if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
++ if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
++ || !strncmp(ph->env.arch, "aarch64", 7)))
+ do_core_id_test = false;
+
+ for (i = 0; i < (u32)cpu_nr; i++) {
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index 7ace7a10054d..966c248d6a3a 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -193,7 +193,10 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+ hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+- hists__new_col_len(hists, HISTC_TIME, 12);
++ if (symbol_conf.nanosecs)
++ hists__new_col_len(hists, HISTC_TIME, 16);
++ else
++ hists__new_col_len(hists, HISTC_TIME, 12);
+
+ if (h->srcline) {
+ len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index 9c81ee092784..3fc32c7075d1 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -641,6 +641,7 @@ struct map_groups *map_groups__new(struct machine *machine)
+ void map_groups__delete(struct map_groups *mg)
+ {
+ map_groups__exit(mg);
++ unwind__finish_access(mg);
+ free(mg);
+ }
+
+@@ -881,7 +882,7 @@ int map_groups__clone(struct thread *thread, struct map_groups *parent)
+ if (new == NULL)
+ goto out_unlock;
+
+- err = unwind__prepare_access(thread, new, NULL);
++ err = unwind__prepare_access(mg, new, NULL);
+ if (err)
+ goto out_unlock;
+
+diff --git a/tools/perf/util/map_groups.h b/tools/perf/util/map_groups.h
+index 4dcda33e0fdf..db1e4ffc2276 100644
+--- a/tools/perf/util/map_groups.h
++++ b/tools/perf/util/map_groups.h
+@@ -31,6 +31,10 @@ struct map_groups {
+ struct maps maps;
+ struct machine *machine;
+ refcount_t refcnt;
++#ifdef HAVE_LIBUNWIND_SUPPORT
++ void *addr_space;
++ struct unwind_libunwind_ops *unwind_libunwind_ops;
++#endif
+ };
+
+ #define KMAP_NAME_LEN 256
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 4a9f88d9b7ab..12608f14c991 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -105,7 +105,6 @@ void thread__delete(struct thread *thread)
+ }
+ up_write(&thread->comm_lock);
+
+- unwind__finish_access(thread);
+ nsinfo__zput(thread->nsinfo);
+ srccode_state_free(&thread->srccode_state);
+
+@@ -235,7 +234,7 @@ static int ____thread__set_comm(struct thread *thread, const char *str,
+ list_add(&new->list, &thread->comm_list);
+
+ if (exec)
+- unwind__flush_access(thread);
++ unwind__flush_access(thread->mg);
+ }
+
+ thread->comm_set = true;
+@@ -315,7 +314,7 @@ int thread__insert_map(struct thread *thread, struct map *map)
+ {
+ int ret;
+
+- ret = unwind__prepare_access(thread, map, NULL);
++ ret = unwind__prepare_access(thread->mg, map, NULL);
+ if (ret)
+ return ret;
+
+@@ -335,7 +334,7 @@ static int __thread__prepare_access(struct thread *thread)
+ down_read(&maps->lock);
+
+ for (map = maps__first(maps); map; map = map__next(map)) {
+- err = unwind__prepare_access(thread, map, &initialized);
++ err = unwind__prepare_access(thread->mg, map, &initialized);
+ if (err || initialized)
+ break;
+ }
+diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
+index cf8375c017a0..6e5dd445bafd 100644
+--- a/tools/perf/util/thread.h
++++ b/tools/perf/util/thread.h
+@@ -44,10 +44,6 @@ struct thread {
+ struct thread_stack *ts;
+ struct nsinfo *nsinfo;
+ struct srccode_state srccode_state;
+-#ifdef HAVE_LIBUNWIND_SUPPORT
+- void *addr_space;
+- struct unwind_libunwind_ops *unwind_libunwind_ops;
+-#endif
+ bool filter;
+ int filter_entry_depth;
+ };
+diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
+index 25e1406b1f8b..da59672b566d 100644
+--- a/tools/perf/util/unwind-libunwind-local.c
++++ b/tools/perf/util/unwind-libunwind-local.c
+@@ -615,26 +615,26 @@ static unw_accessors_t accessors = {
+ .get_proc_name = get_proc_name,
+ };
+
+-static int _unwind__prepare_access(struct thread *thread)
++static int _unwind__prepare_access(struct map_groups *mg)
+ {
+- thread->addr_space = unw_create_addr_space(&accessors, 0);
+- if (!thread->addr_space) {
++ mg->addr_space = unw_create_addr_space(&accessors, 0);
++ if (!mg->addr_space) {
+ pr_err("unwind: Can't create unwind address space.\n");
+ return -ENOMEM;
+ }
+
+- unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
++ unw_set_caching_policy(mg->addr_space, UNW_CACHE_GLOBAL);
+ return 0;
+ }
+
+-static void _unwind__flush_access(struct thread *thread)
++static void _unwind__flush_access(struct map_groups *mg)
+ {
+- unw_flush_cache(thread->addr_space, 0, 0);
++ unw_flush_cache(mg->addr_space, 0, 0);
+ }
+
+-static void _unwind__finish_access(struct thread *thread)
++static void _unwind__finish_access(struct map_groups *mg)
+ {
+- unw_destroy_addr_space(thread->addr_space);
++ unw_destroy_addr_space(mg->addr_space);
+ }
+
+ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+@@ -659,7 +659,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+ */
+ if (max_stack - 1 > 0) {
+ WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
+- addr_space = ui->thread->addr_space;
++ addr_space = ui->thread->mg->addr_space;
+
+ if (addr_space == NULL)
+ return -1;
+diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
+index c0811977d7d5..b843f9d0a9ea 100644
+--- a/tools/perf/util/unwind-libunwind.c
++++ b/tools/perf/util/unwind-libunwind.c
+@@ -11,13 +11,13 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
+ struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
+ struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
+
+-static void unwind__register_ops(struct thread *thread,
++static void unwind__register_ops(struct map_groups *mg,
+ struct unwind_libunwind_ops *ops)
+ {
+- thread->unwind_libunwind_ops = ops;
++ mg->unwind_libunwind_ops = ops;
+ }
+
+-int unwind__prepare_access(struct thread *thread, struct map *map,
++int unwind__prepare_access(struct map_groups *mg, struct map *map,
+ bool *initialized)
+ {
+ const char *arch;
+@@ -28,7 +28,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
+ if (!dwarf_callchain_users)
+ return 0;
+
+- if (thread->addr_space) {
++ if (mg->addr_space) {
+ pr_debug("unwind: thread map already set, dso=%s\n",
+ map->dso->name);
+ if (initialized)
+@@ -37,14 +37,14 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
+ }
+
+ /* env->arch is NULL for live-mode (i.e. perf top) */
+- if (!thread->mg->machine->env || !thread->mg->machine->env->arch)
++ if (!mg->machine->env || !mg->machine->env->arch)
+ goto out_register;
+
+- dso_type = dso__type(map->dso, thread->mg->machine);
++ dso_type = dso__type(map->dso, mg->machine);
+ if (dso_type == DSO__TYPE_UNKNOWN)
+ return 0;
+
+- arch = perf_env__arch(thread->mg->machine->env);
++ arch = perf_env__arch(mg->machine->env);
+
+ if (!strcmp(arch, "x86")) {
+ if (dso_type != DSO__TYPE_64BIT)
+@@ -59,37 +59,37 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
+ return 0;
+ }
+ out_register:
+- unwind__register_ops(thread, ops);
++ unwind__register_ops(mg, ops);
+
+- err = thread->unwind_libunwind_ops->prepare_access(thread);
++ err = mg->unwind_libunwind_ops->prepare_access(mg);
+ if (initialized)
+ *initialized = err ? false : true;
+ return err;
+ }
+
+-void unwind__flush_access(struct thread *thread)
++void unwind__flush_access(struct map_groups *mg)
+ {
+ if (!dwarf_callchain_users)
+ return;
+
+- if (thread->unwind_libunwind_ops)
+- thread->unwind_libunwind_ops->flush_access(thread);
++ if (mg->unwind_libunwind_ops)
++ mg->unwind_libunwind_ops->flush_access(mg);
+ }
+
+-void unwind__finish_access(struct thread *thread)
++void unwind__finish_access(struct map_groups *mg)
+ {
+ if (!dwarf_callchain_users)
+ return;
+
+- if (thread->unwind_libunwind_ops)
+- thread->unwind_libunwind_ops->finish_access(thread);
++ if (mg->unwind_libunwind_ops)
++ mg->unwind_libunwind_ops->finish_access(mg);
+ }
+
+ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+ struct perf_sample *data, int max_stack)
+ {
+- if (thread->unwind_libunwind_ops)
+- return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
++ if (thread->mg->unwind_libunwind_ops)
++ return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+ return 0;
+ }
+diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
+index 8a44a1569a21..3a7d00c20d86 100644
+--- a/tools/perf/util/unwind.h
++++ b/tools/perf/util/unwind.h
+@@ -6,6 +6,7 @@
+ #include <linux/types.h>
+
+ struct map;
++struct map_groups;
+ struct perf_sample;
+ struct symbol;
+ struct thread;
+@@ -19,9 +20,9 @@ struct unwind_entry {
+ typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
+
+ struct unwind_libunwind_ops {
+- int (*prepare_access)(struct thread *thread);
+- void (*flush_access)(struct thread *thread);
+- void (*finish_access)(struct thread *thread);
++ int (*prepare_access)(struct map_groups *mg);
++ void (*flush_access)(struct map_groups *mg);
++ void (*finish_access)(struct map_groups *mg);
+ int (*get_entries)(unwind_entry_cb_t cb, void *arg,
+ struct thread *thread,
+ struct perf_sample *data, int max_stack);
+@@ -46,20 +47,20 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+ #endif
+
+ int LIBUNWIND__ARCH_REG_ID(int regnum);
+-int unwind__prepare_access(struct thread *thread, struct map *map,
++int unwind__prepare_access(struct map_groups *mg, struct map *map,
+ bool *initialized);
+-void unwind__flush_access(struct thread *thread);
+-void unwind__finish_access(struct thread *thread);
++void unwind__flush_access(struct map_groups *mg);
++void unwind__finish_access(struct map_groups *mg);
+ #else
+-static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
++static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
+ struct map *map __maybe_unused,
+ bool *initialized __maybe_unused)
+ {
+ return 0;
+ }
+
+-static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
+-static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
++static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
++static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
+ #endif
+ #else
+ static inline int
+@@ -72,14 +73,14 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
+ return 0;
+ }
+
+-static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
++static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
+ struct map *map __maybe_unused,
+ bool *initialized __maybe_unused)
+ {
+ return 0;
+ }
+
+-static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
+-static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
++static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
++static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
+ #endif /* HAVE_DWARF_UNWIND_SUPPORT */
+ #endif /* __UNWIND_H */
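
The hunks above move the unwinder state out of struct thread and into
struct map_groups, so every thread sharing an address space now shares a
single libunwind registration instead of each carrying its own. A minimal
user-space sketch of the new ownership; the struct and function names
below are illustrative stand-ins, not the real perf definitions:

#include <stdio.h>

/* Illustrative stand-ins for the perf structures; the real ones
 * live in tools/perf/util/{thread,map_groups}.h. */
struct unwind_ops { const char *name; };

struct map_groups {
	/* After the patch the ops live here, one per address space... */
	struct unwind_ops *unwind_libunwind_ops;
};

struct thread {
	/* ...so threads only point at their shared map_groups. */
	struct map_groups *mg;
};

static struct unwind_ops local_ops = { "local libunwind" };

static void register_ops(struct map_groups *mg, struct unwind_ops *ops)
{
	mg->unwind_libunwind_ops = ops;	/* registered once per process */
}

int main(void)
{
	struct map_groups mg = { 0 };
	struct thread t1 = { &mg }, t2 = { &mg };

	register_ops(t1.mg, &local_ops);
	/* t2 sees the same ops without a second registration. */
	printf("t2 ops: %s\n", t2.mg->unwind_libunwind_ops->name);
	return 0;
}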
+diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
+index 7ffe562e7ae7..2627b038b6f2 100644
+--- a/tools/perf/util/xyarray.h
++++ b/tools/perf/util/xyarray.h
+@@ -2,6 +2,7 @@
+ #ifndef _PERF_XYARRAY_H_
+ #define _PERF_XYARRAY_H_ 1
+
++#include <linux/compiler.h>
+ #include <sys/types.h>
+
+ struct xyarray {
+@@ -10,7 +11,7 @@ struct xyarray {
+ size_t entries;
+ size_t max_x;
+ size_t max_y;
+- char contents[];
++ char contents[] __aligned(8);
+ };
+
+ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
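
The xyarray change pins the flexible contents[] array to an 8-byte
boundary. Without the attribute, contents is only guaranteed char
alignment after the five size_t members, which on a 32-bit build puts it
at offset 20 and makes storing 64-bit counter values into it misaligned.
A small sketch of the difference (assumes the GCC/Clang aligned
attribute; the printed offsets depend on the ABI):

#include <stdio.h>
#include <stddef.h>

#define __aligned(x) __attribute__((aligned(x)))

struct xy_unaligned {
	size_t row_size, entry_size, entries, max_x, max_y;
	char contents[];
};

struct xy_aligned {
	size_t row_size, entry_size, entries, max_x, max_y;
	char contents[] __aligned(8);
};

int main(void)
{
	/* On LP64 both offsets happen to be 8-aligned already; on a
	 * 32-bit ABI the unaligned variant lands at offset 20, so
	 * __aligned(8) turns luck into a guarantee before u64 perf
	 * counter values are stored in contents. */
	printf("unaligned: %zu, aligned: %zu\n",
	       offsetof(struct xy_unaligned, contents),
	       offsetof(struct xy_aligned, contents));
	return 0;
}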
+diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
+index 9457aaeae092..df7a55d94b75 100755
+--- a/tools/testing/selftests/net/fib_tests.sh
++++ b/tools/testing/selftests/net/fib_tests.sh
+@@ -9,13 +9,15 @@ ret=0
+ ksft_skip=4
+
+ # all tests in this script. Can be overridden with -t option
+-TESTS="unregister down carrier nexthop ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw"
++TESTS="unregister down carrier nexthop suppress ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics ipv4_route_metrics ipv4_route_v6_gw"
+
+ VERBOSE=0
+ PAUSE_ON_FAIL=no
+ PAUSE=no
+ IP="ip -netns ns1"
+
++which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
++
+ log_test()
+ {
+ local rc=$1
+@@ -582,6 +584,20 @@ fib_nexthop_test()
+ cleanup
+ }
+
++fib_suppress_test()
++{
++ $IP link add dummy1 type dummy
++ $IP link set dummy1 up
++ $IP -6 route add default dev dummy1
++ $IP -6 rule add table main suppress_prefixlength 0
++ ping -f -c 1000 -W 1 1234::1 || true
++ $IP -6 rule del table main suppress_prefixlength 0
++ $IP link del dummy1
++
++ # If we got here without crashing, we're good.
++ return 0
++}
++
+ ################################################################################
+ # Tests on route add and replace
+
+@@ -1054,7 +1070,7 @@ ipv6_route_metrics_test()
+ log_test $rc 0 "Multipath route with mtu metric"
+
+ $IP -6 ro add 2001:db8:104::/64 via 2001:db8:101::2 mtu 1300
+- run_cmd "ip netns exec ns1 ping6 -w1 -c1 -s 1500 2001:db8:104::1"
++ run_cmd "ip netns exec ns1 ${ping6} -w1 -c1 -s 1500 2001:db8:104::1"
+ log_test $? 0 "Using route with mtu metric"
+
+ run_cmd "$IP -6 ro add 2001:db8:114::/64 via 2001:db8:101::2 congctl lock foo"
+@@ -1558,6 +1574,7 @@ do
+ fib_down_test|down) fib_down_test;;
+ fib_carrier_test|carrier) fib_carrier_test;;
+ fib_nexthop_test|nexthop) fib_nexthop_test;;
++ fib_suppress_test|suppress) fib_suppress_test;;
+ ipv6_route_test|ipv6_rt) ipv6_route_test;;
+ ipv4_route_test|ipv4_rt) ipv4_route_test;;
+ ipv6_addr_metric) ipv6_addr_metric_test;;
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-10-01 10:11 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-10-01 10:11 UTC (permalink / raw
To: gentoo-commits
commit: dc5b549ce32e4bfca0b4f2aaed1bd71e10e95ade
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 1 10:11:32 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 1 10:11:32 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dc5b549c
Linux patch 5.2.18
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1017_linux-5.2.18.patch | 1940 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1944 insertions(+)
diff --git a/0000_README b/0000_README
index 200ad40..dc5ec25 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch: 1016_linux-5.2.17.patch
From: https://www.kernel.org
Desc: Linux 5.2.17
+Patch: 1017_linux-5.2.18.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.18
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1017_linux-5.2.18.patch b/1017_linux-5.2.18.patch
new file mode 100644
index 0000000..52759a9
--- /dev/null
+++ b/1017_linux-5.2.18.patch
@@ -0,0 +1,1940 @@
+diff --git a/Makefile b/Makefile
+index 32226d81fbb5..440e473687eb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 4ed5d57f2359..48244640fc49 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -272,7 +272,7 @@ int64_t opal_xive_get_vp_info(uint64_t vp,
+ int64_t opal_xive_set_vp_info(uint64_t vp,
+ uint64_t flags,
+ uint64_t report_cl_pair);
+-int64_t opal_xive_allocate_irq(uint32_t chip_id);
++int64_t opal_xive_allocate_irq_raw(uint32_t chip_id);
+ int64_t opal_xive_free_irq(uint32_t girq);
+ int64_t opal_xive_sync(uint32_t type, uint32_t id);
+ int64_t opal_xive_dump(uint32_t type, uint32_t id);
+diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
+index 36c8fa3647a2..53cf67f5ef42 100644
+--- a/arch/powerpc/platforms/powernv/opal-call.c
++++ b/arch/powerpc/platforms/powernv/opal-call.c
+@@ -257,7 +257,7 @@ OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO);
+ OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE);
+ OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK);
+ OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK);
+-OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ);
++OPAL_CALL(opal_xive_allocate_irq_raw, OPAL_XIVE_ALLOCATE_IRQ);
+ OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ);
+ OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
+ OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index 2f26b74f6cfa..cf156aadefe9 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -231,6 +231,17 @@ static bool xive_native_match(struct device_node *node)
+ return of_device_is_compatible(node, "ibm,opal-xive-vc");
+ }
+
++static s64 opal_xive_allocate_irq(u32 chip_id)
++{
++ s64 irq = opal_xive_allocate_irq_raw(chip_id);
++
++ /*
++ * Old versions of skiboot can incorrectly return 0xffffffff to
++ * indicate no space, fix it up here.
++ */
++ return irq == 0xffffffff ? OPAL_RESOURCE : irq;
++}
++
+ #ifdef CONFIG_SMP
+ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
+ {
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 9489ffc06411..4f325e47519f 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -60,6 +60,12 @@ module_param(report_key_events, int, 0644);
+ MODULE_PARM_DESC(report_key_events,
+ "0: none, 1: output changes, 2: brightness changes, 3: all");
+
++static int hw_changes_brightness = -1;
++module_param(hw_changes_brightness, int, 0644);
++MODULE_PARM_DESC(hw_changes_brightness,
++ "Set this to 1 on buggy hw which changes the brightness itself when "
++ "a hotkey is pressed: -1: auto, 0: normal 1: hw-changes-brightness");
++
+ /*
+ * Whether the struct acpi_video_device_attrib::device_id_scheme bit should be
+ * assumed even if not actually set.
+@@ -405,6 +411,14 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
+ return 0;
+ }
+
++static int video_hw_changes_brightness(
++ const struct dmi_system_id *d)
++{
++ if (hw_changes_brightness == -1)
++ hw_changes_brightness = 1;
++ return 0;
++}
++
+ static const struct dmi_system_id video_dmi_table[] = {
+ /*
+ * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+@@ -529,6 +543,21 @@ static const struct dmi_system_id video_dmi_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ },
+ },
++ /*
++ * Some machines change the brightness themselves when a brightness
++ * hotkey gets pressed, despite us telling them not to. In this case
++ * acpi_video_device_notify() should only call backlight_force_update(
++ * BACKLIGHT_UPDATE_HOTKEY) and not do anything else.
++ */
++ {
++ /* https://bugzilla.kernel.org/show_bug.cgi?id=204077 */
++ .callback = video_hw_changes_brightness,
++ .ident = "Packard Bell EasyNote MZ35",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Packard Bell"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "EasyNote MZ35"),
++ },
++ },
+ {}
+ };
+
+@@ -1612,6 +1641,14 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
+ bus = video_device->video;
+ input = bus->input;
+
++ if (hw_changes_brightness > 0) {
++ if (video_device->backlight)
++ backlight_force_update(video_device->backlight,
++ BACKLIGHT_UPDATE_HOTKEY);
++ acpi_notifier_call_chain(device, event, 0);
++ return;
++ }
++
+ switch (event) {
+ case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
+ brightness_switch_event(video_device, event);
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 208feef63de4..d04b443cad1f 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -637,6 +637,26 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
+ }
+ EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+
++int btrtl_shutdown_realtek(struct hci_dev *hdev)
++{
++ struct sk_buff *skb;
++ int ret;
++
++ /* According to the vendor driver, BT must be reset on close to avoid
++ * firmware crash.
++ */
++ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb)) {
++ ret = PTR_ERR(skb);
++ bt_dev_err(hdev, "HCI reset during shutdown failed");
++ return ret;
++ }
++ kfree_skb(skb);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek);
++
+ static unsigned int btrtl_convert_baudrate(u32 device_baudrate)
+ {
+ switch (device_baudrate) {
+diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
+index f1676144fce8..10ad40c3e42c 100644
+--- a/drivers/bluetooth/btrtl.h
++++ b/drivers/bluetooth/btrtl.h
+@@ -55,6 +55,7 @@ void btrtl_free(struct btrtl_device_info *btrtl_dev);
+ int btrtl_download_firmware(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev);
+ int btrtl_setup_realtek(struct hci_dev *hdev);
++int btrtl_shutdown_realtek(struct hci_dev *hdev);
+ int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+@@ -83,6 +84,11 @@ static inline int btrtl_setup_realtek(struct hci_dev *hdev)
+ return -EOPNOTSUPP;
+ }
+
++static inline int btrtl_shutdown_realtek(struct hci_dev *hdev)
++{
++ return -EOPNOTSUPP;
++}
++
+ static inline int btrtl_get_uart_settings(struct hci_dev *hdev,
+ struct btrtl_device_info *btrtl_dev,
+ unsigned int *controller_baudrate,
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 7954a7924923..aa6e2f9d4861 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -378,6 +378,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+
++ /* Additional Realtek 8822CE Bluetooth devices */
++ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
++
+ /* Silicon Wave based devices */
+ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
+@@ -3181,6 +3184,7 @@ static int btusb_probe(struct usb_interface *intf,
+ #ifdef CONFIG_BT_HCIBTUSB_RTL
+ if (id->driver_info & BTUSB_REALTEK) {
+ hdev->setup = btrtl_setup_realtek;
++ hdev->shutdown = btrtl_shutdown_realtek;
+
+ /* Realtek devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
+index 01ef2fab5764..1a17a606c13d 100644
+--- a/drivers/clk/imx/clk-imx8mm.c
++++ b/drivers/clk/imx/clk-imx8mm.c
+@@ -55,8 +55,8 @@ static const struct imx_pll14xx_rate_table imx8mm_pll1416x_tbl[] = {
+ };
+
+ static const struct imx_pll14xx_rate_table imx8mm_audiopll_tbl[] = {
+- PLL_1443X_RATE(786432000U, 655, 5, 2, 23593),
+- PLL_1443X_RATE(722534400U, 301, 5, 1, 3670),
++ PLL_1443X_RATE(393216000U, 262, 2, 3, 9437),
++ PLL_1443X_RATE(361267200U, 361, 3, 3, 17511),
+ };
+
+ static const struct imx_pll14xx_rate_table imx8mm_videopll_tbl[] = {
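
The replaced audio-PLL rows can be sanity-checked against the usual
i.MX8 PLL1443x fractional relation, fout = fin * (m + k/65536) / (p * 2^s)
with a 24 MHz reference; take the formula as an assumption here, it
mirrors how the pll14xx driver recalculates rates. A quick user-space
check of the new entries:

#include <stdio.h>

/* Assumed PLL1443x relation: fout = fin * (m + k/65536) / (p << s),
 * with fin = 24 MHz on i.MX8MM. */
static double pll1443x(double fin, int m, int p, int s, int k)
{
	return fin * (m + k / 65536.0) / (p * (1 << s));
}

int main(void)
{
	/* The two table entries added by the hunk above. */
	printf("%.0f Hz (target 393216000)\n",
	       pll1443x(24e6, 262, 2, 3, 9437));
	printf("%.0f Hz (target 361267200)\n",
	       pll1443x(24e6, 361, 3, 3, 17511));
	/* Both land within a few Hz of the target; a fractional-N PLL
	 * can only approximate rates that are not exact multiples. */
	return 0;
}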
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index f9d7d6aaf3db..b26b6975b727 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -3190,6 +3190,7 @@ static int talitos_remove(struct platform_device *ofdev)
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ crypto_unregister_aead(&t_alg->algt.alg.aead);
++ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ crypto_unregister_ahash(&t_alg->algt.alg.hash);
+ break;
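
The one-line talitos fix closes an unintended switch fallthrough:
without the break, the AEAD case continued into the AHASH case and
called crypto_unregister_ahash() on an algorithm that was never
registered as a hash. The hazard, reduced to a self-contained sketch:

#include <stdio.h>

enum alg_type { ALG_AEAD, ALG_AHASH };

static void unregister(enum alg_type t)
{
	switch (t) {
	case ALG_AEAD:
		puts("unregister aead");
		/* Without this break, execution falls through and
		 * "unregister ahash" also runs for the AEAD entry. */
		break;
	case ALG_AHASH:
		puts("unregister ahash");
		break;
	}
}

int main(void)
{
	unregister(ALG_AEAD);	/* prints only the aead line */
	return 0;
}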
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index dc3ac66a4450..279ced1d64ed 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -4209,20 +4209,10 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
+ static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+ {
+- struct drm_plane_state *old_plane_state =
+- drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+-
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+- /*
+- * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+- * async commits so don't allow fb changes.
+- */
+- if (old_plane_state->fb != new_plane_state->fb)
+- return -EINVAL;
+-
+ return 0;
+ }
+
+@@ -6798,6 +6788,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ if (ret)
+ goto fail;
+
++ if (state->legacy_cursor_update) {
++ /*
++ * This is a fast cursor update coming from the plane update
++ * helper, check if it can be done asynchronously for better
++ * performance.
++ */
++ state->async_update =
++ !drm_atomic_helper_async_check(dev, state);
++
++ /*
++ * Skip the remaining global validation if this is an async
++ * update. Cursor updates can be done without affecting
++ * state or bandwidth calcs and this avoids the performance
++ * penalty of locking the private state object and
++ * allocating a new dc_state.
++ */
++ if (state->async_update)
++ return 0;
++ }
++
+ /* Check scaling and underscan changes*/
+ /* TODO Removed scaling changes validation due to inability to commit
+ * new stream into context w\o causing full reset. Need to
+@@ -6850,13 +6860,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ ret = -EINVAL;
+ goto fail;
+ }
+- } else if (state->legacy_cursor_update) {
++ } else {
+ /*
+- * This is a fast cursor update coming from the plane update
+- * helper, check if it can be done asynchronously for better
+- * performance.
++ * The commit is a fast update. Fast updates shouldn't change
++ * the DC context, affect global validation, and can have their
++ * commit work done in parallel with other commits not touching
++ * the same resource. If we have a new DC context as part of
++ * the DM atomic state from validation we need to free it and
++ * retain the existing one instead.
+ */
+- state->async_update = !drm_atomic_helper_async_check(dev, state);
++ struct dm_atomic_state *new_dm_state, *old_dm_state;
++
++ new_dm_state = dm_atomic_get_new_state(state);
++ old_dm_state = dm_atomic_get_old_state(state);
++
++ if (new_dm_state && old_dm_state) {
++ if (new_dm_state->context)
++ dc_release_state(new_dm_state->context);
++
++ new_dm_state->context = old_dm_state->context;
++
++ if (old_dm_state->context)
++ dc_retain_state(old_dm_state->context);
++ }
+ }
+
+ /* Must be success */
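
The second amdgpu_dm hunk is the usual refcounted-swap idiom: release
the reference held on the freshly validated context, point the state at
the existing context, and retain it so both owners can drop their
references independently. A toy refcount sketch of the same sequence;
the get/put helpers below are invented stand-ins for
dc_retain_state()/dc_release_state():

#include <stdio.h>

struct state { int refs; const char *name; };

static void get(struct state *s) { s->refs++; }
static void put(struct state *s)
{
	if (--s->refs == 0)
		printf("freeing %s\n", s->name);
}

int main(void)
{
	struct state old = { 1, "old" }, fresh = { 1, "new" };
	struct state *ctx = &fresh;

	/* Fast path keeps the existing context instead of the new one. */
	put(ctx);	/* dc_release_state(new_dm_state->context) */
	ctx = &old;	/* new_dm_state->context = old_dm_state->context */
	get(ctx);	/* dc_retain_state(old_dm_state->context) */

	put(ctx);	/* each owner drops its reference eventually */
	put(&old);
	return 0;
}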
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+index 95f332ee3e7e..16614d73a5fc 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+@@ -32,6 +32,10 @@ endif
+
+ calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+
++ifdef CONFIG_CC_IS_CLANG
++calcs_ccflags += -msse2
++endif
++
+ CFLAGS_dcn_calcs.o := $(calcs_ccflags)
+ CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
+ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+index d97ca6528f9d..934ffe1b4b00 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
+@@ -32,6 +32,10 @@ endif
+
+ dml_ccflags := -mhard-float -msse $(cc_stack_align)
+
++ifdef CONFIG_CC_IS_CLANG
++dml_ccflags += -msse2
++endif
++
+ CFLAGS_display_mode_lib.o := $(dml_ccflags)
+ CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
+ CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index 54a6414c5d96..429c58ce56ce 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -1278,7 +1278,9 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
+ /* LG LP140WF6-SPM1 eDP panel */
+ { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+ /* Apple panels need some additional handling to support PSR */
+- { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
++ { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) },
++ /* CH7511 seems to leave SINK_COUNT zeroed */
++ { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) },
+ };
+
+ #undef OUI
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index dd427c7ff967..f13d45f40ed1 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -581,6 +581,9 @@ static void output_poll_execute(struct work_struct *work)
+ enum drm_connector_status old_status;
+ bool repoll = false, changed;
+
++ if (!dev->mode_config.poll_enabled)
++ return;
++
+ /* Pick up any changes detected by the probe functions. */
+ changed = dev->mode_config.delayed_event;
+ dev->mode_config.delayed_event = false;
+@@ -735,7 +738,11 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
+ */
+ void drm_kms_helper_poll_fini(struct drm_device *dev)
+ {
+- drm_kms_helper_poll_disable(dev);
++ if (!dev->mode_config.poll_enabled)
++ return;
++
++ dev->mode_config.poll_enabled = false;
++ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+ }
+ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
+index 06ee23823a68..acfafc4bda0e 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
+@@ -169,14 +169,34 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+- asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
+- asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
+- /* fall-through */
++ /* NOTE: This will cause scaling when the input is
++ * larger than the output.
++ */
++ asyh->view.oW = min(asyh->view.iW, asyh->view.oW);
++ asyh->view.oH = min(asyh->view.iH, asyh->view.oH);
++ break;
+ case DRM_MODE_SCALE_ASPECT:
+- if (asyh->view.oH < asyh->view.oW) {
++ /* Determine whether the scaling should be on width or on
++ * height. This is done by comparing the aspect ratios of the
++ * sizes. If the output AR is larger than input AR, that means
++ * we want to change the width (letterboxed on the
++ * left/right), otherwise on the height (letterboxed on the
++ * top/bottom).
++ *
++ * E.g. 4:3 (1.333) AR image displayed on a 16:10 (1.6) AR
++ * screen will have letterboxes on the left/right. However a
++ * 16:9 (1.777) AR image on that same screen will have
++ * letterboxes on the top/bottom.
++ *
++ * inputAR = iW / iH; outputAR = oW / oH
++ * outputAR > inputAR is equivalent to oW * iH > iW * oH
++ */
++ if (asyh->view.oW * asyh->view.iH > asyh->view.iW * asyh->view.oH) {
++ /* Recompute output width, i.e. left/right letterbox */
+ u32 r = (asyh->view.iW << 19) / asyh->view.iH;
+ asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
+ } else {
++ /* Recompute output height, i.e. top/bottom letterbox */
+ u32 r = (asyh->view.iH << 19) / asyh->view.iW;
+ asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
+ }
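
The rewritten nouveau comment reduces the letterbox decision to one
integer cross-multiplication, oW * iH > iW * oH, and rescales the chosen
axis with a rounded 19-bit fixed-point ratio. The arithmetic checks out
for both branches; a user-space replay of the hunk's math:

#include <stdio.h>
#include <stdint.h>

static void scale(uint32_t iW, uint32_t iH, uint32_t oW, uint32_t oH)
{
	if (oW * iH > iW * oH) {		/* bars left/right */
		uint32_t r = (iW << 19) / iH;	/* input AR, 19-bit fixed point */
		oW = ((oH * r) + (r / 2)) >> 19;
	} else {				/* bars top/bottom */
		uint32_t r = (iH << 19) / iW;
		oH = ((oW * r) + (r / 2)) >> 19;
	}
	printf("%ux%u\n", oW, oH);
}

int main(void)
{
	/* 4:3 image on a 16:10 screen -> bars left/right */
	scale(1024, 768, 1920, 1200);	/* prints 1600x1200 */
	/* 16:9 image on the same screen -> bars top/bottom */
	scale(1920, 1080, 1920, 1200);	/* prints 1920x1080 */
	return 0;
}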
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 76aa474e92c1..264139be7e29 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -568,6 +568,7 @@
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941 0x0941
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+
+ #define USB_VENDOR_ID_HUION 0x256c
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index c8c6d0436ac9..d17e5c2e9246 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -818,7 +818,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
+
+ if (!buf) {
+ ret = -ENOMEM;
+- goto err_free;
++ goto err_stop;
+ }
+
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+@@ -850,9 +850,12 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ ret = lg4ff_init(hdev);
+
+ if (ret)
+- goto err_free;
++ goto err_stop;
+
+ return 0;
++
++err_stop:
++ hid_hw_stop(hdev);
+ err_free:
+ kfree(drv_data);
+ return ret;
+@@ -863,8 +866,7 @@ static void lg_remove(struct hid_device *hdev)
+ struct lg_drv_data *drv_data = hid_get_drvdata(hdev);
+ if (drv_data->quirks & LG_FF4)
+ lg4ff_deinit(hdev);
+- else
+- hid_hw_stop(hdev);
++ hid_hw_stop(hdev);
+ kfree(drv_data);
+ }
+
+diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
+index cefba038520c..03f0220062ca 100644
+--- a/drivers/hid/hid-lg4ff.c
++++ b/drivers/hid/hid-lg4ff.c
+@@ -1477,7 +1477,6 @@ int lg4ff_deinit(struct hid_device *hid)
+ }
+ }
+ #endif
+- hid_hw_stop(hid);
+ drv_data->device_props = NULL;
+
+ kfree(entry);
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index bfcf2ee58d14..0af0fb304c0c 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -1732,14 +1732,14 @@ static int logi_dj_probe(struct hid_device *hdev,
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
+ __func__, retval);
+- goto logi_dj_recv_query_paired_devices_failed;
++ /*
++ * This can happen with a KVM, let the probe succeed,
++ * logi_dj_recv_queue_unknown_work will retry later.
++ */
+ }
+ }
+
+- return retval;
+-
+-logi_dj_recv_query_paired_devices_failed:
+- hid_hw_close(hdev);
++ return 0;
+
+ llopen_failed:
+ switch_to_dj_mode_fail:
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 4effce12607b..424d0f775ffa 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3749,28 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {
+
+ { L27MHZ_DEVICE(HID_ANY_ID) },
+
+- { /* Logitech G203/Prodigy Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
+- { /* Logitech G302 Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
+- { /* Logitech G303 Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
+- { /* Logitech G400 Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
+ { /* Logitech G403 Wireless Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+- { /* Logitech G403 Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
+- { /* Logitech G403 Hero Gaming Mouse over USB */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
+- { /* Logitech G502 Proteus Core Gaming Mouse */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
+- { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
+- { /* Logitech G502 Hero Gaming Mouse over USB */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
+- { /* Logitech G700s Gaming Mouse over USB */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
+ { /* Logitech G703 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
+ { /* Logitech G703 Hero Gaming Mouse over USB */
+diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
+index 21544ebff855..5a3b3d974d84 100644
+--- a/drivers/hid/hid-prodikeys.c
++++ b/drivers/hid/hid-prodikeys.c
+@@ -551,10 +551,14 @@ static void pcmidi_setup_extra_keys(
+
+ static int pcmidi_set_operational(struct pcmidi_snd *pm)
+ {
++ int rc;
++
+ if (pm->ifnum != 1)
+ return 0; /* only set up ONCE for interface 1 */
+
+- pcmidi_get_output_report(pm);
++ rc = pcmidi_get_output_report(pm);
++ if (rc < 0)
++ return rc;
+ pcmidi_submit_output_report(pm, 0xc1);
+ return 0;
+ }
+@@ -683,7 +687,11 @@ static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+ spin_lock_init(&pm->rawmidi_in_lock);
+
+ init_sustain_timers(pm);
+- pcmidi_set_operational(pm);
++ err = pcmidi_set_operational(pm);
++ if (err < 0) {
++ pk_error("failed to find output report\n");
++ goto fail_register;
++ }
+
+ /* register it */
+ err = snd_card_register(card);
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 4fe2c3ab76f9..efeeac5af633 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -91,6 +91,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0941), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 49dd2d905c7f..73c0f7a95e2d 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -2811,7 +2811,6 @@ err_stop:
+ sony_cancel_work_sync(sc);
+ sony_remove_dev_list(sc);
+ sony_release_device_id(sc);
+- hid_hw_stop(hdev);
+ return ret;
+ }
+
+@@ -2876,6 +2875,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ */
+ if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
+ hid_err(hdev, "failed to claim input\n");
++ hid_hw_stop(hdev);
+ return -ENODEV;
+ }
+
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 006bd6f4f653..62ef47a730b0 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -370,7 +370,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
+
+ mutex_lock(&minors_lock);
+ dev = hidraw_table[minor];
+- if (!dev) {
++ if (!dev || !dev->exist) {
+ ret = -ENODEV;
+ goto out;
+ }
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index ff3fd011796e..3334f5865de7 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -133,8 +133,6 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+
+ refcount_inc(&bioctx->ref);
+ generic_make_request(clone);
+- if (clone->bi_status == BLK_STS_IOERR)
+- return -EIO;
+
+ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index c8fa5906bdf9..ed3e640eb03a 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1628,29 +1628,35 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ continue;
+ }
+
+- if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
++ /*
++ * We check "time_after" and "!chip_good" before checking
++ * "chip_good" to avoid the failure due to scheduling.
++ */
++ if (time_after(jiffies, timeo) && !chip_good(map, adr, datum)) {
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
+ xip_disable(map, chip, adr);
++ ret = -EIO;
+ break;
+ }
+
+- if (chip_ready(map, adr))
++ if (chip_good(map, adr, datum))
+ break;
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
++
+ /* Did we succeed? */
+- if (!chip_good(map, adr, datum)) {
++ if (ret) {
+ /* reset on all failures. */
+ map_write(map, CMD(0xF0), chip->start);
+ /* FIXME - should have reset delay before continuing */
+
+- if (++retry_cnt <= MAX_RETRIES)
++ if (++retry_cnt <= MAX_RETRIES) {
++ ret = 0;
+ goto retry;
+-
+- ret = -EIO;
++ }
+ }
+ xip_enable(map, chip, adr);
+ op_done:
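
The restructured CFI wait loop is an instance of a general polling rule:
after the deadline expires, test the success condition one more time,
otherwise a thread descheduled past the timeout reports a failure even
though the device finished in time. The shape of the pattern as a
self-contained sketch, with a fake ready() predicate standing in for
chip_good():

#include <stdio.h>
#include <time.h>

static long elapsed_ms(struct timespec start)
{
	struct timespec now;
	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start.tv_sec) * 1000 +
	       (now.tv_nsec - start.tv_nsec) / 1000000;
}

/* Fake chip_good(): the "device" completes after 50 ms. */
static int ready(struct timespec start)
{
	return elapsed_ms(start) >= 50;
}

int main(void)
{
	struct timespec start;
	int ret = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		/* Timeout and not-ready are tested together: a thread
		 * that slept past the 100 ms deadline still succeeds
		 * if the device made it in time. */
		if (elapsed_ms(start) > 100 && !ready(start)) {
			ret = -1;
			break;
		}
		if (ready(start))
			break;
	}
	printf("ret = %d\n", ret);
	return 0;
}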
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index fc5ea87bd387..fe879c07ae3c 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -11,7 +11,6 @@
+ #include <linux/io.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
+-#include <linux/marvell_phy.h>
+ #include <linux/module.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
+@@ -1150,13 +1149,6 @@ static void hns_nic_adjust_link(struct net_device *ndev)
+ }
+ }
+
+-static int hns_phy_marvell_fixup(struct phy_device *phydev)
+-{
+- phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
+-
+- return 0;
+-}
+-
+ /**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+@@ -1182,16 +1174,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
+
+- /* register the PHY fixup (for Marvell 88E1510) */
+- ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510,
+- MARVELL_PHY_ID_MASK,
+- hns_phy_marvell_fixup);
+- /* we can live without it, so just issue a warning */
+- if (ret)
+- netdev_warn(ndev,
+- "Cannot register PHY fixup, ret=%d\n",
+- ret);
+-
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+@@ -2447,11 +2429,8 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+- if (ndev->phydev) {
+- phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510,
+- MARVELL_PHY_ID_MASK);
++ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+- }
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index fa4bb940665c..5cb55ea671e3 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
+ if (adapter->state == VNIC_REMOVING ||
+- adapter->state == VNIC_REMOVED)
+- goto out;
++ adapter->state == VNIC_REMOVED) {
++ kfree(rwi);
++ rc = EBUSY;
++ break;
++ }
+
+ if (adapter->force_reset_recovery) {
+ adapter->force_reset_recovery = false;
+@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
+ netdev_dbg(adapter->netdev, "Reset failed\n");
+ free_all_rwi(adapter);
+ }
+-out:
++
+ adapter->resetting = false;
+ if (we_lock_rtnl)
+ rtnl_unlock();
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
+index 43abdfd0deed..7e171d37bcd6 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -35,7 +35,7 @@
+ #define PLL_READY_GATE_EN BIT(3)
+ /* QPHY_PCS_STATUS bit */
+ #define PHYSTATUS BIT(6)
+-/* QPHY_COM_PCS_READY_STATUS bit */
++/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */
+ #define PCS_READY BIT(0)
+
+ /* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+@@ -115,6 +115,7 @@ enum qphy_reg_layout {
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_READY_STATUS,
++ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
+@@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = {
+ [QPHY_FLL_MAN_CODE] = 0xd4,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+- [QPHY_PCS_READY_STATUS] = 0x174,
++ [QPHY_PCS_STATUS] = 0x174,
+ };
+
+ static const unsigned int usb3phy_regs_layout[] = {
+@@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = {
+ [QPHY_FLL_MAN_CODE] = 0xd0,
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+- [QPHY_PCS_READY_STATUS] = 0x17c,
++ [QPHY_PCS_STATUS] = 0x17c,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
+@@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = {
+ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
+ [QPHY_SW_RESET] = 0x00,
+ [QPHY_START_CTRL] = 0x08,
+- [QPHY_PCS_READY_STATUS] = 0x174,
++ [QPHY_PCS_STATUS] = 0x174,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+@@ -911,7 +912,6 @@ struct qmp_phy_cfg {
+
+ unsigned int start_ctrl;
+ unsigned int pwrdn_ctrl;
+- unsigned int mask_pcs_ready;
+ unsigned int mask_com_pcs_ready;
+
+ /* true, if PHY has a separate PHY_COM control block */
+@@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+- .mask_pcs_ready = PHYSTATUS,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+@@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+- .mask_pcs_ready = PHYSTATUS,
+ };
+
+ /* list of resets */
+@@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+- .mask_pcs_ready = PHYSTATUS,
+
+ .has_phy_com_ctrl = false,
+ .has_lane_rst = false,
+@@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+- .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+@@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+- .mask_pcs_ready = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN,
+@@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+- .mask_pcs_ready = PCS_READY,
+
+ .is_dual_lane_phy = true,
+ .no_pcs_sw_reset = true,
+@@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+- .mask_pcs_ready = PHYSTATUS,
+ .mask_com_pcs_ready = PCS_READY,
+ };
+
+@@ -1280,7 +1273,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN,
+- .mask_pcs_ready = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+ };
+@@ -1458,7 +1450,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *dp_com = qmp->dp_com;
+ void __iomem *status;
+- unsigned int mask, val;
++ unsigned int mask, val, ready;
+ int ret;
+
+ dev_vdbg(qmp->dev, "Initializing QMP phy\n");
+@@ -1546,10 +1538,17 @@ static int qcom_qmp_phy_enable(struct phy *phy)
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+- status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+- mask = cfg->mask_pcs_ready;
++ if (cfg->type == PHY_TYPE_UFS) {
++ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
++ mask = PCS_READY;
++ ready = PCS_READY;
++ } else {
++ status = pcs + cfg->regs[QPHY_PCS_STATUS];
++ mask = PHYSTATUS;
++ ready = 0;
++ }
+
+- ret = readl_poll_timeout(status, val, val & mask, 1,
++ ret = readl_poll_timeout(status, val, (val & mask) == ready, 10,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
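
The qmp hunk untangles two ready polarities: UFS PHYs signal readiness
with PCS_READY reading as 1, while the other PHY types signal it with
PHYSTATUS reading as 0, so a plain val & mask poll cannot express both.
Writing the condition as (val & mask) == ready does. A tiny sketch of
the predicate; the register snapshots below are made up:

#include <stdio.h>
#include <stdint.h>

#define PHYSTATUS (1u << 6)	/* reads 0 when the PHY is up */
#define PCS_READY (1u << 0)	/* reads 1 when the PCS is up (UFS) */

static int is_ready(uint32_t val, uint32_t mask, uint32_t ready)
{
	return (val & mask) == ready;
}

int main(void)
{
	uint32_t usb_status = 0x00;	/* PHYSTATUS cleared -> ready */
	uint32_t ufs_status = 0x01;	/* PCS_READY set -> ready */

	printf("usb ready: %d\n", is_ready(usb_status, PHYSTATUS, 0));
	printf("ufs ready: %d\n", is_ready(ufs_status, PCS_READY, PCS_READY));
	return 0;
}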
+diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
+index 197d8a192721..70efa3d29825 100644
+--- a/drivers/platform/x86/i2c-multi-instantiate.c
++++ b/drivers/platform/x86/i2c-multi-instantiate.c
+@@ -92,7 +92,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
+ for (i = 0; i < multi->num_clients && inst_data[i].type; i++) {
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
+- snprintf(name, sizeof(name), "%s-%s.%d", match->id,
++ snprintf(name, sizeof(name), "%s-%s.%d", dev_name(dev),
+ inst_data[i].type, i);
+ board_info.dev_name = name;
+ switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) {
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 42de31d20616..8ae8ef526b4a 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -656,6 +656,15 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+ return 0;
+ }
+
++ /*
++ * We do not hold the lock for the open because in case
++ * SMB2_open needs to reconnect, it will end up calling
++ * cifs_mark_open_files_invalid() which takes the lock again
++ * thus causing a deadlock
++ */
++
++ mutex_unlock(&tcon->crfid.fid_mutex);
++
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+@@ -677,7 +686,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
+ if (rc)
+- goto oshr_exit;
++ goto oshr_free;
+ smb2_set_next_command(tcon, &rqst[0]);
+
+ memset(&qi_iov, 0, sizeof(qi_iov));
+@@ -690,18 +699,10 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ if (rc)
+- goto oshr_exit;
++ goto oshr_free;
+
+ smb2_set_related(&rqst[1]);
+
+- /*
+- * We do not hold the lock for the open because in case
+- * SMB2_open needs to reconnect, it will end up calling
+- * cifs_mark_open_files_invalid() which takes the lock again
+- * thus causing a deadlock
+- */
+-
+- mutex_unlock(&tcon->crfid.fid_mutex);
+ rc = compound_send_recv(xid, ses, flags, 2, rqst,
+ resp_buftype, rsp_iov);
+ mutex_lock(&tcon->crfid.fid_mutex);
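
The cifs reordering exists because SMB2_open() may reconnect and call
back into cifs_mark_open_files_invalid(), which takes the same
fid_mutex; holding a non-recursive mutex across that call is a
self-deadlock. The bug's shape, reduced to a pthreads sketch with all
names invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fid_mutex = PTHREAD_MUTEX_INITIALIZER;

static void mark_files_invalid(void)
{
	pthread_mutex_lock(&fid_mutex);	/* reconnect path takes the lock */
	puts("files invalidated");
	pthread_mutex_unlock(&fid_mutex);
}

static void do_open(int need_reconnect)
{
	if (need_reconnect)
		mark_files_invalid();
	puts("opened");
}

int main(void)
{
	pthread_mutex_lock(&fid_mutex);
	/* Wrong: calling do_open(1) while holding fid_mutex re-acquires
	 * it in mark_files_invalid() and deadlocks. Right (what the
	 * patch does): drop the lock before the call that may
	 * reconnect, retake it afterwards. */
	pthread_mutex_unlock(&fid_mutex);
	do_open(1);
	pthread_mutex_lock(&fid_mutex);
	puts("cached fid updated under the lock");
	pthread_mutex_unlock(&fid_mutex);
	return 0;
}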
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index d0539ddad6e2..7d9fce215f46 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -894,6 +894,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ unsigned int cp_blks = 1 + __cp_payload(sbi);
+ block_t cp_blk_no;
+ int i;
++ int err;
+
+ sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
+ GFP_KERNEL);
+@@ -921,6 +922,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ } else if (cp2) {
+ cur_page = cp2;
+ } else {
++ err = -EFSCORRUPTED;
+ goto fail_no_cp;
+ }
+
+@@ -933,8 +935,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ sbi->cur_cp_pack = 2;
+
+ /* Sanity checking of checkpoint */
+- if (f2fs_sanity_check_ckpt(sbi))
++ if (f2fs_sanity_check_ckpt(sbi)) {
++ err = -EFSCORRUPTED;
+ goto free_fail_no_cp;
++ }
+
+ if (cp_blks <= 1)
+ goto done;
+@@ -948,8 +952,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
+ unsigned char *ckpt = (unsigned char *)sbi->ckpt;
+
+ cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+- if (IS_ERR(cur_page))
++ if (IS_ERR(cur_page)) {
++ err = PTR_ERR(cur_page);
+ goto free_fail_no_cp;
++ }
+ sit_bitmap_ptr = page_address(cur_page);
+ memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
+ f2fs_put_page(cur_page, 1);
+@@ -964,7 +970,7 @@ free_fail_no_cp:
+ f2fs_put_page(cp2, 1);
+ fail_no_cp:
+ kvfree(sbi->ckpt);
+- return -EINVAL;
++ return err;
+ }
+
+ static void __add_dirty_inode(struct inode *inode, enum inode_type type)
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 923923603a7d..85f3879a31cb 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -455,7 +455,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+ fio->is_por ? META_POR : (__is_meta_io(fio) ?
+ META_GENERIC : DATA_GENERIC_ENHANCE)))
+- return -EFAULT;
++ return -EFSCORRUPTED;
+
+ trace_f2fs_submit_page_bio(page, fio);
+ f2fs_trace_ios(fio, 0);
+@@ -734,7 +734,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE_READ)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto put_err;
+ }
+ goto got_it;
+@@ -754,7 +754,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
+ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+ dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto put_err;
+ }
+ got_it:
+@@ -1100,7 +1100,7 @@ next_block:
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto sync_out;
+ }
+
+@@ -1570,7 +1570,7 @@ got_it:
+
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC_ENHANCE_READ)) {
+- ret = -EFAULT;
++ ret = -EFSCORRUPTED;
+ goto out;
+ }
+ } else {
+@@ -1851,7 +1851,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
+
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC_ENHANCE))
+- return -EFAULT;
++ return -EFSCORRUPTED;
+
+ ipu_force = true;
+ fio->need_lock = LOCK_DONE;
+@@ -1878,7 +1878,7 @@ got_it:
+ if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto out_writepage;
+ }
+ /*
+@@ -2536,7 +2536,7 @@ repeat:
+ } else {
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE_READ)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto fail;
+ }
+ err = f2fs_submit_page_read(inode, page, blkaddr);
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 59bc46017855..4be433f20930 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -820,7 +820,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ "%s: corrupted namelen=%d, run fsck to fix.",
+ __func__, le16_to_cpu(de->name_len));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ err = -EFSCORRUPTED;
+ goto out;
+ }
+
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index cbdc2f88a98c..b545beb8b04e 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -3718,4 +3718,7 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+ return false;
+ }
+
++#define EFSBADCRC EBADMSG /* Bad CRC detected */
++#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
++
+ #endif /* _LINUX_F2FS_H */
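
The new f2fs defines follow the convention ext4 already uses:
EFSCORRUPTED aliases EUCLEAN ("Structure needs cleaning") and EFSBADCRC
aliases EBADMSG, so callers can tell corruption apart from the generic
EINVAL/EFAULT these paths returned before. What userspace sees for each,
in a short sketch (the error strings assume glibc):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define EFSBADCRC	EBADMSG	/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN	/* Filesystem is corrupted */

int main(void)
{
	/* A syscall that hits one of the converted paths now fails
	 * with errno set to the aliased value. */
	printf("EFSCORRUPTED -> %s\n", strerror(EFSCORRUPTED));
	printf("EFSBADCRC    -> %s\n", strerror(EFSBADCRC));
	return 0;
}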
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 45b45f37d347..33c6e14d0c87 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1026,7 +1026,7 @@ next_dnode:
+ !f2fs_is_valid_blkaddr(sbi, *blkaddr,
+ DATA_GENERIC_ENHANCE)) {
+ f2fs_put_dnode(&dn);
+- return -EFAULT;
++ return -EFSCORRUPTED;
+ }
+
+ if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index bb6fd5a506d3..08224776618e 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -658,7 +658,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE_READ))) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto put_page;
+ }
+ goto got_it;
+@@ -676,7 +676,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
+ }
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC_ENHANCE))) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto put_page;
+ }
+ got_it:
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 404d2462a0fe..aa9b5b6d1b23 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -144,7 +144,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dn->inode->i_ino, dn->data_blkaddr);
+- return -EINVAL;
++ return -EFSCORRUPTED;
+ }
+
+ f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
+@@ -387,7 +387,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dir->i_ino, dn.data_blkaddr);
+- err = -EINVAL;
++ err = -EFSCORRUPTED;
+ goto out;
+ }
+
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index ccb02226dd2c..e26995975570 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -74,7 +74,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
+ if (!__is_valid_data_blkaddr(addr))
+ return 1;
+ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
+- return -EFAULT;
++ return -EFSCORRUPTED;
+ return 0;
+ }
+
+@@ -374,7 +374,7 @@ static int do_read_inode(struct inode *inode)
+
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+- return -EINVAL;
++ return -EFSCORRUPTED;
+ }
+
+ /* check data exist */
+diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
+index 18a038a2a9fa..ad024b7b1d43 100644
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -37,7 +37,7 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: out-of-range nid=%x, run fsck to fix.",
+ __func__, nid);
+- return -EINVAL;
++ return -EFSCORRUPTED;
+ }
+ return 0;
+ }
+@@ -1291,7 +1291,7 @@ static int read_node_page(struct page *page, int op_flags)
+ if (PageUptodate(page)) {
+ if (!f2fs_inode_chksum_verify(sbi, page)) {
+ ClearPageUptodate(page);
+- return -EBADMSG;
++ return -EFSBADCRC;
+ }
+ return LOCKED_PAGE;
+ }
+@@ -1375,7 +1375,7 @@ repeat:
+ }
+
+ if (!f2fs_inode_chksum_verify(sbi, page)) {
+- err = -EBADMSG;
++ err = -EFSBADCRC;
+ goto out_err;
+ }
+ page_hit:
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index e04f82b3f4fc..d728858ac95d 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -557,7 +557,7 @@ retry_dn:
+ "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
+ inode->i_ino, ofs_of_node(dn.node_page),
+ ofs_of_node(page));
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto err;
+ }
+
+@@ -569,13 +569,13 @@ retry_dn:
+
+ if (__is_valid_data_blkaddr(src) &&
+ !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto err;
+ }
+
+ if (__is_valid_data_blkaddr(dest) &&
+ !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto err;
+ }
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index ce15fbcd7cff..eab59d6ea945 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2784,7 +2784,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Found FS corruption, run fsck to fix.");
+- return -EIO;
++ return -EFSCORRUPTED;
+ }
+
+ /* start/end segment number in main_area */
+@@ -3207,7 +3207,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
+
+ if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- return -EFAULT;
++ return -EFSCORRUPTED;
+ }
+
+ stat_inc_inplace_blocks(fio->sbi);
+@@ -3403,11 +3403,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+ seg_i = CURSEG_I(sbi, i);
+ segno = le32_to_cpu(ckpt->cur_data_segno[i]);
+ blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
+- if (blk_off > ENTRIES_IN_SUM) {
+- f2fs_bug_on(sbi, 1);
+- f2fs_put_page(page, 1);
+- return -EFAULT;
+- }
+ seg_i->next_segno = segno;
+ reset_curseg(sbi, i, 0);
+ seg_i->alloc_type = ckpt->alloc_type[i];
+@@ -4115,7 +4110,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
+ "Wrong journal entry on segno %u",
+ start);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ err = -EFSCORRUPTED;
+ break;
+ }
+
+@@ -4156,7 +4151,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
+ "SIT is corrupted node# %u vs %u",
+ total_node_blocks, valid_node_count(sbi));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- err = -EINVAL;
++ err = -EFSCORRUPTED;
+ }
+
+ return err;
+@@ -4247,6 +4242,41 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
+ return init_victim_secmap(sbi);
+ }
+
++static int sanity_check_curseg(struct f2fs_sb_info *sbi)
++{
++ int i;
++
++ /*
++ * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
++ * In LFS curseg, all blkaddr after .next_blkoff should be unused.
++ */
++ for (i = 0; i < NO_CHECK_TYPE; i++) {
++ struct curseg_info *curseg = CURSEG_I(sbi, i);
++ struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
++ unsigned int blkofs = curseg->next_blkoff;
++
++ if (f2fs_test_bit(blkofs, se->cur_valid_map))
++ goto out;
++
++ if (curseg->alloc_type == SSR)
++ continue;
++
++ for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
++ if (!f2fs_test_bit(blkofs, se->cur_valid_map))
++ continue;
++out:
++ f2fs_msg(sbi->sb, KERN_ERR,
++ "Current segment's next free block offset is "
++ "inconsistent with bitmap, logtype:%u, "
++ "segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
++ i, curseg->segno, curseg->alloc_type,
++ curseg->next_blkoff, blkofs);
++ return -EFSCORRUPTED;
++ }
++ }
++ return 0;
++}
++
+ /*
+ * Update min, max modified time for cost-benefit GC algorithm
+ */
+@@ -4342,6 +4372,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
+ if (err)
+ return err;
+
++ err = sanity_check_curseg(sbi);
++ if (err)
++ return err;
++
+ init_min_max_mtime(sbi);
+ return 0;
+ }
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 429007b8036e..4bd151f2b954 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -697,7 +697,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
+ "Mismatch valid blocks %d vs. %d",
+ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- return -EINVAL;
++ return -EFSCORRUPTED;
+ }
+
+ /* check segment usage, and check boundary of a given segment number */
+@@ -707,7 +707,7 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
+ "Wrong valid blocks %d or segno %u",
+ GET_SIT_VBLOCKS(raw_sit), segno);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+- return -EINVAL;
++ return -EFSCORRUPTED;
+ }
+ return 0;
+ }
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 4b47ac994daf..973f1e818770 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2908,7 +2908,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
+ f2fs_msg(sb, KERN_ERR,
+ "Can't find valid F2FS filesystem in %dth superblock",
+ block + 1);
+- err = -EINVAL;
++ err = -EFSCORRUPTED;
+ brelse(bh);
+ continue;
+ }
+diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
+index e791741d193b..963242018663 100644
+--- a/fs/f2fs/xattr.c
++++ b/fs/f2fs/xattr.c
+@@ -346,7 +346,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
+
+ *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+ if (!*xe) {
+- err = -EFAULT;
++ err = -EFSCORRUPTED;
+ goto out;
+ }
+ check:
+@@ -622,7 +622,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
+ /* find entry with wanted name. */
+ here = __find_xattr(base_addr, last_base_addr, index, len, name);
+ if (!here) {
+- error = -EFAULT;
++ error = -EFSCORRUPTED;
+ goto exit;
+ }
+
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 356ebd1cbe82..d6fbe487d91a 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -3840,15 +3840,28 @@ xfs_bmapi_read(
+ XFS_STATS_INC(mp, xs_blk_mapr);
+
+ ifp = XFS_IFORK_PTR(ip, whichfork);
++ if (!ifp) {
++ /* No CoW fork? Return a hole. */
++ if (whichfork == XFS_COW_FORK) {
++ mval->br_startoff = bno;
++ mval->br_startblock = HOLESTARTBLOCK;
++ mval->br_blockcount = len;
++ mval->br_state = XFS_EXT_NORM;
++ *nmap = 1;
++ return 0;
++ }
+
+- /* No CoW fork? Return a hole. */
+- if (whichfork == XFS_COW_FORK && !ifp) {
+- mval->br_startoff = bno;
+- mval->br_startblock = HOLESTARTBLOCK;
+- mval->br_blockcount = len;
+- mval->br_state = XFS_EXT_NORM;
+- *nmap = 1;
+- return 0;
++ /*
++ * A missing attr ifork implies that the inode says we're in
++ * extents or btree format but failed to pass the inode fork
++ * verifier while trying to load it. Treat that as a file
++ * corruption too.
++ */
++#ifdef DEBUG
++ xfs_alert(mp, "%s: inode %llu missing fork %d",
++ __func__, ip->i_ino, whichfork);
++#endif /* DEBUG */
++ return -EFSCORRUPTED;
+ }
+
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index 97ce790a5b5a..d6c89cbe127a 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -1401,6 +1401,13 @@ enum drm_dp_quirk {
+ * driver still need to implement proper handling for such device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
++ /**
++ * @DP_DPCD_QUIRK_NO_SINK_COUNT:
++ *
++ * The device does not set SINK_COUNT to a non-zero value.
++ * The driver should ignore SINK_COUNT during detection.
++ */
++ DP_DPCD_QUIRK_NO_SINK_COUNT,
+ };
+
+ /**
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 8374b18ebe9a..185c07eac0da 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -41,7 +41,6 @@
+ #include <linux/workqueue.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+-#include <linux/wait.h>
+ #include <linux/zpool.h>
+
+ /*
+@@ -145,8 +144,6 @@ struct z3fold_header {
+ * @release_wq: workqueue for safe page release
+ * @work: work_struct for safe page release
+ * @inode: inode for z3fold pseudo filesystem
+- * @destroying: bool to stop migration once we start destruction
+- * @isolated: int to count the number of pages currently in isolation
+ *
+ * This structure is allocated at pool creation time and maintains metadata
+ * pertaining to a particular z3fold pool.
+@@ -165,11 +162,8 @@ struct z3fold_pool {
+ const struct zpool_ops *zpool_ops;
+ struct workqueue_struct *compact_wq;
+ struct workqueue_struct *release_wq;
+- struct wait_queue_head isolate_wait;
+ struct work_struct work;
+ struct inode *inode;
+- bool destroying;
+- int isolated;
+ };
+
+ /*
+@@ -777,7 +771,6 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
+ goto out_c;
+ spin_lock_init(&pool->lock);
+ spin_lock_init(&pool->stale_lock);
+- init_waitqueue_head(&pool->isolate_wait);
+ pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
+ if (!pool->unbuddied)
+ goto out_pool;
+@@ -817,15 +810,6 @@ out:
+ return NULL;
+ }
+
+-static bool pool_isolated_are_drained(struct z3fold_pool *pool)
+-{
+- bool ret;
+-
+- spin_lock(&pool->lock);
+- ret = pool->isolated == 0;
+- spin_unlock(&pool->lock);
+- return ret;
+-}
+ /**
+ * z3fold_destroy_pool() - destroys an existing z3fold pool
+ * @pool: the z3fold pool to be destroyed
+@@ -835,22 +819,6 @@ static bool pool_isolated_are_drained(struct z3fold_pool *pool)
+ static void z3fold_destroy_pool(struct z3fold_pool *pool)
+ {
+ kmem_cache_destroy(pool->c_handle);
+- /*
+- * We set pool-> destroying under lock to ensure that
+- * z3fold_page_isolate() sees any changes to destroying. This way we
+- * avoid the need for any memory barriers.
+- */
+-
+- spin_lock(&pool->lock);
+- pool->destroying = true;
+- spin_unlock(&pool->lock);
+-
+- /*
+- * We need to ensure that no pages are being migrated while we destroy
+- * these workqueues, as migration can queue work on either of the
+- * workqueues.
+- */
+- wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
+
+ /*
+ * We need to destroy pool->compact_wq before pool->release_wq,
+@@ -1341,28 +1309,6 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
+ return atomic64_read(&pool->pages_nr);
+ }
+
+-/*
+- * z3fold_dec_isolated() expects to be called while pool->lock is held.
+- */
+-static void z3fold_dec_isolated(struct z3fold_pool *pool)
+-{
+- assert_spin_locked(&pool->lock);
+- VM_BUG_ON(pool->isolated <= 0);
+- pool->isolated--;
+-
+- /*
+- * If we have no more isolated pages, we have to see if
+- * z3fold_destroy_pool() is waiting for a signal.
+- */
+- if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
+- wake_up_all(&pool->isolate_wait);
+-}
+-
+-static void z3fold_inc_isolated(struct z3fold_pool *pool)
+-{
+- pool->isolated++;
+-}
+-
+ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ {
+ struct z3fold_header *zhdr;
+@@ -1389,34 +1335,6 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ spin_lock(&pool->lock);
+ if (!list_empty(&page->lru))
+ list_del(&page->lru);
+- /*
+- * We need to check for destruction while holding pool->lock, as
+- * otherwise destruction could see 0 isolated pages, and
+- * proceed.
+- */
+- if (unlikely(pool->destroying)) {
+- spin_unlock(&pool->lock);
+- /*
+- * If this page isn't stale, somebody else holds a
+- * reference to it. Let't drop our refcount so that they
+- * can call the release logic.
+- */
+- if (unlikely(kref_put(&zhdr->refcount,
+- release_z3fold_page_locked))) {
+- /*
+- * If we get here we have kref problems, so we
+- * should freak out.
+- */
+- WARN(1, "Z3fold is experiencing kref problems\n");
+- z3fold_page_unlock(zhdr);
+- return false;
+- }
+- z3fold_page_unlock(zhdr);
+- return false;
+- }
+-
+-
+- z3fold_inc_isolated(pool);
+ spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
+ return true;
+@@ -1485,10 +1403,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+
+ queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
+
+- spin_lock(&pool->lock);
+- z3fold_dec_isolated(pool);
+- spin_unlock(&pool->lock);
+-
+ page_mapcount_reset(page);
+ put_page(page);
+ return 0;
+@@ -1508,14 +1422,10 @@ static void z3fold_page_putback(struct page *page)
+ INIT_LIST_HEAD(&page->lru);
+ if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
+ atomic64_dec(&pool->pages_nr);
+- spin_lock(&pool->lock);
+- z3fold_dec_isolated(pool);
+- spin_unlock(&pool->lock);
+ return;
+ }
+ spin_lock(&pool->lock);
+ list_add(&page->lru, &pool->lru);
+- z3fold_dec_isolated(pool);
+ spin_unlock(&pool->lock);
+ z3fold_page_unlock(zhdr);
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 17c50a98e7f7..9e4fcf406d9c 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5588,11 +5588,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_UNKNOWN_CONN_ID);
+
+- if (min < hcon->le_conn_min_interval ||
+- max > hcon->le_conn_max_interval)
+- return send_conn_param_neg_reply(hdev, handle,
+- HCI_ERROR_INVALID_LL_PARAMS);
+-
+ if (hci_check_conn_params(min, max, latency, timeout))
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 32d2be9d6858..771e3e17bb6a 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5297,14 +5297,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+
+ memset(&rsp, 0, sizeof(rsp));
+
+- if (min < hcon->le_conn_min_interval ||
+- max > hcon->le_conn_max_interval) {
+- BT_DBG("requested connection interval exceeds current bounds.");
+- err = -EINVAL;
+- } else {
+- err = hci_check_conn_params(min, max, latency, to_multiplier);
+- }
+-
++ err = hci_check_conn_params(min, max, latency, to_multiplier);
+ if (err)
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
+index 899e34ceb560..e35736b99300 100644
+--- a/net/ipv4/raw_diag.c
++++ b/net/ipv4/raw_diag.c
+@@ -24,9 +24,6 @@ raw_get_hashinfo(const struct inet_diag_req_v2 *r)
+ return &raw_v6_hashinfo;
+ #endif
+ } else {
+- pr_warn_once("Unexpected inet family %d\n",
+- r->sdiag_family);
+- WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index d7f3776dfd71..637ce3e8c575 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ return;
+ }
+
+- /* So that subsequent socket matching not to require other lookups. */
+- skb->sk = sk;
+-
+ switch(priv->key) {
+ case NFT_SOCKET_TRANSPARENT:
+ nft_reg_store8(dest, inet_sk_transparent(sk));
+@@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ WARN_ON(1);
+ regs->verdict.code = NFT_BREAK;
+ }
++
++ if (sk != skb->sk)
++ sock_gen_put(sk);
+ }
+
+ static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
+diff --git a/net/rds/bind.c b/net/rds/bind.c
+index 0f4398e7f2a7..93e336535d3b 100644
+--- a/net/rds/bind.c
++++ b/net/rds/bind.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+@@ -239,34 +239,33 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ }
+
+- sock_set_flag(sk, SOCK_RCU_FREE);
+- ret = rds_add_bound(rs, binding_addr, &port, scope_id);
+- if (ret)
+- goto out;
+-
+- if (rs->rs_transport) { /* previously bound */
++ /* The transport can be set using SO_RDS_TRANSPORT option before the
++ * socket is bound.
++ */
++ if (rs->rs_transport) {
+ trans = rs->rs_transport;
+- if (trans->laddr_check(sock_net(sock->sk),
++ if (!trans->laddr_check ||
++ trans->laddr_check(sock_net(sock->sk),
+ binding_addr, scope_id) != 0) {
+ ret = -ENOPROTOOPT;
+- rds_remove_bound(rs);
+- } else {
+- ret = 0;
++ goto out;
+ }
+- goto out;
+- }
+- trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr,
+- scope_id);
+- if (!trans) {
+- ret = -EADDRNOTAVAIL;
+- rds_remove_bound(rs);
+- pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
+- __func__, binding_addr);
+- goto out;
++ } else {
++ trans = rds_trans_get_preferred(sock_net(sock->sk),
++ binding_addr, scope_id);
++ if (!trans) {
++ ret = -EADDRNOTAVAIL;
++ pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
++ __func__, binding_addr);
++ goto out;
++ }
++ rs->rs_transport = trans;
+ }
+
+- rs->rs_transport = trans;
+- ret = 0;
++ sock_set_flag(sk, SOCK_RCU_FREE);
++ ret = rds_add_bound(rs, binding_addr, &port, scope_id);
++ if (ret)
++ rs->rs_transport = NULL;
+
+ out:
+ release_sock(sk);
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 04faee7ccbce..1047825d9f48 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1920,6 +1920,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
+ cl = cops->find(q, portid);
+ if (!cl)
+ return;
++ if (!cops->tcf_block)
++ return;
+ block = cops->tcf_block(q, cl, NULL);
+ if (!block)
+ return;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index d5342687fdca..7c2fa80b20bd 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -915,6 +915,7 @@ restart:
+ } else if (delta > 0) {
+ p = &parent->rb_right;
+ } else {
++ bool same_prefixlen = node->prefixlen == n->prefixlen;
+ struct xfrm_policy *tmp;
+
+ hlist_for_each_entry(tmp, &n->hhead, bydst) {
+@@ -922,9 +923,11 @@ restart:
+ hlist_del_rcu(&tmp->bydst);
+ }
+
++ node->prefixlen = prefixlen;
++
+ xfrm_policy_inexact_list_reinsert(net, node, family);
+
+- if (node->prefixlen == n->prefixlen) {
++ if (same_prefixlen) {
+ kfree_rcu(n, rcu);
+ return;
+ }
+@@ -932,7 +935,6 @@ restart:
+ rb_erase(*p, new);
+ kfree_rcu(n, rcu);
+ n = node;
+- n->prefixlen = prefixlen;
+ goto restart;
+ }
+ }
+diff --git a/sound/firewire/dice/dice-alesis.c b/sound/firewire/dice/dice-alesis.c
+index 218292bdace6..f5b325263b67 100644
+--- a/sound/firewire/dice/dice-alesis.c
++++ b/sound/firewire/dice/dice-alesis.c
+@@ -15,7 +15,7 @@ alesis_io14_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
+
+ static const unsigned int
+ alesis_io26_tx_pcm_chs[MAX_STREAMS][SND_DICE_RATE_MODE_COUNT] = {
+- {10, 10, 8}, /* Tx0 = Analog + S/PDIF. */
++ {10, 10, 4}, /* Tx0 = Analog + S/PDIF. */
+ {16, 8, 0}, /* Tx1 = ADAT1 + ADAT2. */
+ };
+
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 5732c31c4167..e7da1a59884a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2514,8 +2514,7 @@ static const struct pci_device_id azx_ids[] = {
+ AZX_DCAPS_PM_RUNTIME },
+ /* AMD Raven */
+ { PCI_DEVICE(0x1022, 0x15e3),
+- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+- AZX_DCAPS_PM_RUNTIME },
++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* ATI HDMI */
+ { PCI_DEVICE(0x1002, 0x0002),
+ .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
+index e283966bdbb1..bc9dd8e6fd86 100644
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -357,6 +357,7 @@ static const struct hda_fixup ad1986a_fixups[] = {
+
+ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
++ SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9V", AD1986A_FIXUP_LAPTOP_IMIC),
+ SND_PCI_QUIRK(0x1043, 0x1443, "ASUS Z99He", AD1986A_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8JN", AD1986A_FIXUP_EAPD),
+ SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 78858918cbc1..b6f7b13768a1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1655,6 +1655,8 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ case 0x152a: /* Thesycon devices */
+ case 0x25ce: /* Mytek devices */
+ case 0x2ab6: /* T+A devices */
++ case 0x3842: /* EVGA */
++ case 0xc502: /* HiBy devices */
+ if (fp->dsd_raw)
+ return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ break;
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index 88158239622b..20f67fcf378d 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -35,7 +35,7 @@ INCLUDES := -I$(srctree)/tools/include \
+ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
+ WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
+-CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
++CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+ LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+
+ # Allow old libelf to be used:
+diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
+index 5445943bf07f..7a1bf94c5bd3 100755
+--- a/tools/testing/selftests/net/xfrm_policy.sh
++++ b/tools/testing/selftests/net/xfrm_policy.sh
+@@ -106,6 +106,13 @@ do_overlap()
+ #
+ # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
+ ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
++
++ # similar to above: add policies (with partially random address), with shrinking prefixes.
++ for p in 29 28 27;do
++ for k in $(seq 1 32); do
++ ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null
++ done
++ done
+ }
+
+ do_esp_policy_get_check() {
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-21 16:23 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-21 16:23 UTC (permalink / raw
To: gentoo-commits
commit: b7a9c69a82eb6aea9059b1ee493d8349aa02eb15
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Sep 21 16:22:36 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Sep 21 16:22:36 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b7a9c69a
Linux patch 5.2.17
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1016_linux-5.2.17.patch | 4122 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4126 insertions(+)
diff --git a/0000_README b/0000_README
index c046e8a..200ad40 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-5.2.16.patch
From: https://www.kernel.org
Desc: Linux 5.2.16
+Patch: 1016_linux-5.2.17.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.17
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1016_linux-5.2.17.patch b/1016_linux-5.2.17.patch
new file mode 100644
index 0000000..8e36dc2
--- /dev/null
+++ b/1016_linux-5.2.17.patch
@@ -0,0 +1,4122 @@
+diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
+index 1da2f1668f08..845d689e0fd7 100644
+--- a/Documentation/filesystems/overlayfs.txt
++++ b/Documentation/filesystems/overlayfs.txt
+@@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path.
+
+ Using an upper layer path and/or a workdir path that are already used by
+ another overlay mount is not allowed and may fail with EBUSY. Using
+-partially overlapping paths is not allowed but will not fail with EBUSY.
++partially overlapping paths is not allowed and may fail with EBUSY.
+ If files are accessed from two overlayfs mounts which share or overlap the
+ upper layer and/or workdir path the behavior of the overlay is undefined,
+ though it will not result in a crash or deadlock.
+diff --git a/Makefile b/Makefile
+index 3cec03e93b40..32226d81fbb5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
+index ced1a19d5f89..46849d6ecb3e 100644
+--- a/arch/arm/boot/dts/am33xx-l4.dtsi
++++ b/arch/arm/boot/dts/am33xx-l4.dtsi
+@@ -185,7 +185,7 @@
+ uart0: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <72>;
+ status = "disabled";
+ dmas = <&edma 26 0>, <&edma 27 0>;
+@@ -934,7 +934,7 @@
+ uart1: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <73>;
+ status = "disabled";
+ dmas = <&edma 28 0>, <&edma 29 0>;
+@@ -966,7 +966,7 @@
+ uart2: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <74>;
+ status = "disabled";
+ dmas = <&edma 30 0>, <&edma 31 0>;
+@@ -1614,7 +1614,7 @@
+ uart3: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <44>;
+ status = "disabled";
+ };
+@@ -1644,7 +1644,7 @@
+ uart4: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <45>;
+ status = "disabled";
+ };
+@@ -1674,7 +1674,7 @@
+ uart5: serial@0 {
+ compatible = "ti,am3352-uart", "ti,omap3-uart";
+ clock-frequency = <48000000>;
+- reg = <0x0 0x2000>;
++ reg = <0x0 0x1000>;
+ interrupts = <46>;
+ status = "disabled";
+ };
+@@ -1758,6 +1758,8 @@
+
+ target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
++ reg = <0xcc020 0x4>;
++ reg-names = "rev";
+ ti,hwmods = "d_can0";
+ /* Domains (P, C): per_pwrdm, l4ls_clkdm */
+ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
+@@ -1780,6 +1782,8 @@
+
+ target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
++ reg = <0xd0020 0x4>;
++ reg-names = "rev";
+ ti,hwmods = "d_can1";
+ /* Domains (P, C): per_pwrdm, l4ls_clkdm */
+ clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
+diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
+index e5c2f71a7c77..fb6b8aa12cc5 100644
+--- a/arch/arm/boot/dts/am33xx.dtsi
++++ b/arch/arm/boot/dts/am33xx.dtsi
+@@ -234,13 +234,33 @@
+ interrupt-names = "edma3_tcerrint";
+ };
+
+- mmc3: mmc@47810000 {
+- compatible = "ti,omap4-hsmmc";
++ target-module@47810000 {
++ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mmc3";
+- ti,needs-special-reset;
+- interrupts = <29>;
+- reg = <0x47810000 0x1000>;
+- status = "disabled";
++ reg = <0x478102fc 0x4>,
++ <0x47810110 0x4>,
++ <0x47810114 0x4>;
++ reg-names = "rev", "sysc", "syss";
++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
++ SYSC_OMAP2_ENAWAKEUP |
++ SYSC_OMAP2_SOFTRESET |
++ SYSC_OMAP2_AUTOIDLE)>;
++ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
++ <SYSC_IDLE_NO>,
++ <SYSC_IDLE_SMART>;
++ ti,syss-mask = <1>;
++ clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
++ clock-names = "fck";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x47810000 0x1000>;
++
++ mmc3: mmc@0 {
++ compatible = "ti,omap4-hsmmc";
++ ti,needs-special-reset;
++ interrupts = <29>;
++ reg = <0x0 0x1000>;
++ };
+ };
+
+ usb: usb@47400000 {
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index 55aff4db9c7c..848e2a8884e2 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -228,13 +228,33 @@
+ interrupt-names = "edma3_tcerrint";
+ };
+
+- mmc3: mmc@47810000 {
+- compatible = "ti,omap4-hsmmc";
+- reg = <0x47810000 0x1000>;
++ target-module@47810000 {
++ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mmc3";
+- ti,needs-special-reset;
+- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+- status = "disabled";
++ reg = <0x478102fc 0x4>,
++ <0x47810110 0x4>,
++ <0x47810114 0x4>;
++ reg-names = "rev", "sysc", "syss";
++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
++ SYSC_OMAP2_ENAWAKEUP |
++ SYSC_OMAP2_SOFTRESET |
++ SYSC_OMAP2_AUTOIDLE)>;
++ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
++ <SYSC_IDLE_NO>,
++ <SYSC_IDLE_SMART>;
++ ti,syss-mask = <1>;
++ clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
++ clock-names = "fck";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x47810000 0x1000>;
++
++ mmc3: mmc@0 {
++ compatible = "ti,omap4-hsmmc";
++ ti,needs-special-reset;
++ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
++ reg = <0x0 0x1000>;
++ };
+ };
+
+ sham: sham@53100000 {
+diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
+index 989cb60b9029..04bee4ff9dcb 100644
+--- a/arch/arm/boot/dts/am437x-l4.dtsi
++++ b/arch/arm/boot/dts/am437x-l4.dtsi
+@@ -1574,6 +1574,8 @@
+
+ target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
++ reg = <0xcc020 0x4>;
++ reg-names = "rev";
+ ti,hwmods = "d_can0";
+ /* Domains (P, C): per_pwrdm, l4ls_clkdm */
+ clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
+@@ -1593,6 +1595,8 @@
+
+ target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
++ reg = <0xd0020 0x4>;
++ reg-names = "rev";
+ ti,hwmods = "d_can1";
+ /* Domains (P, C): per_pwrdm, l4ls_clkdm */
+ clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
+diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
+index 1d5e99964bbf..0aaacea1d887 100644
+--- a/arch/arm/boot/dts/am571x-idk.dts
++++ b/arch/arm/boot/dts/am571x-idk.dts
+@@ -175,14 +175,9 @@
+ };
+
+ &mmc1 {
+- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
++ pinctrl-names = "default", "hs";
+ pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
+ pinctrl-1 = <&mmc1_pins_hs>;
+- pinctrl-2 = <&mmc1_pins_sdr12>;
+- pinctrl-3 = <&mmc1_pins_sdr25>;
+- pinctrl-4 = <&mmc1_pins_sdr50>;
+- pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
+- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
+ };
+
+ &mmc2 {
+diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
+index c65d7f6d3b5a..ea1c119feaa5 100644
+--- a/arch/arm/boot/dts/am572x-idk.dts
++++ b/arch/arm/boot/dts/am572x-idk.dts
+@@ -16,14 +16,9 @@
+ };
+
+ &mmc1 {
+- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
++ pinctrl-names = "default", "hs";
+ pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
+ pinctrl-1 = <&mmc1_pins_hs>;
+- pinctrl-2 = <&mmc1_pins_sdr12>;
+- pinctrl-3 = <&mmc1_pins_sdr25>;
+- pinctrl-4 = <&mmc1_pins_sdr50>;
+- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
+- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
+ };
+
+ &mmc2 {
+diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
+index dc5141c35610..7935d70874ce 100644
+--- a/arch/arm/boot/dts/am574x-idk.dts
++++ b/arch/arm/boot/dts/am574x-idk.dts
+@@ -24,14 +24,9 @@
+ };
+
+ &mmc1 {
+- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
++ pinctrl-names = "default", "hs";
+ pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
+ pinctrl-1 = <&mmc1_pins_hs>;
+- pinctrl-2 = <&mmc1_pins_default>;
+- pinctrl-3 = <&mmc1_pins_hs>;
+- pinctrl-4 = <&mmc1_pins_sdr50>;
+- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
+- pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
+ };
+
+ &mmc2 {
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+index d02f5fa61e5f..bc76f1705c0f 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
++++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+@@ -379,7 +379,7 @@
+ };
+ };
+
+-&gpio7 {
++&gpio7_target {
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+ };
+@@ -430,6 +430,7 @@
+
+ bus-width = <4>;
+ cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
++ no-1-8-v;
+ };
+
+ &mmc2 {
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+index a374b5cd6db0..7b113b52c3fb 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+@@ -16,14 +16,9 @@
+ };
+
+ &mmc1 {
+- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
++ pinctrl-names = "default", "hs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_hs>;
+- pinctrl-2 = <&mmc1_pins_sdr12>;
+- pinctrl-3 = <&mmc1_pins_sdr25>;
+- pinctrl-4 = <&mmc1_pins_sdr50>;
+- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
+- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
+ vmmc-supply = <&vdd_3v3>;
+ vqmmc-supply = <&ldo1_reg>;
+ };
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+index 4badd2144db9..30c500b15b21 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+@@ -16,14 +16,9 @@
+ };
+
+ &mmc1 {
+- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104";
++ pinctrl-names = "default", "hs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_hs>;
+- pinctrl-2 = <&mmc1_pins_sdr12>;
+- pinctrl-3 = <&mmc1_pins_sdr25>;
+- pinctrl-4 = <&mmc1_pins_sdr50>;
+- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
+- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
+ vmmc-supply = <&vdd_3v3>;
+ vqmmc-supply = <&ldo1_reg>;
+ };
+diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
+index 714e971b912a..de7f85efaa51 100644
+--- a/arch/arm/boot/dts/dra7-evm.dts
++++ b/arch/arm/boot/dts/dra7-evm.dts
+@@ -498,7 +498,7 @@
+ phy-supply = <&ldousb_reg>;
+ };
+
+-&gpio7 {
++&gpio7_target {
+ ti,no-reset-on-init;
+ ti,no-idle-on-init;
+ };
+diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
+index 23faedec08ab..21e5914fdd62 100644
+--- a/arch/arm/boot/dts/dra7-l4.dtsi
++++ b/arch/arm/boot/dts/dra7-l4.dtsi
+@@ -1261,7 +1261,7 @@
+ };
+ };
+
+- target-module@51000 { /* 0x48051000, ap 45 2e.0 */
++ gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "gpio7";
+ reg = <0x51000 0x4>,
+@@ -3025,7 +3025,7 @@
+
+ target-module@80000 { /* 0x48480000, ap 31 16.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+- reg = <0x80000 0x4>;
++ reg = <0x80020 0x4>;
+ reg-names = "rev";
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
+ clock-names = "fck";
+@@ -4577,7 +4577,7 @@
+
+ target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+- reg = <0xc000 0x4>;
++ reg = <0xc020 0x4>;
+ reg-names = "rev";
+ clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
+ clock-names = "fck";
+diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+index 28ebb4eb884a..214b9e6de2c3 100644
+--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
++++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+@@ -32,7 +32,7 @@
+ *
+ * Datamanual Revisions:
+ *
+- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016
++ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
+ * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
+ *
+ */
+@@ -229,45 +229,45 @@
+
+ mmc3_pins_default: mmc3_pins_default {
+ pinctrl-single,pins = <
+- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ >;
+ };
+
+ mmc3_pins_hs: mmc3_pins_hs {
+ pinctrl-single,pins = <
+- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ >;
+ };
+
+ mmc3_pins_sdr12: mmc3_pins_sdr12 {
+ pinctrl-single,pins = <
+- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ >;
+ };
+
+ mmc3_pins_sdr25: mmc3_pins_sdr25 {
+ pinctrl-single,pins = <
+- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
+- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
+- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
+- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
+- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
+- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
+ >;
+ };
+
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+index 81159af44862..14a6c3eb3298 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+@@ -126,6 +126,8 @@ restart:
+ orr r11, r11, r13 @ mask all requested interrupts
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
++ str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
++
+ ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
+ beq hksw @ no - try next source
+
+@@ -133,7 +135,6 @@ restart:
+ @@@@@@@@@@@@@@@@@@@@@@
+ @ Keyboard clock FIQ mode interrupt handler
+ @ r10 now contains KEYBRD_CLK_MASK, use it
+- str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
+ bic r11, r11, r10 @ unmask it
+ str r11, [r12, #OMAP1510_GPIO_INT_MASK]
+
+diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
+index 0af2bf6f9933..fd87382a3f18 100644
+--- a/arch/arm/mach-omap1/ams-delta-fiq.c
++++ b/arch/arm/mach-omap1/ams-delta-fiq.c
+@@ -69,9 +69,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
+ * interrupts default to since commit 80ac93c27441
+ * requires interrupt already acked and unmasked.
+ */
+- if (irq_chip->irq_ack)
+- irq_chip->irq_ack(d);
+- if (irq_chip->irq_unmask)
++ if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
+ irq_chip->irq_unmask(d);
+ }
+ for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
+diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
+index f9c02f9f1c92..5c3845730dbf 100644
+--- a/arch/arm/mach-omap2/omap4-common.c
++++ b/arch/arm/mach-omap2/omap4-common.c
+@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
+ struct device_node *np;
+ struct gen_pool *sram_pool;
+
++ if (!soc_is_omap44xx() && !soc_is_omap54xx())
++ return 0;
++
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np)
+ pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+index 4a5b4aee6615..1ec21e9ba1e9 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
+ static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
+ .rev_offs = 0x0,
+ .sysc_offs = 0x4,
+- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET,
++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
++ SYSC_HAS_RESET_STATUS,
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type2,
+ };
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 749a5a6f6143..98e17388a563 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -174,6 +174,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
+ #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+ int pfn_valid(unsigned long pfn)
+ {
++ phys_addr_t addr = __pfn_to_phys(pfn);
++
++ if (__phys_to_pfn(addr) != pfn)
++ return 0;
++
+ return memblock_is_map_memory(__pfn_to_phys(pfn));
+ }
+ EXPORT_SYMBOL(pfn_valid);
+@@ -613,7 +618,8 @@ static void update_sections_early(struct section_perm perms[], int n)
+ if (t->flags & PF_KTHREAD)
+ continue;
+ for_each_thread(t, s)
+- set_section_perms(perms, n, true, s->mm);
++ if (s->mm)
++ set_section_perms(perms, n, true, s->mm);
+ }
+ set_section_perms(perms, n, true, current->active_mm);
+ set_section_perms(perms, n, true, &init_mm);
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+index 9f72396ba710..4c92c197aeb8 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+@@ -591,6 +591,7 @@
+ clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "ddr";
+ phys = <&usb2_phy1>;
++ phy-names = "usb2-phy";
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+index a7dc11e36fd9..071f66d8719e 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+@@ -97,7 +97,7 @@
+ reg = <0x0 0x48000000 0x0 0x18000000>;
+ };
+
+- reg_1p8v: regulator0 {
++ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+@@ -106,7 +106,7 @@
+ regulator-always-on;
+ };
+
+- reg_3p3v: regulator1 {
++ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+@@ -115,7 +115,7 @@
+ regulator-always-on;
+ };
+
+- reg_12p0v: regulator1 {
++ reg_12p0v: regulator-12p0v {
+ compatible = "regulator-fixed";
+ regulator-name = "D12.0V";
+ regulator-min-microvolt = <12000000>;
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index b9574d850f14..4e07aa514f60 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -214,8 +214,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
+ * Only if the new pte is valid and kernel, otherwise TLB maintenance
+ * or update_mmu_cache() have the necessary barriers.
+ */
+- if (pte_valid_not_user(pte))
++ if (pte_valid_not_user(pte)) {
+ dsb(ishst);
++ isb();
++ }
+ }
+
+ extern void __sync_icache_dcache(pte_t pteval);
+@@ -453,8 +455,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+
+ WRITE_ONCE(*pmdp, pmd);
+
+- if (pmd_valid(pmd))
++ if (pmd_valid(pmd)) {
+ dsb(ishst);
++ isb();
++ }
+ }
+
+ static inline void pmd_clear(pmd_t *pmdp)
+@@ -512,8 +516,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
+
+ WRITE_ONCE(*pudp, pud);
+
+- if (pud_valid(pud))
++ if (pud_valid(pud)) {
+ dsb(ishst);
++ isb();
++ }
+ }
+
+ static inline void pud_clear(pud_t *pudp)
+diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
+index 273ae66a9a45..8deb432c2975 100644
+--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
+@@ -515,14 +515,6 @@ void __init radix__early_init_devtree(void)
+ mmu_psize_defs[MMU_PAGE_64K].shift = 16;
+ mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
+ found:
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+- if (mmu_psize_defs[MMU_PAGE_2M].shift) {
+- /*
+- * map vmemmap using 2M if available
+- */
+- mmu_vmemmap_psize = MMU_PAGE_2M;
+- }
+-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+ return;
+ }
+
+@@ -587,7 +579,13 @@ void __init radix__early_init_mmu(void)
+
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* vmemmap mapping */
+- mmu_vmemmap_psize = mmu_virtual_psize;
++ if (mmu_psize_defs[MMU_PAGE_2M].shift) {
++ /*
++ * map vmemmap using 2M if available
++ */
++ mmu_vmemmap_psize = MMU_PAGE_2M;
++ } else
++ mmu_vmemmap_psize = mmu_virtual_psize;
+ #endif
+ /*
+ * initialize page table size
+diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
+index c207f6634b91..15b3edaabc28 100644
+--- a/arch/riscv/include/asm/fixmap.h
++++ b/arch/riscv/include/asm/fixmap.h
+@@ -25,10 +25,6 @@ enum fixed_addresses {
+ __end_of_fixed_addresses
+ };
+
+-#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
+-#define FIXADDR_TOP (VMALLOC_START)
+-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+-
+ #define FIXMAP_PAGE_IO PAGE_KERNEL
+
+ #define __early_set_fixmap __set_fixmap
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index f7c3f7de15f2..e6faa469c133 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -408,14 +408,22 @@ static inline void pgtable_cache_init(void)
+ #define VMALLOC_END (PAGE_OFFSET - 1)
+ #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+
++#define FIXADDR_TOP VMALLOC_START
++#ifdef CONFIG_64BIT
++#define FIXADDR_SIZE PMD_SIZE
++#else
++#define FIXADDR_SIZE PGDIR_SIZE
++#endif
++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
++
+ /*
+- * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
++ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ */
+ #ifdef CONFIG_64BIT
+ #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
+ #else
+-#define TASK_SIZE VMALLOC_START
++#define TASK_SIZE FIXADDR_START
+ #endif
+
+ #include <asm-generic/pgtable.h>
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 5e7c63033159..fd9844f947f7 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -853,7 +853,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ break;
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ /* lcgr %dst,%dst */
+- EMIT4(0xb9130000, dst_reg, dst_reg);
++ EMIT4(0xb9030000, dst_reg, dst_reg);
+ break;
+ /*
+ * BPF_FROM_BE/LE
+@@ -1027,8 +1027,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ /* llgf %w1,map.max_entries(%b2) */
+ EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
+ offsetof(struct bpf_array, map.max_entries));
+- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
+- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
++ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
++ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
+ REG_W1, 0, 0xa);
+
+ /*
+@@ -1054,8 +1054,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
+ * goto out;
+ */
+
+- /* sllg %r1,%b3,3: %r1 = index * 8 */
+- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
++ /* llgfr %r1,%b3: %r1 = (u32) index */
++ EMIT4(0xb9160000, REG_1, BPF_REG_3);
++ /* sllg %r1,%r1,3: %r1 *= 8 */
++ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
+ /* lg %r1,prog(%b2,%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+ REG_1, offsetof(struct bpf_array, ptrs));
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 62f317c9113a..5b35b7ea5d72 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -661,10 +661,17 @@ fail:
+
+ throttle = perf_event_overflow(event, &data, &regs);
+ out:
+- if (throttle)
++ if (throttle) {
+ perf_ibs_stop(event, 0);
+- else
+- perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
++ } else {
++ period >>= 4;
++
++ if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
++ (*config & IBS_OP_CNT_CTL))
++ period |= *config & IBS_OP_CUR_CNT_RAND;
++
++ perf_ibs_enable_event(perf_ibs, hwc, period);
++ }
+
+ perf_event_update_userpage(event);
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 6179be624f35..2369ea1a1db7 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
+ return left;
+ }
+
++static u64 nhm_limit_period(struct perf_event *event, u64 left)
++{
++ return max(left, 32ULL);
++}
++
+ PMU_FORMAT_ATTR(event, "config:0-7" );
+ PMU_FORMAT_ATTR(umask, "config:8-15" );
+ PMU_FORMAT_ATTR(edge, "config:18" );
+@@ -4550,6 +4555,7 @@ __init int intel_pmu_init(void)
+ x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
+ x86_pmu.enable_all = intel_pmu_nhm_enable_all;
+ x86_pmu.extra_regs = intel_nehalem_extra_regs;
++ x86_pmu.limit_period = nhm_limit_period;
+
+ mem_attr = nhm_mem_events_attrs;
+
+diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
+index e65d7fe6489f..5208ba49c89a 100644
+--- a/arch/x86/hyperv/mmu.c
++++ b/arch/x86/hyperv/mmu.c
+@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
+ * Lower 12 bits encode the number of additional
+ * pages to flush (in addition to the 'cur' page).
+ */
+- if (diff >= HV_TLB_FLUSH_UNIT)
++ if (diff >= HV_TLB_FLUSH_UNIT) {
+ gva_list[gva_n] |= ~PAGE_MASK;
+- else if (diff)
++ cur += HV_TLB_FLUSH_UNIT;
++ } else if (diff) {
+ gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
++ cur = end;
++ }
+
+- cur += HV_TLB_FLUSH_UNIT;
+ gva_n++;
+
+ } while (cur < end);
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index 1392d5e6e8d6..ee26e9215f18 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -252,16 +252,20 @@ struct pebs_lbr {
+ #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
+ #define IBSCTL_LVT_OFFSET_MASK 0x0F
+
+-/* ibs fetch bits/masks */
++/* IBS fetch bits/masks */
+ #define IBS_FETCH_RAND_EN (1ULL<<57)
+ #define IBS_FETCH_VAL (1ULL<<49)
+ #define IBS_FETCH_ENABLE (1ULL<<48)
+ #define IBS_FETCH_CNT 0xFFFF0000ULL
+ #define IBS_FETCH_MAX_CNT 0x0000FFFFULL
+
+-/* ibs op bits/masks */
+-/* lower 4 bits of the current count are ignored: */
+-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32)
++/*
++ * IBS op bits/masks
++ * The lower 7 bits of the current count are random bits
++ * preloaded by hardware and ignored in software
++ */
++#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
++#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
+ #define IBS_OP_CNT_CTL (1ULL<<19)
+ #define IBS_OP_VAL (1ULL<<18)
+ #define IBS_OP_ENABLE (1ULL<<17)
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index c82abd6e4ca3..869794bd0fd9 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -442,8 +442,10 @@ __pu_label: \
+ ({ \
+ int __gu_err; \
+ __inttype(*(ptr)) __gu_val; \
++ __typeof__(ptr) __gu_ptr = (ptr); \
++ __typeof__(size) __gu_size = (size); \
+ __uaccess_begin_nospec(); \
+- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
++ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
+ __uaccess_end(); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __builtin_expect(__gu_err, 0); \
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index c9fec0657eea..e8c6466ef65e 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2434,7 +2434,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
+ * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
+ * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
+ */
+- return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
++ if (!ioapic_initialized)
++ return gsi_top;
++ /*
++ * For DT enabled machines ioapic_dynirq_base is irrelevant and not
++ * updated. So simply return @from if ioapic_dynirq_base == 0.
++ */
++ return ioapic_dynirq_base ? : from;
+ }
+
+ #ifdef CONFIG_X86_32
+diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
+index 2e2efa577437..8c37294f1d1e 100644
+--- a/drivers/atm/Kconfig
++++ b/drivers/atm/Kconfig
+@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
+ make the card work).
+
+ config ATM_NICSTAR_USE_IDT77105
+- bool "Use IDT77015 PHY driver (25Mbps)"
++ bool "Use IDT77105 PHY driver (25Mbps)"
+ depends on ATM_NICSTAR
+ help
+ Support for the PHYsical layer chip in ForeRunner LE25 cards. In
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index fee57f7f3821..81ac7805397d 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive,
+ v.native_format = UDP->native_format;
+ mutex_unlock(&floppy_mutex);
+
+- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
+ return 0;
+ }
+@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll,
+ v.bufblocks = UDRS->bufblocks;
+ mutex_unlock(&floppy_mutex);
+
+- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+ return -EFAULT;
+ return 0;
+ Eintr:
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index b72741668c92..0d122440d111 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -853,7 +853,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+- else if (idlemodes & SYSC_IDLE_FORCE)
++ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+@@ -1127,7 +1127,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
+ SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
+ 0xffff00f0, 0),
+- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
+ SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
+@@ -1388,10 +1389,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
+ if (error)
+ return 0;
+
+- if (val)
+- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
+- else
+- ddata->cfg.sysc_val = ddata->cap->sysc_mask;
++ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
+
+ return 0;
+ }
+@@ -2081,27 +2079,27 @@ static int sysc_probe(struct platform_device *pdev)
+
+ error = sysc_init_dts_quirks(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ error = sysc_map_and_check_registers(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ error = sysc_init_sysc_mask(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ error = sysc_init_idlemodes(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ error = sysc_init_syss_mask(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ error = sysc_init_pdata(ddata);
+ if (error)
+- goto unprepare;
++ return error;
+
+ sysc_init_early_quirks(ddata);
+
+@@ -2111,7 +2109,7 @@ static int sysc_probe(struct platform_device *pdev)
+
+ error = sysc_init_resets(ddata);
+ if (error)
+- return error;
++ goto unprepare;
+
+ error = sysc_init_module(ddata);
+ if (error)
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 54de669c38b8..f1d89bdebdda 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
++ * @channels_mask: bitfield of which DMA channels are managed by this driver
+ * @modules: bitmask of client modules in use
+ */
+ struct rcar_dmac {
+@@ -202,6 +203,7 @@ struct rcar_dmac {
+
+ unsigned int n_channels;
+ struct rcar_dmac_chan *channels;
++ unsigned int channels_mask;
+
+ DECLARE_BITMAP(modules, 256);
+ };
+@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
+ u16 dmaor;
+
+ /* Clear all channels and enable the DMAC globally. */
+- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
++ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
+ rcar_dmac_write(dmac, RCAR_DMAOR,
+ RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
+
+@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
+ for (i = 0; i < dmac->n_channels; ++i) {
+ struct rcar_dmac_chan *chan = &dmac->channels[i];
+
++ if (!(dmac->channels_mask & BIT(i)))
++ continue;
++
+ /* Stop and reinitialize the channel. */
+ spin_lock_irq(&chan->lock);
+ rcar_dmac_chan_halt(chan);
+@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
+ return 0;
+ }
+
++#define RCAR_DMAC_MAX_CHANNELS 32
++
+ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+ {
+ struct device_node *np = dev->of_node;
+@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
+ return ret;
+ }
+
+- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
++ /* The hardware and driver don't support more than 32 bits in CHCLR */
++ if (dmac->n_channels <= 0 ||
++ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
+ dev_err(dev, "invalid number of channels %u\n",
+ dmac->n_channels);
+ return -EINVAL;
+ }
+
++ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
++
+ return 0;
+ }
+
+@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
+ DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
+ DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
+- unsigned int channels_offset = 0;
+ struct dma_device *engine;
+ struct rcar_dmac *dmac;
+ struct resource *mem;
+@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ * level we can't disable it selectively, so ignore channel 0 for now if
+ * the device is part of an IOMMU group.
+ */
+- if (device_iommu_mapped(&pdev->dev)) {
+- dmac->n_channels--;
+- channels_offset = 1;
+- }
++ if (device_iommu_mapped(&pdev->dev))
++ dmac->channels_mask &= ~BIT(0);
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
+ INIT_LIST_HEAD(&engine->channels);
+
+ for (i = 0; i < dmac->n_channels; ++i) {
+- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
+- i + channels_offset);
++ if (!(dmac->channels_mask & BIT(i)))
++ continue;
++
++ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto error;
+ }
+diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
+index baac476c8622..525dc7338fe3 100644
+--- a/drivers/dma/sprd-dma.c
++++ b/drivers/dma/sprd-dma.c
+@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct dma_slave_config *slave_cfg = &schan->slave_cfg;
+ dma_addr_t src = 0, dst = 0;
++ dma_addr_t start_src = 0, start_dst = 0;
+ struct sprd_dma_desc *sdesc;
+ struct scatterlist *sg;
+ u32 len = 0;
+@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ dst = sg_dma_address(sg);
+ }
+
++ if (!i) {
++ start_src = src;
++ start_dst = dst;
++ }
++
+ /*
+ * The link-list mode needs at least 2 link-list
+ * configurations. If there is only one sg, it doesn't
+@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ }
+ }
+
+- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
+- dir, flags, slave_cfg);
++ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
++ start_dst, len, dir, flags, slave_cfg);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
+index ad2f0a4cd6a4..f255056696ee 100644
+--- a/drivers/dma/ti/dma-crossbar.c
++++ b/drivers/dma/ti/dma-crossbar.c
+@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
+
+ ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
+ nelm * 2);
+- if (ret)
++ if (ret) {
++ kfree(rsv_events);
+ return ret;
++ }
+
+ for (i = 0; i < nelm; i++) {
+ ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
+diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
+index ba27802efcd0..d07c0d5de7a2 100644
+--- a/drivers/dma/ti/omap-dma.c
++++ b/drivers/dma/ti/omap-dma.c
+@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)
+
+ rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
+ IRQF_SHARED, "omap-dma-engine", od);
+- if (rc)
++ if (rc) {
++ omap_dma_free(od);
+ return rc;
++ }
+ }
+
+ if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
+diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
+index fd5212c395c0..34d48618f3fc 100644
+--- a/drivers/firmware/google/vpd.c
++++ b/drivers/firmware/google/vpd.c
+@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len)
+ return VPD_OK;
+ }
+
+-static int vpd_section_attrib_add(const u8 *key, s32 key_len,
+- const u8 *value, s32 value_len,
++static int vpd_section_attrib_add(const u8 *key, u32 key_len,
++ const u8 *value, u32 value_len,
+ void *arg)
+ {
+ int ret;
+diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c
+index c62fa7063a7c..584d0d56491f 100644
+--- a/drivers/firmware/google/vpd_decode.c
++++ b/drivers/firmware/google/vpd_decode.c
+@@ -11,8 +11,8 @@
+
+ #include "vpd_decode.h"
+
+-static int vpd_decode_len(const s32 max_len, const u8 *in,
+- s32 *length, s32 *decoded_len)
++static int vpd_decode_len(const u32 max_len, const u8 *in,
++ u32 *length, u32 *decoded_len)
+ {
+ u8 more;
+ int i = 0;
+@@ -32,18 +32,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in,
+ } while (more);
+
+ *decoded_len = i;
++ return VPD_OK;
++}
++
++static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
++ u32 *_consumed, const u8 **entry, u32 *entry_len)
++{
++ u32 decoded_len;
++ u32 consumed = *_consumed;
++
++ if (vpd_decode_len(max_len - consumed, &input_buf[consumed],
++ entry_len, &decoded_len) != VPD_OK)
++ return VPD_FAIL;
++ if (max_len - consumed < decoded_len)
++ return VPD_FAIL;
++
++ consumed += decoded_len;
++ *entry = input_buf + consumed;
++
++ /* entry_len is untrusted data and must be checked again. */
++ if (max_len - consumed < *entry_len)
++ return VPD_FAIL;
+
++ consumed += decoded_len;
++ *_consumed = consumed;
+ return VPD_OK;
+ }
+
+-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg)
+ {
+ int type;
+- int res;
+- s32 key_len;
+- s32 value_len;
+- s32 decoded_len;
++ u32 key_len;
++ u32 value_len;
+ const u8 *key;
+ const u8 *value;
+
+@@ -58,26 +79,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
+ case VPD_TYPE_STRING:
+ (*consumed)++;
+
+- /* key */
+- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+- &key_len, &decoded_len);
+- if (res != VPD_OK || *consumed + decoded_len >= max_len)
++ if (vpd_decode_entry(max_len, input_buf, consumed, &key,
++ &key_len) != VPD_OK)
+ return VPD_FAIL;
+
+- *consumed += decoded_len;
+- key = &input_buf[*consumed];
+- *consumed += key_len;
+-
+- /* value */
+- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed],
+- &value_len, &decoded_len);
+- if (res != VPD_OK || *consumed + decoded_len > max_len)
++ if (vpd_decode_entry(max_len, input_buf, consumed, &value,
++ &value_len) != VPD_OK)
+ return VPD_FAIL;
+
+- *consumed += decoded_len;
+- value = &input_buf[*consumed];
+- *consumed += value_len;
+-
+ if (type == VPD_TYPE_STRING)
+ return callback(key, key_len, value, value_len,
+ callback_arg);
+diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h
+index cf8c2ace155a..8dbe41cac599 100644
+--- a/drivers/firmware/google/vpd_decode.h
++++ b/drivers/firmware/google/vpd_decode.h
+@@ -25,8 +25,8 @@ enum {
+ };
+
+ /* Callback for vpd_decode_string to invoke. */
+-typedef int vpd_decode_callback(const u8 *key, s32 key_len,
+- const u8 *value, s32 value_len,
++typedef int vpd_decode_callback(const u8 *key, u32 key_len,
++ const u8 *value, u32 value_len,
+ void *arg);
+
+ /*
+@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len,
+ * If one entry is successfully decoded, sends it to callback and returns the
+ * result.
+ */
+-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed,
++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed,
+ vpd_decode_callback callback, void *callback_arg);
+
+ #endif /* __VPD_DECODE_H */
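
The vpd_decode rework above switches all length bookkeeping from s32 to u32 and funnels key/value parsing through vpd_decode_entry(), which re-checks the untrusted entry_len after the length prefix has been consumed; with unsigned arithmetic, "max_len - consumed" can never go negative and slip past a signed comparison. Below is a minimal userspace sketch of the same two-step bounds check, with a single-byte length prefix standing in for the kernel's variable-length encoding; all names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Decode one length-prefixed entry from buf; returns 0 on success.
 * Everything is unsigned, so "max_len - *consumed" cannot go negative
 * and bypass the comparison the way a signed s32 could. */
static int decode_entry(const uint8_t *buf, uint32_t max_len,
                        uint32_t *consumed, const uint8_t **entry,
                        uint32_t *entry_len)
{
	if (max_len - *consumed < 1)
		return -1;               /* no room for the length byte */
	*entry_len = buf[*consumed];     /* untrusted length prefix */
	*consumed += 1;
	*entry = buf + *consumed;
	/* entry_len came from the blob and must be checked again */
	if (max_len - *consumed < *entry_len)
		return -1;
	*consumed += *entry_len;
	return 0;
}

int main(void)
{
	const uint8_t blob[] = { 3, 'k', 'e', 'y', 10, 'x' }; /* bad 2nd len */
	uint32_t consumed = 0, len;
	const uint8_t *p;

	if (decode_entry(blob, sizeof(blob), &consumed, &p, &len) == 0)
		printf("entry: %.*s\n", (int)len, p);
	if (decode_entry(blob, sizeof(blob), &consumed, &p, &len) != 0)
		printf("oversized length rejected\n");
	return 0;
}
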
+diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
+index a13f224303c6..0221dee8dd4c 100644
+--- a/drivers/fpga/altera-ps-spi.c
++++ b/drivers/fpga/altera-ps-spi.c
+@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
+ return -EIO;
+ }
+
+- if (!IS_ERR(conf->confd)) {
++ if (conf->confd) {
+ if (!gpiod_get_raw_value_cansleep(conf->confd)) {
+ dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
+ return -EIO;
+@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
+ return PTR_ERR(conf->status);
+ }
+
+- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN);
++ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
+ if (IS_ERR(conf->confd)) {
+- dev_warn(&spi->dev, "Not using confd gpio: %ld\n",
+- PTR_ERR(conf->confd));
++ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
++ PTR_ERR(conf->confd));
++ return PTR_ERR(conf->confd);
++ } else if (!conf->confd) {
++ dev_warn(&spi->dev, "Not using confd gpio");
+ }
+
+ /* Register manager with unique name */
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index a28a3d722ba2..62298ae5c81c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -535,21 +535,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity)
+ {
+ struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
+- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
+- struct dma_fence *other = centity->fences[idx];
++ struct dma_fence *other;
++ unsigned idx;
++ long r;
+
+- if (other) {
+- signed long r;
+- r = dma_fence_wait(other, true);
+- if (r < 0) {
+- if (r != -ERESTARTSYS)
+- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++ spin_lock(&ctx->ring_lock);
++ idx = centity->sequence & (amdgpu_sched_jobs - 1);
++ other = dma_fence_get(centity->fences[idx]);
++ spin_unlock(&ctx->ring_lock);
+
+- return r;
+- }
+- }
++ if (!other)
++ return 0;
+
+- return 0;
++ r = dma_fence_wait(other, true);
++ if (r < 0 && r != -ERESTARTSYS)
++ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
++
++ dma_fence_put(other);
++ return r;
+ }
+
+ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
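
The amdgpu_ctx change above shrinks the critical section to the slot lookup: it takes a reference on the fence while holding ring_lock, drops the lock, and only then sleeps in dma_fence_wait(), putting the reference afterwards, so a concurrent writer can swap the slot without the waiter dereferencing a freed fence. A generic pthreads sketch of the "pin under the lock, block outside it" pattern; the types and names are illustrative, not the DRM fence API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slot;	/* shared slot, may be replaced anytime */

static struct obj *obj_get(struct obj *o)
{
	if (o)
		atomic_fetch_add(&o->refcount, 1);
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

/* Pin the current occupant under the lock, then do the slow wait on
 * the pinned copy with the lock released. */
static void wait_on_slot(void)
{
	struct obj *o;

	pthread_mutex_lock(&slot_lock);
	o = obj_get(slot);
	pthread_mutex_unlock(&slot_lock);

	if (!o)
		return;
	/* ... potentially long blocking work on o, lock not held ... */
	obj_put(o);
}

int main(void)
{
	slot = calloc(1, sizeof(*slot));
	if (!slot)
		return 1;
	atomic_init(&slot->refcount, 1);
	wait_on_slot();
	obj_put(slot);	/* drop the slot's own reference */
	return 0;
}
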
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+index 9b9f87b84910..d98fe481cd36 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+@@ -2288,12 +2288,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
+ data->dpm_table.soc_table.dpm_state.soft_max_level =
+ data->dpm_table.soc_table.dpm_levels[soft_level].value;
+
+- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload boot level to highest!",
+ return ret);
+
+- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload dpm max level to highest!",
+ return ret);
+@@ -2326,12 +2330,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+ data->dpm_table.soc_table.dpm_state.soft_max_level =
+ data->dpm_table.soc_table.dpm_levels[soft_level].value;
+
+- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload boot level to highest!",
+ return ret);
+
+- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload dpm max level to highest!",
+ return ret);
+@@ -2342,14 +2350,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+
+ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+ {
++ struct vega20_hwmgr *data =
++ (struct vega20_hwmgr *)(hwmgr->backend);
++ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+
+- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
++ /* gfxclk soft min/max settings */
++ soft_min_level =
++ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
++ soft_max_level =
++ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
++
++ data->dpm_table.gfx_table.dpm_state.soft_min_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.gfx_table.dpm_state.soft_max_level =
++ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
++
++ /* uclk soft min/max settings */
++ soft_min_level =
++ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
++ soft_max_level =
++ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
++
++ data->dpm_table.mem_table.dpm_state.soft_min_level =
++ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.mem_table.dpm_state.soft_max_level =
++ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
++
++ /* socclk soft min/max settings */
++ soft_min_level =
++ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
++ soft_max_level =
++ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
++
++ data->dpm_table.soc_table.dpm_state.soft_min_level =
++ data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
++ data->dpm_table.soc_table.dpm_state.soft_max_level =
++ data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
++
++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload DPM Bootup Levels!",
+ return ret);
+
+- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
++ FEATURE_DPM_UCLK_MASK |
++ FEATURE_DPM_SOCCLK_MASK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to upload DPM Max Levels!",
+ return ret);
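
The vega20 hunks replace the 0xFFFFFFFF catch-all with the three DPM feature bits that should actually be reprogrammed, so unrelated features keep their current levels. A small sketch of why named masks beat a magic all-ones value; the feature bits here are hypothetical, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define FEATURE_GFXCLK	(1u << 0)
#define FEATURE_UCLK	(1u << 1)
#define FEATURE_SOCCLK	(1u << 2)
#define FEATURE_FAN	(1u << 3)	/* must not be disturbed here */

static void upload_min_levels(uint32_t feature_mask)
{
	if (feature_mask & FEATURE_GFXCLK)
		puts("programming gfxclk floor");
	if (feature_mask & FEATURE_UCLK)
		puts("programming uclk floor");
	if (feature_mask & FEATURE_SOCCLK)
		puts("programming socclk floor");
	if (feature_mask & FEATURE_FAN)
		puts("oops: fan control clobbered too");
}

int main(void)
{
	/* 0xFFFFFFFF would also hit FEATURE_FAN; say what you mean. */
	upload_min_levels(FEATURE_GFXCLK | FEATURE_UCLK | FEATURE_SOCCLK);
	return 0;
}
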
+diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
+index de0f882f0f7b..14b41de44ebc 100644
+--- a/drivers/gpu/drm/omapdrm/dss/output.c
++++ b/drivers/gpu/drm/omapdrm/dss/output.c
+@@ -4,6 +4,7 @@
+ * Author: Archit Taneja <archit@ti.com>
+ */
+
++#include <linux/bitops.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
+ {
+ struct device_node *remote_node;
+
+- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
++ remote_node = of_graph_get_remote_node(out->dev->of_node,
++ ffs(out->of_ports) - 1, 0);
+ if (!remote_node) {
+ dev_dbg(out->dev, "failed to find video sink\n");
+ return 0;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
+index b2da31310d24..09b526518f5a 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_object.c
++++ b/drivers/gpu/drm/virtio/virtgpu_object.c
+@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
++ size_t max_segment;
+
+ /* wtf swapping */
+ if (bo->pages)
+@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
+ if (!bo->pages)
+ goto out;
+
+- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+- nr_pages << PAGE_SHIFT, GFP_KERNEL);
++ max_segment = virtio_max_dma_size(qdev->vdev);
++ max_segment &= PAGE_MASK;
++ if (max_segment > SCATTERLIST_MAX_SEGMENT)
++ max_segment = SCATTERLIST_MAX_SEGMENT;
++ ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
++ nr_pages << PAGE_SHIFT,
++ max_segment, GFP_KERNEL);
+ if (ret)
+ goto out;
+ return 0;
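
The virtgpu change above caps each scatterlist segment at the device's DMA limit, rounded down to a whole page and clamped to the scatterlist maximum, instead of letting the sg table coalesce arbitrarily large segments. The clamping arithmetic in isolation, with illustrative constants:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SG_MAX_SEGMENT	(1u << 31)	/* stand-in upper bound */

static uint32_t clamp_segment(uint32_t dev_max)
{
	uint32_t seg = dev_max & PAGE_MASK;	/* whole pages only */

	if (seg > SG_MAX_SEGMENT)
		seg = SG_MAX_SEGMENT;
	return seg;
}

int main(void)
{
	/* a 65537-byte device limit becomes one 16-page segment */
	printf("%" PRIu32 "\n", clamp_segment(65537));
	return 0;
}
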
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 3299b1474d1b..53bddb50aeba 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -311,14 +311,16 @@ static void wacom_feature_mapping(struct hid_device *hdev,
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max) {
+ /* read manually */
+- data = kzalloc(2, GFP_KERNEL);
++ n = hid_report_len(field->report);
++ data = hid_alloc_report_buf(field->report, GFP_KERNEL);
+ if (!data)
+ break;
+ data[0] = field->report->id;
+ ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
+- data, 2, WAC_CMD_RETRIES);
+- if (ret == 2) {
+- features->touch_max = data[1];
++ data, n, WAC_CMD_RETRIES);
++ if (ret == n) {
++ ret = hid_report_raw_event(hdev,
++ HID_FEATURE_REPORT, data, n, 0);
+ } else {
+ features->touch_max = 16;
+ hid_warn(hdev, "wacom_feature_mapping: "
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 53ed51adb8ac..58719461850d 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2510,6 +2510,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
++ struct wacom_features *features = &wacom->wacom_wac.features;
+
+ switch (equivalent_usage) {
+ case HID_GD_X:
+@@ -2530,6 +2531,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ case HID_DG_TIPSWITCH:
+ wacom_wac->hid_data.tipswitch = value;
+ break;
++ case HID_DG_CONTACTMAX:
++ features->touch_max = value;
++ return;
+ }
+
+
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index ad1681872e39..b99322d83f48 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -801,7 +801,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
+
+ static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+ {
+- u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++ u32 val;
++
++ /* We do not support the SMBUS Quick command */
++ val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+
+ if (adap->algo->reg_slave)
+ val |= I2C_FUNC_SLAVE;
+diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
+index e7f9305b2dd9..f5f001738df5 100644
+--- a/drivers/i2c/busses/i2c-designware-slave.c
++++ b/drivers/i2c/busses/i2c-designware-slave.c
+@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
+
+ dev->disable_int(dev);
+ dev->disable(dev);
++ synchronize_irq(dev->irq);
+ dev->slave = NULL;
+ pm_runtime_put(dev->dev);
+
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index 252edb433fdf..29eae1bf4f86 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
+ .max_num_msgs = 255,
+ };
+
++static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
++ .flags = I2C_AQ_NO_ZERO_LEN,
++};
++
+ static const struct mtk_i2c_compatible mt2712_compat = {
+ .regs = mt_i2c_regs_v1,
+ .pmic_i2c = 0,
+@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
+ };
+
+ static const struct mtk_i2c_compatible mt8183_compat = {
++ .quirks = &mt8183_i2c_quirks,
+ .regs = mt_i2c_regs_v2,
+ .pmic_i2c = 0,
+ .dcm = 0,
+@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
+
+ static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
+ {
+- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++ if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
++ return I2C_FUNC_I2C |
++ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
++ else
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ }
+
+ static const struct i2c_algorithm mtk_i2c_algorithm = {
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 420efaab3860..e78c20d7df41 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -1357,7 +1357,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN0618", 0 },
+ { "ELAN0619", 0 },
+ { "ELAN061A", 0 },
+- { "ELAN061B", 0 },
++/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN061E", 0 },
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index dce1d8d2e8a4..3e687f18b203 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
+ iommu_completion_wait(iommu);
+ }
+
++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
++{
++ struct iommu_cmd cmd;
++
++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
++ dom_id, 1);
++ iommu_queue_command(iommu, &cmd);
++
++ iommu_completion_wait(iommu);
++}
++
+ static void amd_iommu_flush_all(struct amd_iommu *iommu)
+ {
+ struct iommu_cmd cmd;
+@@ -1414,18 +1425,21 @@ static void free_pagetable(struct protection_domain *domain)
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+-static bool increase_address_space(struct protection_domain *domain,
++static void increase_address_space(struct protection_domain *domain,
+ gfp_t gfp)
+ {
++ unsigned long flags;
+ u64 *pte;
+
+- if (domain->mode == PAGE_MODE_6_LEVEL)
++ spin_lock_irqsave(&domain->lock, flags);
++
++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+ /* address space already 64 bit large */
+- return false;
++ goto out;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+- return false;
++ goto out;
+
+ *pte = PM_LEVEL_PDE(domain->mode,
+ iommu_virt_to_phys(domain->pt_root));
+@@ -1433,7 +1447,10 @@ static bool increase_address_space(struct protection_domain *domain,
+ domain->mode += 1;
+ domain->updated = true;
+
+- return true;
++out:
++ spin_unlock_irqrestore(&domain->lock, flags);
++
++ return;
+ }
+
+ static u64 *alloc_pte(struct protection_domain *domain,
+@@ -1863,6 +1880,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ {
+ u64 pte_root = 0;
+ u64 flags = 0;
++ u32 old_domid;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(domain->pt_root);
+@@ -1912,8 +1930,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ flags &= ~DEV_DOMID_MASK;
+ flags |= domain->id;
+
++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
+ amd_iommu_dev_table[devid].data[1] = flags;
+ amd_iommu_dev_table[devid].data[0] = pte_root;
++
++ /*
++ * A kdump kernel might be replacing a domain ID that was copied from
++ * the previous kernel--if so, it needs to flush the translation cache
++ * entries for the old domain ID that is being overwritten
++ */
++ if (old_domid) {
++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
++
++ amd_iommu_flush_tlb_domid(iommu, old_domid);
++ }
+ }
+
+ static void clear_dte_entry(u16 devid)
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index eceaa7e968ae..641dc223c97b 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
+ }
+
+ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
+- unsigned long address, unsigned long pages, int ih, int gl)
++ unsigned long address, unsigned long pages, int ih)
+ {
+ struct qi_desc desc;
+
+- if (pages == -1) {
+- /* For global kernel pages we have to flush them in *all* PASIDs
+- * because that's the only option the hardware gives us. Despite
+- * the fact that they are actually only accessible through one. */
+- if (gl)
+- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+- QI_EIOTLB_DID(sdev->did) |
+- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
+- QI_EIOTLB_TYPE;
+- else
+- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+- QI_EIOTLB_DID(sdev->did) |
+- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+- QI_EIOTLB_TYPE;
++	/*
++	 * Do PASID-granularity IOTLB invalidation if the page selective
++	 * capability is not available.
++	 */
++ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
++ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
++ QI_EIOTLB_DID(sdev->did) |
++ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
++ QI_EIOTLB_TYPE;
+ desc.qw1 = 0;
+ } else {
+ int mask = ilog2(__roundup_pow_of_two(pages));
+@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
+ QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+ QI_EIOTLB_TYPE;
+ desc.qw1 = QI_EIOTLB_ADDR(address) |
+- QI_EIOTLB_GL(gl) |
+ QI_EIOTLB_IH(ih) |
+ QI_EIOTLB_AM(mask);
+ }
+@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
+ }
+
+ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
+- unsigned long pages, int ih, int gl)
++ unsigned long pages, int ih)
+ {
+ struct intel_svm_dev *sdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list)
+- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
++ intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
+ rcu_read_unlock();
+ }
+
+@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
+ struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+
+ intel_flush_svm_range(svm, start,
+- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
++ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+ }
+
+ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
+- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+ }
+ rcu_read_unlock();
+
+@@ -410,7 +404,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
+ * large and has to be physically contiguous. So it's
+ * hard to be as defensive as we might like. */
+ intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
+- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
+index b9dad0accd1b..d855e9c09c08 100644
+--- a/drivers/media/platform/stm32/stm32-dcmi.c
++++ b/drivers/media/platform/stm32/stm32-dcmi.c
+@@ -1702,7 +1702,7 @@ static int dcmi_probe(struct platform_device *pdev)
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Could not get irq\n");
+- return irq;
++ return irq ? irq : -ENXIO;
+ }
+
+ dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
+index c659e18b358b..676d233d46d5 100644
+--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
+@@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
+ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
+ {
+ struct technisat_usb2_state *state = d->priv;
+- u8 *buf = state->buf;
+- u8 *b;
+- int ret;
+ struct ir_raw_event ev;
++ u8 *buf = state->buf;
++ int i, ret;
+
+ buf[0] = GET_IR_DATA_VENDOR_REQUEST;
+ buf[1] = 0x08;
+@@ -647,26 +646,25 @@ unlock:
+ return 0; /* no key pressed */
+
+ /* decoding */
+- b = buf+1;
+
+ #if 0
+ deb_rc("RC: %d ", ret);
+- debug_dump(b, ret, deb_rc);
++ debug_dump(buf + 1, ret, deb_rc);
+ #endif
+
+ ev.pulse = 0;
+- while (1) {
+- ev.pulse = !ev.pulse;
+- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000;
+- ir_raw_event_store(d->rc_dev, &ev);
+-
+- b++;
+- if (*b == 0xff) {
++ for (i = 1; i < ARRAY_SIZE(state->buf); i++) {
++ if (buf[i] == 0xff) {
+ ev.pulse = 0;
+ ev.duration = 888888*2;
+ ir_raw_event_store(d->rc_dev, &ev);
+ break;
+ }
++
++ ev.pulse = !ev.pulse;
++ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR *
++ FIRMWARE_CLOCK_TICK) / 1000;
++ ir_raw_event_store(d->rc_dev, &ev);
+ }
+
+ ir_raw_event_handle(d->rc_dev);
+diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
+index e4d2dcd5cc0f..19c90fa9e443 100644
+--- a/drivers/media/usb/tm6000/tm6000-dvb.c
++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
+@@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb)
+ printk(KERN_ERR "tm6000: error %s\n", __func__);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
++ dev->dvb->bulk_urb = NULL;
+ }
+ }
+ }
+@@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
+ dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL);
+ if (!dvb->bulk_urb->transfer_buffer) {
+ usb_free_urb(dvb->bulk_urb);
++ dvb->bulk_urb = NULL;
+ return -ENOMEM;
+ }
+
+@@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev)
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
++ dvb->bulk_urb = NULL;
+ return ret;
+ }
+
+diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
+index 75178624d3f5..fb15f255a1db 100644
+--- a/drivers/net/dsa/microchip/ksz9477_spi.c
++++ b/drivers/net/dsa/microchip/ksz9477_spi.c
+@@ -157,6 +157,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
+ { .compatible = "microchip,ksz9897" },
+ { .compatible = "microchip,ksz9893" },
+ { .compatible = "microchip,ksz9563" },
++ { .compatible = "microchip,ksz8563" },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index b41f23679a08..7ce9c69e9c44 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
+
+ ret = xgbe_platform_init();
+ if (ret)
+- return ret;
++ goto err_platform_init;
+
+ ret = xgbe_pci_init();
+ if (ret)
+- return ret;
++ goto err_pci_init;
+
+ return 0;
++
++err_pci_init:
++ xgbe_platform_exit();
++err_platform_init:
++ unregister_netdevice_notifier(&xgbe_netdev_notifier);
++ return ret;
+ }
+
+ static void __exit xgbe_mod_exit(void)
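
The xgbe fix above turns early returns into a proper unwind ladder: if xgbe_pci_init() fails, the platform layer is torn down; if xgbe_platform_init() fails, the notifier registered first is removed. The shape of the idiom, with hypothetical setup steps:

#include <stdio.h>

static int  step_a(void) { puts("a up");   return 0; }
static int  step_b(void) { puts("b up");   return 0; }
static int  step_c(void) { puts("c fail"); return -1; }
static void undo_b(void) { puts("b down"); }
static void undo_a(void) { puts("a down"); }

/* Each error label undoes exactly the steps that completed before the
 * jump, in reverse order of setup, so there is one cleanup path. */
static int mod_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;
	ret = step_b();
	if (ret)
		goto err_b;
	ret = step_c();
	if (ret)
		goto err_c;
	return 0;

err_c:
	undo_b();
err_b:
	undo_a();
err_a:
	return ret;
}

int main(void)
{
	return mod_init() ? 1 : 0;
}
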
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+index 1fff462a4175..3dbf3ff1c450 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+- if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
++ if (rule && rule->type == aq_rx_filter_vlan &&
++ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+- if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) {
++ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
+ !(aq_nic->packet_filter & IFF_PROMISC));
+ aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+index 5315df5ff6f8..4ebf083c51c5 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
+ if (err < 0)
+ goto err_exit;
+
++ err = aq_filters_vlans_update(aq_nic);
++ if (err < 0)
++ goto err_exit;
++
+ err = aq_nic_start(aq_nic);
+ if (err < 0)
+ goto err_exit;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+index 41172fbebddd..1a2b09065293 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+@@ -390,7 +390,7 @@ int aq_nic_start(struct aq_nic_s *self)
+ self->aq_nic_cfg.link_irq_vec);
+ err = request_threaded_irq(irqvec, NULL,
+ aq_linkstate_threaded_isr,
+- IRQF_SHARED,
++ IRQF_SHARED | IRQF_ONESHOT,
+ self->ndev->name, self);
+ if (err < 0)
+ goto err_exit;
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+index 715685aa48c3..28892b8acd0e 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
+ }
+ }
+
++err_exit:
+ if (!was_tx_cleaned)
+ work_done = budget;
+
+@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
+ 1U << self->aq_ring_param.vec_idx);
+ }
+ }
+-err_exit:
++
+ return work_done;
+ }
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+index 8c1497e7d9c5..aa31948eac64 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+@@ -79,7 +79,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
+ n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
+ if (n != 1) {
+ err = -EPERM;
+- goto err_irq;
++ goto err_irq_vectors;
+ }
+
+ ptp_qoriq->irq = pci_irq_vector(pdev, 0);
+@@ -103,6 +103,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
+ err_no_clock:
+ free_irq(ptp_qoriq->irq, ptp_qoriq);
+ err_irq:
++ pci_free_irq_vectors(pdev);
++err_irq_vectors:
+ iounmap(base);
+ err_ioremap:
+ kfree(ptp_qoriq);
+@@ -120,6 +122,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
+ struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev);
+
+ ptp_qoriq_free(ptp_qoriq);
++ pci_free_irq_vectors(pdev);
+ kfree(ptp_qoriq);
+
+ pci_release_mem_regions(pdev);
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+index fe879c07ae3c..fc5ea87bd387 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+@@ -11,6 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/marvell_phy.h>
+ #include <linux/module.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
+@@ -1149,6 +1150,13 @@ static void hns_nic_adjust_link(struct net_device *ndev)
+ }
+ }
+
++static int hns_phy_marvell_fixup(struct phy_device *phydev)
++{
++ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
++
++ return 0;
++}
++
+ /**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+@@ -1174,6 +1182,16 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
+
++ /* register the PHY fixup (for Marvell 88E1510) */
++ ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510,
++ MARVELL_PHY_ID_MASK,
++ hns_phy_marvell_fixup);
++ /* we can live without it, so just issue a warning */
++ if (ret)
++ netdev_warn(ndev,
++ "Cannot register PHY fixup, ret=%d\n",
++ ret);
++
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+@@ -2429,8 +2447,11 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+- if (ndev->phydev)
++ if (ndev->phydev) {
++ phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510,
++ MARVELL_PHY_ID_MASK);
+ phy_disconnect(ndev->phydev);
++ }
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 66b691b7221f..f1e0c16263a4 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3896,6 +3896,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
+
+ hns3_client_stop(handle);
+
++ hns3_uninit_phy(netdev);
++
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+@@ -3905,8 +3907,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
+
+ hns3_clear_all_ring(handle, true);
+
+- hns3_uninit_phy(netdev);
+-
+ hns3_nic_uninit_vector_data(priv);
+
+ ret = hns3_nic_dealloc_vector_data(priv);
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index cebd20f3128d..fa4bb940665c 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1983,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work)
+
+ rwi = get_next_rwi(adapter);
+ while (rwi) {
++ if (adapter->state == VNIC_REMOVING ||
++ adapter->state == VNIC_REMOVED)
++ goto out;
++
+ if (adapter->force_reset_recovery) {
+ adapter->force_reset_recovery = false;
+ rc = do_hard_reset(adapter, rwi, reset_state);
+@@ -2007,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work)
+ netdev_dbg(adapter->netdev, "Reset failed\n");
+ free_all_rwi(adapter);
+ }
+-
++out:
+ adapter->resetting = false;
+ if (we_lock_rtnl)
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index c93a6f9b735b..7e88446ac97a 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4924,6 +4924,13 @@ static const struct dmi_system_id msi_blacklist[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
++ {
++ .ident = "ASUS P6X",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P6X"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 6de23b56b294..c875a2fa7596 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -1215,7 +1215,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+- return rc;
++ goto err4;
+ }
+ }
+
+@@ -1223,6 +1223,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
+
+ return 0;
+
++err4:
++ qed_ll2_dealloc_if(cdev);
+ err3:
+ qed_hw_stop(cdev);
+ err2:
+diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
+index 7a5e6c5abb57..276c7cae7cee 100644
+--- a/drivers/net/ethernet/seeq/sgiseeq.c
++++ b/drivers/net/ethernet/seeq/sgiseeq.c
+@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
+ printk(KERN_ERR "Sgiseeq: Cannot register net device, "
+ "aborting.\n");
+ err = -ENODEV;
+- goto err_out_free_page;
++ goto err_out_free_attrs;
+ }
+
+ printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
+
+ return 0;
+
+-err_out_free_page:
+- free_page((unsigned long) sp->srings);
++err_out_free_attrs:
++ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
++ sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ err_out_free_dev:
+ free_netdev(dev);
+
+diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
+index b41696e16bdc..c20e7ef18bc9 100644
+--- a/drivers/net/ieee802154/mac802154_hwsim.c
++++ b/drivers/net/ieee802154/mac802154_hwsim.c
+@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
+ err = hwsim_subscribe_all_others(phy);
+ if (err < 0) {
+ mutex_unlock(&hwsim_phys_lock);
+- goto err_reg;
++ goto err_subscribe;
+ }
+ }
+ list_add_tail(&phy->list, &hwsim_phys);
+@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
+
+ return idx;
+
++err_subscribe:
++ ieee802154_unregister_hw(phy->hw);
+ err_reg:
+ kfree(pib);
+ err_pib:
+@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
+ return 0;
+
+ platform_drv:
+- genl_unregister_family(&hwsim_genl_family);
+-platform_dev:
+ platform_device_unregister(mac802154hwsim_dev);
++platform_dev:
++ genl_unregister_family(&hwsim_genl_family);
+ return rc;
+ }
+
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 1a7b7bd412f9..f2553dff5b17 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -787,8 +787,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
+ ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ value, index, tmp, size, 500);
++ if (ret < 0)
++ memset(data, 0xff, size);
++ else
++ memcpy(data, tmp, size);
+
+- memcpy(data, tmp, size);
+ kfree(tmp);
+
+ return ret;
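
The r8152 change above stops get_registers() from copying the bounce buffer back when the USB control transfer failed; the caller's buffer is instead filled with 0xff, the usual "device absent" value on a bus, so code that ignores the return value is not steered by stale bytes. A hedged sketch of the wrapper, with the failing bus read simulated:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a control transfer that can fail; here it always does. */
static int bus_read(uint8_t *buf, size_t size)
{
	(void)buf;
	(void)size;
	return -5;	/* an -EIO-style error code */
}

static int read_regs(uint8_t *data, size_t size)
{
	uint8_t tmp[64];
	int ret;

	if (size > sizeof(tmp))
		return -1;
	ret = bus_read(tmp, size);
	if (ret < 0)
		memset(data, 0xff, size);	/* poison, don't leak stale data */
	else
		memcpy(data, tmp, size);
	return ret;
}

int main(void)
{
	uint8_t regs[4] = { 0 };

	read_regs(regs, sizeof(regs));
	printf("%02x %02x %02x %02x\n", regs[0], regs[1], regs[2], regs[3]);
	return 0;
}
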
+diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
+index 653d347a9a19..580387f9f12a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/ie.c
++++ b/drivers/net/wireless/marvell/mwifiex/ie.c
+@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
+ }
+
+ vs_ie = (struct ieee_types_header *)vendor_ie;
++ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
++ IEEE_MAX_IE_SIZE)
++ return -EINVAL;
+ memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
+ vs_ie, vs_ie->len + 2);
+ le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
+diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+index 18f7d9bf30b2..0939a8c8f3ab 100644
+--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+
+ rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
+ if (rate_ie) {
++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
++ return;
+ memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
+ rate_len = rate_ie->len;
+ }
+@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
+ rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
+ params->beacon.tail,
+ params->beacon.tail_len);
+- if (rate_ie)
++ if (rate_ie) {
++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
++ return;
+ memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
++ }
+
+ return;
+ }
+@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
+ params->beacon.tail_len);
+ if (vendor_ie) {
+ wmm_ie = vendor_ie;
++ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
++ return;
+ memcpy(&bss_cfg->wmm_info, wmm_ie +
+ sizeof(struct ieee_types_header), *(wmm_ie + 1));
+ priv->wmm_enabled = 1;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 8d33970a2950..5f5722bf6762 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+- queue->rx.rsp_cons = ++cons;
++ queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ kfree_skb(nskb);
+ return ~0U;
+ }
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 8ffba67568ec..b7f6b1324395 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -61,6 +61,7 @@
+ USB2_OBINT_IDDIGCHG)
+
+ /* VBCTRL */
++#define USB2_VBCTRL_OCCLREN BIT(16)
+ #define USB2_VBCTRL_DRVVBUSSEL BIT(8)
+
+ /* LINECTRL1 */
+@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
+ writel(val, usb2_base + USB2_LINECTRL1);
+
+ val = readl(usb2_base + USB2_VBCTRL);
++ val &= ~USB2_VBCTRL_OCCLREN;
+ writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 0b4f36905321..8e667967928a 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1400,7 +1400,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
+
+ atmel_port->hd_start_rx = false;
+ atmel_start_rx(port);
+- return;
+ }
+
+ atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
+index 73d71a4e6c0c..f49b7d6fbc88 100644
+--- a/drivers/tty/serial/sprd_serial.c
++++ b/drivers/tty/serial/sprd_serial.c
+@@ -609,7 +609,7 @@ static inline void sprd_rx(struct uart_port *port)
+
+ if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE |
+ SPRD_LSR_FE | SPRD_LSR_OE))
+- if (handle_lsr_errors(port, &lsr, &flag))
++ if (handle_lsr_errors(port, &flag, &lsr))
+ continue;
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 9d6cb709ca7b..151a74a54386 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ struct usb_bos_descriptor *bos;
+ struct usb_dev_cap_header *cap;
+ struct usb_ssp_cap_descriptor *ssp_cap;
+- unsigned char *buffer;
++ unsigned char *buffer, *buffer0;
+ int length, total_len, num, i, ssac;
+ __u8 cap_type;
+ int ret;
+@@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ ret = -ENOMSG;
+ goto err;
+ }
++
++ buffer0 = buffer;
+ total_len -= length;
++ buffer += length;
+
+ for (i = 0; i < num; i++) {
+- buffer += length;
+ cap = (struct usb_dev_cap_header *)buffer;
+
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+@@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ break;
+ }
+
+- total_len -= length;
+-
+ if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
+ dev_warn(ddev, "descriptor type invalid, skip\n");
+ continue;
+@@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ default:
+ break;
+ }
++
++ total_len -= length;
++ buffer += length;
+ }
++ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0);
+
+ return 0;
+
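
The BOS-parsing fix above restructures the loop so the buffer pointer and remaining length advance exactly once per descriptor, after the capability header has been validated, and writes back a wTotalLength covering only what was actually consumed. The loop shape matters more than the USB details; a sketch with a byte-length/type record format:

#include <stdint.h>
#include <stdio.h>

/* Walk [len, type, payload...] records: validate against the bytes
 * that remain, then advance once at the bottom of the loop. Advancing
 * at the top (as the old code did) applies the previous record's
 * length before the current header has been checked. */
static int walk(const uint8_t *buf, uint32_t total)
{
	const uint8_t *p = buf;

	while (total >= 2) {
		uint8_t len = p[0];

		if (len < 2 || len > total)
			return -1;	/* malformed record */
		printf("record type %u, %u bytes\n", p[1], len);
		total -= len;
		p += len;
	}
	return (int)(p - buf);	/* bytes actually consumed */
}

int main(void)
{
	const uint8_t recs[] = { 3, 1, 0xaa, 4, 2, 0xbb, 0xcc };

	printf("consumed %d\n", walk(recs, sizeof(recs)));
	return 0;
}
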
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 294158113d62..77142f9bf26a 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -1217,6 +1217,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
+
+ tegra_xusb_config(tegra, regs);
+
++ /*
++ * The XUSB Falcon microcontroller can only address 40 bits, so set
++ * the DMA mask accordingly.
++ */
++ err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
++ if (err < 0) {
++ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
++ goto put_rpm;
++ }
++
+ err = tegra_xusb_load_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
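
The xhci-tegra hunk above restricts DMA to 40-bit addresses because the XUSB Falcon microcontroller cannot address more; dma_set_mask_and_coherent() takes a mask of n low bits set. The mask computation, mirroring the shape of the kernel's DMA_BIT_MASK() macro:

#include <inttypes.h>
#include <stdio.h>

/* n low bits set; the n == 64 case is special-cased because a 64-bit
 * shift of a 64-bit value is undefined behavior in C. */
#define BIT_MASK_N(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("40-bit DMA mask: 0x%" PRIx64 "\n",
	       (uint64_t)BIT_MASK_N(40));	/* 0xffffffffff */
	return 0;
}
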
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 18c7c6b2fe08..85b2107e8a3d 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -2961,6 +2961,7 @@ static int
+ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
+ {
+ int rc = 0;
++ int is_domain = 0;
+ const char *delim, *payload;
+ char *desc;
+ ssize_t len;
+@@ -3008,6 +3009,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
+ rc = PTR_ERR(key);
+ goto out_err;
+ }
++ is_domain = 1;
+ }
+
+ down_read(&key->sem);
+@@ -3065,6 +3067,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
+ goto out_key_put;
+ }
+
++	/*
++	 * If we have a domain key then we must also set the domainName
++	 * for the request.
++	 */
++ if (is_domain && ses->domainName) {
++ vol->domainname = kstrndup(ses->domainName,
++ strlen(ses->domainName),
++ GFP_KERNEL);
++ if (!vol->domainname) {
++ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
++ "domain\n", len);
++ rc = -ENOMEM;
++ kfree(vol->username);
++ vol->username = NULL;
++ kzfree(vol->password);
++ vol->password = NULL;
++ goto out_key_put;
++ }
++ }
++
+ out_key_put:
+ up_read(&key->sem);
+ key_put(key);
+diff --git a/fs/fs_parser.c b/fs/fs_parser.c
+index 0d388faa25d1..460ea4206fa2 100644
+--- a/fs/fs_parser.c
++++ b/fs/fs_parser.c
+@@ -264,6 +264,7 @@ int fs_lookup_param(struct fs_context *fc,
+ return invalf(fc, "%s: not usable as path", param->key);
+ }
+
++ f->refcnt++; /* filename_lookup() drops our ref. */
+ ret = filename_lookup(param->dirfd, f, flags, _path, NULL);
+ if (ret < 0) {
+ errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 9f44ddc34c7b..3321cc7a7ead 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1483,7 +1483,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
+ if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ nfs_file_set_open_context(file, ctx);
+ else
+- err = -ESTALE;
++ err = -EOPENSTALE;
+ out:
+ return err;
+ }
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index c67cdbb36ce7..38d915814221 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/nfs_fs.h>
++#include <linux/nfs_mount.h>
+ #include <linux/nfs_page.h>
+ #include <linux/module.h>
+ #include <linux/sched/mm.h>
+@@ -928,7 +929,9 @@ retry:
+ pgm = &pgio->pg_mirrors[0];
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
+
+- pgio->pg_maxretrans = io_maxretrans;
++ if (NFS_SERVER(pgio->pg_inode)->flags &
++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
++ pgio->pg_maxretrans = io_maxretrans;
+ return;
+ out_nolseg:
+ if (pgio->pg_error < 0)
+@@ -936,6 +939,7 @@ out_nolseg:
+ out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
++ pgio->pg_maxretrans = 0;
+ nfs_pageio_reset_read_mds(pgio);
+ }
+
+@@ -996,12 +1000,15 @@ retry:
+ pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
+ }
+
+- pgio->pg_maxretrans = io_maxretrans;
++ if (NFS_SERVER(pgio->pg_inode)->flags &
++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
++ pgio->pg_maxretrans = io_maxretrans;
+ return;
+
+ out_mds:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
++ pgio->pg_maxretrans = 0;
+ nfs_pageio_reset_write_mds(pgio);
+ }
+
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 81e2fdff227e..9ab9427405f3 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -773,3 +773,13 @@ static inline bool nfs_error_is_fatal(int err)
+ }
+ }
+
++static inline bool nfs_error_is_fatal_on_server(int err)
++{
++ switch (err) {
++ case 0:
++ case -ERESTARTSYS:
++ case -EINTR:
++ return false;
++ }
++ return nfs_error_is_fatal(err);
++}
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index 3a507c42c1ca..336643b82188 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ switch (err) {
+- case -EPERM:
+- case -EACCES:
+- case -EDQUOT:
+- case -ENOSPC:
+- case -EROFS:
+- goto out_put_ctx;
+ default:
++ goto out_put_ctx;
++ case -ENOENT:
++ case -ESTALE:
++ case -EISDIR:
++ case -ENOTDIR:
++ case -ELOOP:
+ goto out_drop;
+ }
+ }
+@@ -187,7 +187,11 @@ static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+ bool same_inode = false;
+ int ret;
+
+- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
++ /* NFS does not support deduplication. */
++ if (remap_flags & REMAP_FILE_DEDUP)
++ return -EOPNOTSUPP;
++
++ if (remap_flags & ~REMAP_FILE_ADVISORY)
+ return -EINVAL;
+
+ /* check alignment w.r.t. clone_blksize */
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 8b6211753228..eae584dbfa08 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
+ }
+
+ hdr->res.fattr = &hdr->fattr;
+- hdr->res.count = count;
++ hdr->res.count = 0;
+ hdr->res.eof = 0;
+ hdr->res.verf = &hdr->verf;
+ nfs_fattr_init(&hdr->fattr);
+diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
+index 5552fa8b6e12..0f7288b94633 100644
+--- a/fs/nfs/proc.c
++++ b/fs/nfs/proc.c
+@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+ /* Emulate the eof flag, which isn't normally needed in NFSv2
+ * as it is guaranteed to always return the file attributes
+ */
+- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
++ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
++ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ hdr->res.eof = 1;
+ }
+ return 0;
+@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
+
+ static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+ {
+- if (task->tk_status >= 0)
++ if (task->tk_status >= 0) {
++ hdr->res.count = hdr->args.count;
+ nfs_writeback_update_inode(hdr);
++ }
+ return 0;
+ }
+
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index c19841c82b6a..cfe0b586eadd 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
+ }
+ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+
+-static void nfs_readpage_release(struct nfs_page *req)
++static void nfs_readpage_release(struct nfs_page *req, int error)
+ {
+ struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
++ struct page *page = req->wb_page;
+
+ dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
+ (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
+ (long long)req_offset(req));
+
++ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
++ SetPageError(page);
+ if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
+- if (PageUptodate(req->wb_page))
+- nfs_readpage_to_fscache(inode, req->wb_page, 0);
++ struct address_space *mapping = page_file_mapping(page);
+
+- unlock_page(req->wb_page);
++ if (PageUptodate(page))
++ nfs_readpage_to_fscache(inode, page, 0);
++ else if (!PageError(page) && !PagePrivate(page))
++ generic_error_remove_page(mapping, page);
++ unlock_page(page);
+ }
+ nfs_release_request(req);
+ }
+@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
+ &nfs_async_read_completion_ops);
+ if (!nfs_pageio_add_request(&pgio, new)) {
+ nfs_list_remove_request(new);
+- nfs_readpage_release(new);
++ nfs_readpage_release(new, pgio.pg_error);
+ }
+ nfs_pageio_complete(&pgio);
+
+@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
+ static void nfs_read_completion(struct nfs_pgio_header *hdr)
+ {
+ unsigned long bytes = 0;
++ int error;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out;
+@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
+ zero_user_segment(page, start, end);
+ }
+ }
++ error = 0;
+ bytes += req->wb_bytes;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ if (bytes <= hdr->good_bytes)
+ nfs_page_group_set_uptodate(req);
++ else {
++ error = hdr->error;
++ xchg(&nfs_req_openctx(req)->error, error);
++ }
+ } else
+ nfs_page_group_set_uptodate(req);
+ nfs_list_remove_request(req);
+- nfs_readpage_release(req);
++ nfs_readpage_release(req, error);
+ }
+ out:
+ hdr->release(hdr);
+@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+- nfs_readpage_release(req);
++ nfs_readpage_release(req, error);
+ }
+ }
+
+@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
+ goto out;
+ }
+
++ xchg(&ctx->error, 0);
+ error = nfs_readpage_async(ctx, inode, page);
+-
++ if (!error) {
++ error = wait_on_page_locked_killable(page);
++ if (!PageUptodate(page) && !error)
++ error = xchg(&ctx->error, 0);
++ }
+ out:
+ put_nfs_open_context(ctx);
+ return error;
+@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
+ zero_user_segment(page, len, PAGE_SIZE);
+ if (!nfs_pageio_add_request(desc->pgio, new)) {
+ nfs_list_remove_request(new);
+- nfs_readpage_release(new);
+ error = desc->pgio->pg_error;
++ nfs_readpage_release(new, error);
+ goto out;
+ }
+ return 0;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 059a7c38bc4f..ee6932c9819e 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
+ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+ static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
+ static const struct nfs_rw_ops nfs_rw_write_ops;
++static void nfs_inode_remove_request(struct nfs_page *req);
+ static void nfs_clear_request_commit(struct nfs_page *req);
+ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ struct inode *inode);
+@@ -591,23 +592,13 @@ release_request:
+
+ static void nfs_write_error(struct nfs_page *req, int error)
+ {
++ nfs_set_pageerror(page_file_mapping(req->wb_page));
+ nfs_mapping_set_error(req->wb_page, error);
++ nfs_inode_remove_request(req);
+ nfs_end_page_writeback(req);
+ nfs_release_request(req);
+ }
+
+-static bool
+-nfs_error_is_fatal_on_server(int err)
+-{
+- switch (err) {
+- case 0:
+- case -ERESTARTSYS:
+- case -EINTR:
+- return false;
+- }
+- return nfs_error_is_fatal(err);
+-}
+-
+ /*
+ * Find an associated nfs write request, and prepare to flush it out
+ * May return an error if the user signalled nfs_wait_on_request().
+@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
+ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ struct page *page)
+ {
+- struct address_space *mapping;
+ struct nfs_page *req;
+ int ret = 0;
+
+@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
+
+ /* If there is a fatal error that covers this write, just exit */
+- ret = 0;
+- mapping = page_file_mapping(page);
+- if (test_bit(AS_ENOSPC, &mapping->flags) ||
+- test_bit(AS_EIO, &mapping->flags))
++ ret = pgio->pg_error;
++ if (nfs_error_is_fatal_on_server(ret))
+ goto out_launder;
+
++ ret = 0;
+ if (!nfs_pageio_add_request(pgio, req)) {
+ ret = pgio->pg_error;
+ /*
+@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ } else
+ ret = -EAGAIN;
+ nfs_redirty_request(req);
++ pgio->pg_error = 0;
+ } else
+ nfs_add_stats(page_file_mapping(page)->host,
+ NFSIOS_WRITEPAGES, 1);
+@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+ ret = nfs_page_async_flush(pgio, page);
+ if (ret == -EAGAIN) {
+ redirty_page_for_writepage(wbc, page);
+- ret = 0;
++ ret = AOP_WRITEPAGE_ACTIVATE;
+ }
+ return ret;
+ }
+@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
+ nfs_pageio_init_write(&pgio, inode, 0,
+ false, &nfs_async_write_completion_ops);
+ err = nfs_do_writepage(page, wbc, &pgio);
++ pgio.pg_error = 0;
+ nfs_pageio_complete(&pgio);
+ if (err < 0)
+ return err;
+- if (pgio.pg_error < 0)
++ if (nfs_error_is_fatal(pgio.pg_error))
+ return pgio.pg_error;
+ return 0;
+ }
+@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
+ int ret;
+
+ ret = nfs_writepage_locked(page, wbc);
+- unlock_page(page);
++ if (ret != AOP_WRITEPAGE_ACTIVATE)
++ unlock_page(page);
+ return ret;
+ }
+
+@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
+ int ret;
+
+ ret = nfs_do_writepage(page, wbc, data);
+- unlock_page(page);
++ if (ret != AOP_WRITEPAGE_ACTIVATE)
++ unlock_page(page);
+ return ret;
+ }
+
+@@ -734,6 +727,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
+ &nfs_async_write_completion_ops);
+ pgio.pg_io_completion = ioc;
+ err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
++ pgio.pg_error = 0;
+ nfs_pageio_complete(&pgio);
+ nfs_io_completion_put(ioc);
+
+@@ -742,7 +736,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
+ if (err < 0)
+ goto out_err;
+ err = pgio.pg_error;
+- if (err < 0)
++ if (nfs_error_is_fatal(err))
+ goto out_err;
+ return 0;
+ out_err:
+diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
+index 28a2d12a1029..a8279280e88d 100644
+--- a/fs/overlayfs/ovl_entry.h
++++ b/fs/overlayfs/ovl_entry.h
+@@ -66,6 +66,7 @@ struct ovl_fs {
+ bool workdir_locked;
+ /* Traps in ovl inode cache */
+ struct inode *upperdir_trap;
++ struct inode *workbasedir_trap;
+ struct inode *workdir_trap;
+ struct inode *indexdir_trap;
+ /* Inode numbers in all layers do not use the high xino_bits */
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index b368e2e102fa..afbcb116a7f1 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs)
+ {
+ unsigned i;
+
++ iput(ofs->workbasedir_trap);
+ iput(ofs->indexdir_trap);
+ iput(ofs->workdir_trap);
+ iput(ofs->upperdir_trap);
+@@ -1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
+ return 0;
+ }
+
++/*
++ * Determine how we treat concurrent use of upperdir/workdir based on the
++ * index feature. This is papering over mount leaks of container runtimes:
++ * for example, an old overlay mount is leaked, and its upperdir is then
++ * used as a lower layer in a new overlay mount.
++ */
++static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
++{
++ if (ofs->config.index) {
++ pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
++ name);
++ return -EBUSY;
++ } else {
++ pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
++ name);
++ return 0;
++ }
++}
++
+ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *upperpath)
+ {
+@@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
+ upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
+ ofs->upper_mnt = upper_mnt;
+
+- err = -EBUSY;
+ if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+ ofs->upperdir_locked = true;
+- } else if (ofs->config.index) {
+- pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+- goto out;
+ } else {
+- pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
++ err = ovl_report_in_use(ofs, "upperdir");
++ if (err)
++ goto out;
+ }
+
+ err = 0;
+@@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
+
+ ofs->workbasedir = dget(workpath.dentry);
+
+- err = -EBUSY;
+ if (ovl_inuse_trylock(ofs->workbasedir)) {
+ ofs->workdir_locked = true;
+- } else if (ofs->config.index) {
+- pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
+- goto out;
+ } else {
+- pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
++ err = ovl_report_in_use(ofs, "workdir");
++ if (err)
++ goto out;
+ }
+
++ err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
++ "workdir");
++ if (err)
++ goto out;
++
+ err = ovl_make_workdir(sb, ofs, &workpath);
+
+ out:
+@@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
+ if (err < 0)
+ goto out;
+
+- err = -EBUSY;
+- if (ovl_is_inuse(stack[i].dentry)) {
+- pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
+- goto out;
+- }
+-
+ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
+ if (err)
+ goto out;
+
++ if (ovl_is_inuse(stack[i].dentry)) {
++ err = ovl_report_in_use(ofs, "lowerdir");
++ if (err)
++ goto out;
++ }
++
+ mnt = clone_private_mount(&stack[i]);
+ err = PTR_ERR(mnt);
+ if (IS_ERR(mnt)) {
+@@ -1469,8 +1490,8 @@ out_err:
+ * - another layer of this overlayfs instance
+ * - upper/work dir of any overlayfs instance
+ */
+-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
+- const char *name)
++static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
++ struct dentry *dentry, const char *name)
+ {
+ struct dentry *next = dentry, *parent;
+ int err = 0;
+@@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
+
+ /* Walk back ancestors to root (inclusive) looking for traps */
+ while (!err && parent != next) {
+- if (ovl_is_inuse(parent)) {
+- err = -EBUSY;
+- pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
+- name);
+- } else if (ovl_lookup_trap_inode(sb, parent)) {
++ if (ovl_lookup_trap_inode(sb, parent)) {
+ err = -ELOOP;
+ pr_err("overlayfs: overlapping %s path\n", name);
++ } else if (ovl_is_inuse(parent)) {
++ err = ovl_report_in_use(ofs, name);
+ }
+ next = parent;
+ parent = dget_parent(next);
+@@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ int i, err;
+
+ if (ofs->upper_mnt) {
+- err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
++ err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
++ "upperdir");
+ if (err)
+ return err;
+
+@@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
+ * workbasedir. In that case, we already have their traps in
+ * inode cache and we will catch that case on lookup.
+ */
+- err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
++ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir");
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ofs->numlower; i++) {
+- err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
++ err = ovl_check_layer(sb, ofs,
++ ofs->lower_layers[i].mnt->mnt_root,
+ "lowerdir");
+ if (err)
+ return err;
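+
The ovl_report_in_use() helper introduced above centralizes the overlap policy: with the index feature on, sharing a directory that is already an upperdir/workdir of another mount is a hard -EBUSY failure; with index off it degrades to a warning and the mount proceeds. A minimal standalone sketch of that decision, using simplified stand-in types rather than the kernel's:

    /* Userspace sketch of the in-use policy; types are stand-ins. */
    #include <stdio.h>

    struct ovl_config { int index; };

    static int report_in_use(const struct ovl_config *cfg, const char *name)
    {
            if (cfg->index) {
                    fprintf(stderr, "%s is in-use, '-o index=off' overrides\n", name);
                    return -16;     /* -EBUSY: refuse the mount */
            }
            fprintf(stderr, "%s is in-use, behavior is undefined\n", name);
            return 0;               /* tolerated: warn and continue */
    }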
+diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
+index 6a8dd4af0147..ba8dc520cc79 100644
+--- a/include/linux/intel-iommu.h
++++ b/include/linux/intel-iommu.h
+@@ -346,7 +346,6 @@ enum {
+ #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
+
+ #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
+ #define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
+ #define QI_EIOTLB_AM(am) (((u64)am))
+ #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
+@@ -378,8 +377,6 @@ enum {
+ #define QI_RESP_INVALID 0x1
+ #define QI_RESP_FAILURE 0xf
+
+-#define QI_GRAN_ALL_ALL 0
+-#define QI_GRAN_NONG_ALL 1
+ #define QI_GRAN_NONG_PASID 2
+ #define QI_GRAN_PSI_PASID 3
+
+diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
+index a16fbe9a2a67..aa99c73c3fbd 100644
+--- a/include/net/pkt_sched.h
++++ b/include/net/pkt_sched.h
+@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q);
+ static inline void qdisc_run(struct Qdisc *q)
+ {
+ if (qdisc_run_begin(q)) {
+- __qdisc_run(q);
++ /* NOLOCK qdisc must check 'state' under the qdisc seqlock
++ * to avoid racing with dev_qdisc_reset()
++ */
++ if (!(q->flags & TCQ_F_NOLOCK) ||
++ likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
++ __qdisc_run(q);
+ qdisc_run_end(q);
+ }
+ }
+diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
+index 8a5f70c7cdf2..5e69fba181bc 100644
+--- a/include/net/sock_reuseport.h
++++ b/include/net/sock_reuseport.h
+@@ -21,7 +21,8 @@ struct sock_reuseport {
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+- bool bind_inany;
++ unsigned int bind_inany:1;
++ unsigned int has_conns:1;
+ struct bpf_prog __rcu *prog; /* optional BPF sock selector */
+ struct sock *socks[0]; /* array of sock pointers */
+ };
+@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_sock(struct sock *sk,
+ struct sk_buff *skb,
+ int hdr_len);
+ extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
++
++static inline bool reuseport_has_conns(struct sock *sk, bool set)
++{
++ struct sock_reuseport *reuse;
++ bool ret = false;
++
++ rcu_read_lock();
++ reuse = rcu_dereference(sk->sk_reuseport_cb);
++ if (reuse) {
++ if (set)
++ reuse->has_conns = 1;
++ ret = reuse->has_conns;
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++
+ int reuseport_get_id(struct sock_reuseport *reuse);
+
+ #endif /* _SOCK_REUSEPORT_H */
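+
The reuseport_has_conns() helper above serves both directions: the connect path calls it with set=true to mark the whole SO_REUSEPORT group as containing connected sockets, and the receive lookup calls it with set=false to decide whether group-wide selection is still safe. A hedged sketch of the two call sites, assuming kernel context and eliding the real lookup logic:

    /* Kernel-context sketch; not the literal call sites. */
    static void connect_path(struct sock *sk)
    {
            /* ... route resolved, daddr/dport recorded ... */
            reuseport_has_conns(sk, true);  /* group now has a connection */
    }

    static struct sock *lookup_path(struct sock *sk, u32 hash,
                                    struct sk_buff *skb)
    {
            struct sock *res;

            res = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
            /* group selection could steal traffic from a connected
             * socket, so fall back to score-based lookup in that case */
            if (res && !reuseport_has_conns(sk, false))
                    return res;
            return NULL;
    }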
+diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
+index 5c8a4d760ee3..b5123ab8d54a 100644
+--- a/include/uapi/linux/netfilter/xt_nfacct.h
++++ b/include/uapi/linux/netfilter/xt_nfacct.h
+@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
+ struct nf_acct *nfacct;
+ };
+
++struct xt_nfacct_match_info_v1 {
++ char name[NFACCT_NAME_MAX];
++ struct nf_acct *nfacct __attribute__((aligned(8)));
++};
++
+ #endif /* _XT_NFACCT_MATCH_H */
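+
The only difference between the v0 and v1 layouts is the aligned(8) attribute on the kernel-private pointer; that pads the structure identically for 32-bit and 64-bit userspace, so revision 1 needs no compat translation. A small demonstration of the effect, with a stand-in pointer type:

    /* Userspace demo: aligned(8) makes the blob size ABI-stable. */
    #include <stdio.h>

    #define NFACCT_NAME_MAX 32

    struct v0 { char name[NFACCT_NAME_MAX]; void *nfacct; };
    struct v1 { char name[NFACCT_NAME_MAX];
                void *nfacct __attribute__((aligned(8))); };

    int main(void)
    {
            /* v0: 36 on ILP32 vs 40 on LP64; v1: 40 on both */
            printf("v0=%zu v1=%zu\n", sizeof(struct v0), sizeof(struct v1));
            return 0;
    }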
+diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
+index 95a260f9214b..136ce049c4ad 100644
+--- a/kernel/kallsyms.c
++++ b/kernel/kallsyms.c
+@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
+ {
+ char namebuf[KSYM_NAME_LEN];
+
+- if (is_ksym_addr(addr))
+- return !!get_symbol_pos(addr, symbolsize, offset);
++ if (is_ksym_addr(addr)) {
++ get_symbol_pos(addr, symbolsize, offset);
++ return 1;
++ }
+ return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+ !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
+ }
+diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
+index fad95ef64e01..bc06e3cdfa84 100644
+--- a/net/batman-adv/bat_v_ogm.c
++++ b/net/batman-adv/bat_v_ogm.c
+@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
+ * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+- * @tvlv_len: tvlv length of the previously considered OGM
++ * @ogm2_packet: potential OGM2 in buffer
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
+-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
+- __be16 tvlv_len)
++static bool
++batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
++ const struct batadv_ogm2_packet *ogm2_packet)
+ {
+ int next_buff_pos = 0;
+
+- next_buff_pos += buff_pos + BATADV_OGM2_HLEN;
+- next_buff_pos += ntohs(tvlv_len);
++ /* check if there is enough space for the header */
++ next_buff_pos += buff_pos + sizeof(*ogm2_packet);
++ if (next_buff_pos > packet_len)
++ return false;
++
++ /* check if there is enough space for the optional TVLV */
++ next_buff_pos += ntohs(ogm2_packet->tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
+ ogm_packet = (struct batadv_ogm2_packet *)skb->data;
+
+ while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+- ogm_packet->tvlv_len)) {
++ ogm_packet)) {
+ batadv_v_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM2_HLEN;
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index c8177a89f52c..4096d8a74a2b 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
+ return NF_DROP;
+ }
+
+- ADD_COUNTER(*(counter_base + i), 1, skb->len);
++ ADD_COUNTER(*(counter_base + i), skb->len, 1);
+
+ /* these should only watch: not modify, nor tell us
+ * what to do with the packet
+@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
+ continue;
+ counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
+ for (i = 0; i < nentries; i++)
+- ADD_COUNTER(counters[i], counter_base[i].pcnt,
+- counter_base[i].bcnt);
++ ADD_COUNTER(counters[i], counter_base[i].bcnt,
++ counter_base[i].pcnt);
+ }
+ }
+
+@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
+
+ /* we add to the counters of the first cpu */
+ for (i = 0; i < num_counters; i++)
+- ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt);
++ ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
+
+ write_unlock_bh(&t->lock);
+ ret = 0;
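+
The three hunks above swap the last two arguments at every ebtables call site. Assuming the shared macro definition from the netfilter x_tables header, the byte count comes before the packet count:

    /* Presumed definition (netfilter x_tables header): bytes, then packets. */
    #define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

    /* ADD_COUNTER(cnt, skb->len, 1) therefore adds skb->len bytes and
     * one packet; the old ADD_COUNTER(cnt, 1, skb->len) had the two
     * accumulators reversed. */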
+diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
+index 5d6724cee38f..4f75df40fb12 100644
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
+ if (key) {
+ kfree(key->key);
+ key->key = NULL;
+- crypto_free_sync_skcipher(key->tfm);
+- key->tfm = NULL;
++ if (key->tfm) {
++ crypto_free_sync_skcipher(key->tfm);
++ key->tfm = NULL;
++ }
+ }
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2ff556906b5d..828ecca03c07 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3475,18 +3475,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ qdisc_calculate_pkt_len(skb, q);
+
+ if (q->flags & TCQ_F_NOLOCK) {
+- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+- __qdisc_drop(skb, &to_free);
+- rc = NET_XMIT_DROP;
+- } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+- qdisc_run_begin(q)) {
++ if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
++ qdisc_run_begin(q)) {
++ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
++ &q->state))) {
++ __qdisc_drop(skb, &to_free);
++ rc = NET_XMIT_DROP;
++ goto end_run;
++ }
+ qdisc_bstats_cpu_update(q, skb);
+
++ rc = NET_XMIT_SUCCESS;
+ if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
+ __qdisc_run(q);
+
++end_run:
+ qdisc_run_end(q);
+- rc = NET_XMIT_SUCCESS;
+ } else {
+ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ qdisc_run(q);
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 534c310bb089..7aee6f368754 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -8553,13 +8553,13 @@ sk_reuseport_is_valid_access(int off, int size,
+ return size == size_default;
+
+ /* Fields that allow narrowing */
+- case offsetof(struct sk_reuseport_md, eth_protocol):
++ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
+ if (size < FIELD_SIZEOF(struct sk_buff, protocol))
+ return false;
+ /* fall through */
+- case offsetof(struct sk_reuseport_md, ip_protocol):
+- case offsetof(struct sk_reuseport_md, bind_inany):
+- case offsetof(struct sk_reuseport_md, len):
++ case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
++ case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
++ case bpf_ctx_range(struct sk_reuseport_md, len):
+ bpf_ctx_record_field_size(info, size_default);
+ return bpf_ctx_narrow_access_ok(off, size, size_default);
+
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index edd622956083..b15c0c0f6e55 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -138,8 +138,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
+ mutex_unlock(&flow_dissector_mutex);
+ return -ENOENT;
+ }
+- bpf_prog_put(attached);
+ RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
++ bpf_prog_put(attached);
+ mutex_unlock(&flow_dissector_mutex);
+ return 0;
+ }
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index dc4aefdf2a08..2f89777763ad 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk,
+
+ select_by_hash:
+ /* no bpf or invalid bpf result: fall back to hash usage */
+- if (!sk2)
+- sk2 = reuse->socks[reciprocal_scale(hash, socks)];
++ if (!sk2) {
++ int i, j;
++
++ i = j = reciprocal_scale(hash, socks);
++ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
++ i++;
++ if (i >= reuse->num_socks)
++ i = 0;
++ if (i == j)
++ goto out;
++ }
++ sk2 = reuse->socks[i];
++ }
+ }
+
+ out:
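+
The fallback above no longer blindly indexes the socket array: starting from the hashed slot it skips connected (TCP_ESTABLISHED) sockets and wraps around at most once. The same probe, extracted into a self-contained sketch:

    /* Userspace sketch of the wrap-around probe; state[i] != 0 marks
     * a connected socket that must keep its own 4-tuple traffic. */
    static int pick_unconnected(int start, int nsocks, const int *state)
    {
            int i = start, j = start;

            while (state[i]) {
                    if (++i >= nsocks)
                            i = 0;          /* wrap to the front */
                    if (i == j)
                            return -1;      /* full lap: all connected */
            }
            return i;
    }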
+diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
+index 820dd8da57fc..1739b98a8f4b 100644
+--- a/net/dsa/dsa2.c
++++ b/net/dsa/dsa2.c
+@@ -577,6 +577,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
+ tag_protocol = ds->ops->get_tag_protocol(ds, dp->index);
+ tag_ops = dsa_tag_driver_get(tag_protocol);
+ if (IS_ERR(tag_ops)) {
++ if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
++ return -EPROBE_DEFER;
+ dev_warn(ds->dev, "No tagger for this switch\n");
+ return PTR_ERR(tag_ops);
+ }
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 7bd29e694603..9a0fe0c2fa02 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -15,6 +15,7 @@
+ #include <net/sock.h>
+ #include <net/route.h>
+ #include <net/tcp_states.h>
++#include <net/sock_reuseport.h>
+
+ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+ }
+ inet->inet_daddr = fl4->daddr;
+ inet->inet_dport = usin->sin_port;
++ reuseport_has_conns(sk, true);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ inet->inet_id = jiffies;
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index eed59c847722..acab7738f733 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -434,12 +434,13 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
+- if (sk->sk_reuseport) {
++ if (sk->sk_reuseport &&
++ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp_ehashfn(net, daddr, hnum,
+ saddr, sport);
+ result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+- if (result)
++ if (result && !reuseport_has_conns(sk, false))
+ return result;
+ }
+ badness = score;
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 9d78c907b918..694168e2302e 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -27,6 +27,7 @@
+ #include <net/ip6_route.h>
+ #include <net/tcp_states.h>
+ #include <net/dsfield.h>
++#include <net/sock_reuseport.h>
+
+ #include <linux/errqueue.h>
+ #include <linux/uaccess.h>
+@@ -254,6 +255,7 @@ ipv4_connected:
+ goto out;
+ }
+
++ reuseport_has_conns(sk, true);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ out:
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index dd2d0b963260..d5779d6a6065 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
+ if (unlikely(!tun_info ||
+ !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+- return -EINVAL;
++ goto tx_err;
+
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 70b01bd95022..1258be19e186 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -168,13 +168,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ score = compute_score(sk, net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif);
+ if (score > badness) {
+- if (sk->sk_reuseport) {
++ if (sk->sk_reuseport &&
++ sk->sk_state != TCP_ESTABLISHED) {
+ hash = udp6_ehashfn(net, daddr, hnum,
+ saddr, sport);
+
+ result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+- if (result)
++ if (result && !reuseport_has_conns(sk, false))
+ return result;
+ }
+ result = sk;
+diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
+index 8c6c11bab5b6..b5df6c4c159c 100644
+--- a/net/netfilter/nf_conntrack_ftp.c
++++ b/net/netfilter/nf_conntrack_ftp.c
+@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
+ i++;
+ }
+
+- pr_debug("Skipped up to `%c'!\n", skip);
++ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
+
+ *numoff = i;
+ *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
+diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
+index e0d392cb3075..0006503d2da9 100644
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -1037,8 +1037,13 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
+ table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
+ table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
+ table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
++ table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
++ table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
+ #ifdef CONFIG_NF_CONNTRACK_EVENTS
+ table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
++#endif
++#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
++ table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
+ #endif
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 49248fe5847a..55106bebf2b5 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -218,7 +218,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ return err;
+ }
+
+- flow->timeout = (u32)jiffies;
++ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index d68c801dd614..b9e7dd6e60ce 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ {
+ skb_orphan(skb);
+ skb_dst_set_noref(skb, dst);
+- skb->tstamp = 0;
+ dst_output(state->net, state->sk, skb);
+ return NF_STOLEN;
+ }
+@@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ iph = ip_hdr(skb);
+ ip_decrease_ttl(iph);
++ skb->tstamp = 0;
+
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+@@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+ ip6h = ipv6_hdr(skb);
+ ip6h->hop_limit--;
++ skb->tstamp = 0;
+
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 060a4ed46d5e..01705ad74a9a 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
+ return nft_chain_validate_hooks(ctx->chain, hook_mask);
+ }
+
++static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
++ [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
++ .len = NFT_NAME_MAXLEN - 1 },
++};
++
+ static int nft_flow_offload_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
+ static struct nft_expr_type nft_flow_offload_type __read_mostly = {
+ .name = "flow_offload",
+ .ops = &nft_flow_offload_ops,
++ .policy = nft_flow_offload_policy,
+ .maxattr = NFTA_FLOW_MAX,
+ .owner = THIS_MODULE,
+ };
+diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
+index d0ab1adf5bff..5aab6df74e0f 100644
+--- a/net/netfilter/xt_nfacct.c
++++ b/net/netfilter/xt_nfacct.c
+@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
+ nfnl_acct_put(info->nfacct);
+ }
+
+-static struct xt_match nfacct_mt_reg __read_mostly = {
+- .name = "nfacct",
+- .family = NFPROTO_UNSPEC,
+- .checkentry = nfacct_mt_checkentry,
+- .match = nfacct_mt,
+- .destroy = nfacct_mt_destroy,
+- .matchsize = sizeof(struct xt_nfacct_match_info),
+- .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
+- .me = THIS_MODULE,
++static struct xt_match nfacct_mt_reg[] __read_mostly = {
++ {
++ .name = "nfacct",
++ .revision = 0,
++ .family = NFPROTO_UNSPEC,
++ .checkentry = nfacct_mt_checkentry,
++ .match = nfacct_mt,
++ .destroy = nfacct_mt_destroy,
++ .matchsize = sizeof(struct xt_nfacct_match_info),
++ .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
++ .me = THIS_MODULE,
++ },
++ {
++ .name = "nfacct",
++ .revision = 1,
++ .family = NFPROTO_UNSPEC,
++ .checkentry = nfacct_mt_checkentry,
++ .match = nfacct_mt,
++ .destroy = nfacct_mt_destroy,
++ .matchsize = sizeof(struct xt_nfacct_match_info_v1),
++ .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
++ .me = THIS_MODULE,
++ },
+ };
+
+ static int __init nfacct_mt_init(void)
+ {
+- return xt_register_match(&nfacct_mt_reg);
++ return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
+ }
+
+ static void __exit nfacct_mt_exit(void)
+ {
+- xt_unregister_match(&nfacct_mt_reg);
++ xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
+ }
+
+ module_init(nfacct_mt_init);
+diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
+index ead7c6022208..b92b22ce8abd 100644
+--- a/net/netfilter/xt_physdev.c
++++ b/net/netfilter/xt_physdev.c
+@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
+ if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
+ (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
+ info->invert & XT_PHYSDEV_OP_BRIDGED) &&
+- par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
+- (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
++ par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
+ pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
+- if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
+- return -EINVAL;
++ return -EINVAL;
+ }
+
+ if (!brnf_probed) {
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index ac28f6a5d70e..17bd8f539bc7 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc)
+
+ void qdisc_put(struct Qdisc *qdisc)
+ {
++ if (!qdisc)
++ return;
++
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !refcount_dec_and_test(&qdisc->refcnt))
+ return;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index a680d28c231e..fbb85ea24ea0 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2301,7 +2301,7 @@ call_status(struct rpc_task *task)
+ case -ECONNABORTED:
+ case -ENOTCONN:
+ rpc_force_rebind(clnt);
+- /* fall through */
++ break;
+ case -EADDRINUSE:
+ rpc_delay(task, 3*HZ);
+ /* fall through */
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 88a1de9def11..b28aaddbe08e 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -10640,9 +10640,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
+ hyst = wdev->cqm_config->rssi_hyst;
+ n = wdev->cqm_config->n_rssi_thresholds;
+
+- for (i = 0; i < n; i++)
++ for (i = 0; i < n; i++) {
++ i = array_index_nospec(i, n);
+ if (last < wdev->cqm_config->rssi_thresholds[i])
+ break;
++ }
+
+ low_index = i - 1;
+ if (low_index >= 0) {
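+
Wrapping the loop body in array_index_nospec() is the usual Spectre-v1 hardening: even if the branch predictor runs an extra iteration speculatively, the index used for the dependent load is clamped to [0, n). A fragment showing the pattern, assuming kernel context and <linux/nospec.h>:

    /* Kernel-context fragment: bound i before the dependent load. */
    #include <linux/nospec.h>

    static int find_threshold(const int *thresholds, int n, int last)
    {
            int i;

            for (i = 0; i < n; i++) {
                    i = array_index_nospec(i, n);   /* clamp under speculation */
                    if (last < thresholds[i])
                            break;
            }
            return i;
    }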
+diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
+index 9c6de4f114f8..9bd7b96027c1 100644
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -368,7 +368,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+ umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
+ if (!umem->pages) {
+ err = -ENOMEM;
+- goto out_account;
++ goto out_pin;
+ }
+
+ for (i = 0; i < umem->npgs; i++)
+@@ -376,6 +376,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
+
+ return 0;
+
++out_pin:
++ xdp_umem_unpin_pages(umem);
+ out_account:
+ xdp_umem_unaccount_pages(umem);
+ return err;
+diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
+index a7a36209a193..6c2c05a75b54 100755
+--- a/scripts/decode_stacktrace.sh
++++ b/scripts/decode_stacktrace.sh
+@@ -85,7 +85,7 @@ parse_symbol() {
+ fi
+
+ # Strip out the base of the path
+- code=${code//^$basepath/""}
++ code=${code#$basepath/}
+
+ # In the case of inlines, move everything to same line
+ code=${code//$'\n'/' '}
+diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
+index e45b5cf3b97f..8491becb5727 100644
+--- a/security/keys/request_key_auth.c
++++ b/security/keys/request_key_auth.c
+@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
+ {
+ struct request_key_auth *rka = get_request_key_auth(key);
+
++ if (!rka)
++ return;
++
+ seq_puts(m, "key:");
+ seq_puts(m, key->description);
+ if (key_is_positive(key))
+@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
+ size_t datalen;
+ long ret;
+
++ if (!rka)
++ return -EKEYREVOKED;
++
+ datalen = rka->callout_len;
+ ret = datalen;
+
+diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
+index 7a4e21a31523..d41651afe5f6 100644
+--- a/tools/bpf/bpftool/prog.c
++++ b/tools/bpf/bpftool/prog.c
+@@ -362,7 +362,9 @@ static int do_show(int argc, char **argv)
+ if (fd < 0)
+ return -1;
+
+- return show_prog(fd);
++ err = show_prog(fd);
++ close(fd);
++ return err;
+ }
+
+ if (argc)
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 1cd28ebf8443..5c0154cf190c 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -506,6 +506,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
+ unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
+
+ #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
++#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
+ #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
+ #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
+ #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
+@@ -1287,6 +1288,14 @@ delta_core(struct core_data *new, struct core_data *old)
+ }
+ }
+
++int soft_c1_residency_display(int bic)
++{
++ if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
++ return 0;
++
++ return DO_BIC_READ(bic);
++}
++
+ /*
+ * old = new - old
+ */
+@@ -1322,7 +1331,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
+
+ old->c1 = new->c1 - old->c1;
+
+- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
++ soft_c1_residency_display(BIC_Avg_MHz)) {
+ if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
+ old->aperf = new->aperf - old->aperf;
+ old->mperf = new->mperf - old->mperf;
+@@ -1774,7 +1784,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+ retry:
+ t->tsc = rdtsc(); /* we are running on local CPU of interest */
+
+- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) {
++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
++ soft_c1_residency_display(BIC_Avg_MHz)) {
+ unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
+
+ /*
+@@ -1851,20 +1862,20 @@ retry:
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ goto done;
+
+- if (DO_BIC(BIC_CPU_c3)) {
++ if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
+ if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
+ return -6;
+ }
+
+- if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) {
++ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
+ if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+- } else if (do_knl_cstates) {
++ } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
+ if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+ }
+
+- if (DO_BIC(BIC_CPU_c7))
++ if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
+ if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
+ return -8;
+
+@@ -2912,6 +2923,7 @@ int snapshot_cpu_lpi_us(void)
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+ BIC_NOT_PRESENT(BIC_CPU_LPI);
++ fclose(fp);
+ return -1;
+ }
+
+@@ -2938,6 +2950,7 @@ int snapshot_sys_lpi_us(void)
+ if (retval != 1) {
+ fprintf(stderr, "Disabling Low Power Idle System output\n");
+ BIC_NOT_PRESENT(BIC_SYS_LPI);
++ fclose(fp);
+ return -1;
+ }
+ fclose(fp);
+@@ -3209,6 +3222,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
+ break;
+ case INTEL_FAM6_HASWELL_CORE: /* HSW */
+ case INTEL_FAM6_HASWELL_X: /* HSX */
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_BROADWELL_CORE: /* BDW */
+ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
+@@ -3405,6 +3419,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
+ case INTEL_FAM6_IVYBRIDGE: /* IVB */
+ case INTEL_FAM6_HASWELL_CORE: /* HSW */
+ case INTEL_FAM6_HASWELL_X: /* HSX */
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_BROADWELL_CORE: /* BDW */
+ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
+@@ -3841,6 +3856,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL_CORE: /* HSW */
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_BROADWELL_CORE: /* BDW */
+ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
+@@ -4032,6 +4048,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
+
+ switch (model) {
+ case INTEL_FAM6_HASWELL_CORE: /* HSW */
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ do_gfx_perf_limit_reasons = 1;
+ case INTEL_FAM6_HASWELL_X: /* HSX */
+@@ -4251,6 +4268,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
+ case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
+ case INTEL_FAM6_HASWELL_CORE: /* HSW */
+ case INTEL_FAM6_HASWELL_X: /* HSW */
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_GT3E: /* HSW */
+ case INTEL_FAM6_BROADWELL_CORE: /* BDW */
+ case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
+@@ -4284,7 +4302,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
+ return 0;
+
+ switch (model) {
+- case INTEL_FAM6_HASWELL_CORE:
++ case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_BROADWELL_CORE: /* BDW */
+ case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
+ case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
+@@ -4568,9 +4586,6 @@ unsigned int intel_model_duplicates(unsigned int model)
+ case INTEL_FAM6_XEON_PHI_KNM:
+ return INTEL_FAM6_XEON_PHI_KNL;
+
+- case INTEL_FAM6_HASWELL_ULT:
+- return INTEL_FAM6_HASWELL_CORE;
+-
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
+ return INTEL_FAM6_BROADWELL_X;
+@@ -4582,6 +4597,7 @@ unsigned int intel_model_duplicates(unsigned int model)
+ return INTEL_FAM6_SKYLAKE_MOBILE;
+
+ case INTEL_FAM6_ICELAKE_MOBILE:
++ case INTEL_FAM6_ICELAKE_NNPI:
+ return INTEL_FAM6_CANNONLAKE_MOBILE;
+ }
+ return model;
+@@ -5123,7 +5139,7 @@ int initialize_counters(int cpu_id)
+
+ void allocate_output_buffer()
+ {
+- output_buffer = calloc(1, (1 + topo.num_cpus) * 1024);
++ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
+ outp = output_buffer;
+ if (outp == NULL)
+ err(-1, "calloc output buffer");
+diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+index 34a796b303fe..3fe1eed900d4 100644
+--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
+
+ progname = argv[0];
+
+- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw",
++ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
+ long_options, &option_index)) != -1) {
+ switch (opt) {
+ case 'a':
+@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
+ if (system("/sbin/modprobe msr > /dev/null 2>&1"))
+ err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
+ }
++
++static void get_cpuid_or_exit(unsigned int leaf,
++ unsigned int *eax, unsigned int *ebx,
++ unsigned int *ecx, unsigned int *edx)
++{
++ if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
++ errx(1, "Processor not supported\n");
++}
++
+ /*
+ * early_cpuid()
+ * initialize turbo_is_enabled, has_hwp, has_epb
+@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
+ */
+ void early_cpuid(void)
+ {
+- unsigned int eax, ebx, ecx, edx, max_level;
++ unsigned int eax, ebx, ecx, edx;
+ unsigned int fms, family, model;
+
+- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
+-
+- if (max_level < 6)
+- errx(1, "Processor not supported\n");
+-
+- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
+ family = (fms >> 8) & 0xf;
+ model = (fms >> 4) & 0xf;
+ if (family == 6 || family == 0xf)
+@@ -1288,7 +1292,7 @@ void early_cpuid(void)
+ bdx_highest_ratio = msr & 0xFF;
+ }
+
+- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
+ turbo_is_enabled = (eax >> 1) & 1;
+ has_hwp = (eax >> 7) & 1;
+ has_epb = (ecx >> 3) & 1;
+@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
+
+ eax = ebx = ecx = edx = 0;
+
+- __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
++ get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
+
+ if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+ genuine_intel = 1;
+@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
+ fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
+ (char *)&ebx, (char *)&edx, (char *)&ecx);
+
+- __get_cpuid(1, &fms, &ebx, &ecx, &edx);
++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
+ family = (fms >> 8) & 0xf;
+ model = (fms >> 4) & 0xf;
+ stepping = fms & 0xf;
+@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
+ errx(1, "CPUID: no MSR");
+
+
+- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
+ /* turbo_is_enabled already set */
+ /* has_hwp already set */
+ has_hwp_notify = eax & (1 << 8);
+diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
+index f7a0744db31e..5dc109f4c097 100644
+--- a/tools/testing/selftests/bpf/config
++++ b/tools/testing/selftests/bpf/config
+@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
+ CONFIG_MPLS_ROUTING=m
+ CONFIG_MPLS_IPTUNNEL=m
+ CONFIG_IPV6_SIT=m
++CONFIG_BPF_JIT=y
+diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
+index 2fc4625c1a15..655729004391 100644
+--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
++++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
+@@ -20,9 +20,9 @@ int main(int argc, char **argv)
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_local_storage),
+- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
+- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
++ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+
+ BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
+@@ -30,7 +30,7 @@ int main(int argc, char **argv)
+ BPF_FUNC_get_local_storage),
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
+index fb679ac3d4b0..0e6652733462 100644
+--- a/tools/testing/selftests/bpf/test_sock.c
++++ b/tools/testing/selftests/bpf/test_sock.c
+@@ -13,6 +13,7 @@
+ #include <bpf/bpf.h>
+
+ #include "cgroup_helpers.h"
++#include "bpf_endian.h"
+ #include "bpf_rlimit.h"
+ #include "bpf_util.h"
+
+@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip6[3])),
+- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
++ __bpf_constant_ntohl(0x00000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
+@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
+ /* if (ip == expected && port == expected) */
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_ip4)),
+- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
++ __bpf_constant_ntohl(0x7F000001), 4),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock, src_port)),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
+diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
+index 5294abb3f178..8ffd07e2a160 100644
+--- a/virt/kvm/coalesced_mmio.c
++++ b/virt/kvm/coalesced_mmio.c
+@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
+ return 1;
+ }
+
+-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
+ {
+ struct kvm_coalesced_mmio_ring *ring;
+ unsigned avail;
+@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
+ * there is always one unused entry in the buffer
+ */
+ ring = dev->kvm->coalesced_mmio_ring;
+- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
++ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
+ if (avail == 0) {
+ /* full */
+ return 0;
+@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
+ {
+ struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
+ struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
++ __u32 insert;
+
+ if (!coalesced_mmio_in_range(dev, addr, len))
+ return -EOPNOTSUPP;
+
+ spin_lock(&dev->kvm->ring_lock);
+
+- if (!coalesced_mmio_has_room(dev)) {
++ insert = READ_ONCE(ring->last);
++ if (!coalesced_mmio_has_room(dev, insert) ||
++ insert >= KVM_COALESCED_MMIO_MAX) {
+ spin_unlock(&dev->kvm->ring_lock);
+ return -EOPNOTSUPP;
+ }
+
+ /* copy data in first free entry of the ring */
+
+- ring->coalesced_mmio[ring->last].phys_addr = addr;
+- ring->coalesced_mmio[ring->last].len = len;
+- memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+- ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
++ ring->coalesced_mmio[insert].phys_addr = addr;
++ ring->coalesced_mmio[insert].len = len;
++ memcpy(ring->coalesced_mmio[insert].data, val, len);
++ ring->coalesced_mmio[insert].pio = dev->zone.pio;
+ smp_wmb();
+- ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
++ ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
+ spin_unlock(&dev->kvm->ring_lock);
+ return 0;
+ }
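+
The coalesced MMIO ring page is shared with userspace, which can scribble on ring->last between the room check and the store. The fix above therefore snapshots the index once with READ_ONCE(), validates the snapshot, and uses only the snapshot afterwards. The resulting insert pattern, as a kernel-context fragment with locking and declarations elided:

    /* Single-producer insert over a userspace-shared ring (fragment). */
    insert = READ_ONCE(ring->last);          /* one read of shared memory */
    if (!has_room(ring, insert) || insert >= KVM_COALESCED_MMIO_MAX)
            return -EOPNOTSUPP;              /* full, or index corrupted */

    ring->coalesced_mmio[insert].phys_addr = addr;   /* bounded stores */
    ring->coalesced_mmio[insert].len = len;
    memcpy(ring->coalesced_mmio[insert].data, val, len);
    smp_wmb();                               /* publish data before index */
    ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;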
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-19 12:05 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-19 12:05 UTC (permalink / raw
To: gentoo-commits
commit: e9a9d2c81f69b0ea3c3ccdc99992d8440943e754
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 19 12:03:16 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 19 12:03:16 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e9a9d2c8
Add FILE_LOCKING to GENTOO_LINUX config. See bug #694688.
Thanks to Marius Stoica for reporting
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
4567_distro-Gentoo-Kconfig.patch | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index d6e791a..42585b8 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -6,9 +6,9 @@
source "lib/Kconfig.debug"
+
+source "distro/Kconfig"
---- /dev/null 2018-12-28 10:40:34.089999934 -0500
-+++ b/distro/Kconfig 2018-12-28 18:54:40.467970759 -0500
-@@ -0,0 +1,147 @@
+--- /dev/null 2019-09-19 03:42:24.710222248 -0400
++++ b/distro/Kconfig 2019-09-19 07:50:44.167224787 -0400
+@@ -0,0 +1,149 @@
+menu "Gentoo Linux"
+
+config GENTOO_LINUX
@@ -91,6 +91,7 @@
+ depends on GENTOO_LINUX
+
+ select BINFMT_SCRIPT
++ select FILE_LOCKING
+
+ help
+ The init system is the first thing that loads after the kernel booted.
@@ -123,6 +124,7 @@
+ select EPOLL
+ select FANOTIFY
+ select FHANDLE
++ select FILE_LOCKING
+ select INOTIFY_USER
+ select IPV6
+ select NET
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-19 10:05 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-19 10:05 UTC (permalink / raw
To: gentoo-commits
commit: 94f39221cd730a186a590b0140ba24cc3c3334c5
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Sep 19 10:05:30 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Sep 19 10:05:30 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=94f39221
Linux patch 5.2.16
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1015_linux-5.2.16.patch | 3120 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3124 insertions(+)
diff --git a/0000_README b/0000_README
index e8d3287..c046e8a 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch: 1014_linux-5.2.15.patch
From: https://www.kernel.org
Desc: Linux 5.2.15
+Patch: 1015_linux-5.2.16.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.16
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1015_linux-5.2.16.patch b/1015_linux-5.2.16.patch
new file mode 100644
index 0000000..7eee1f4
--- /dev/null
+++ b/1015_linux-5.2.16.patch
@@ -0,0 +1,3120 @@
+diff --git a/Makefile b/Makefile
+index 3c977aa66650..3cec03e93b40 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 76f34346b642..8b03eb44e876 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -312,6 +312,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+ {
+ unsigned long ret;
+
++ barrier_nospec();
+ allow_user_access(to, from, n);
+ ret = __copy_tofrom_user(to, from, n);
+ prevent_user_access(to, from, n);
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 9dde4d7d8704..149525b5df1b 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1978,6 +1978,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
+ case KVM_S390_MCHK:
+ irq->u.mchk.mcic = s390int->parm64;
+ break;
++ case KVM_S390_INT_PFAULT_INIT:
++ irq->u.ext.ext_params = s390int->parm;
++ irq->u.ext.ext_params2 = s390int->parm64;
++ break;
++ case KVM_S390_RESTART:
++ case KVM_S390_INT_CLOCK_COMP:
++ case KVM_S390_INT_CPU_TIMER:
++ break;
++ default:
++ return -EINVAL;
+ }
+ return 0;
+ }
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 28ebd647784c..4934141689d2 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -1013,6 +1013,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
+ /* mark all the pages in active slots as dirty */
+ for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
+ ms = slots->memslots + slotnr;
++ if (!ms->dirty_bitmap)
++ return -EINVAL;
+ /*
+ * The second half of the bitmap is only used on x86,
+ * and would be wasted otherwise, so we put it to good
+@@ -4325,7 +4327,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ }
+ case KVM_S390_INTERRUPT: {
+ struct kvm_s390_interrupt s390int;
+- struct kvm_s390_irq s390irq;
++ struct kvm_s390_irq s390irq = {};
+
+ if (copy_from_user(&s390int, argp, sizeof(s390int)))
+ return -EFAULT;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 9825ca6a6020..5cdca4208647 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ {
+ long err;
+
++ if (!IS_ENABLED(CONFIG_SYSVIPC))
++ return -ENOSYS;
++
+ /* No need for backward compatibility. We can start fresh... */
+ if (call <= SEMTIMEDOP) {
+ switch (call) {
+ case SEMOP:
+- err = sys_semtimedop(first, ptr,
+- (unsigned int)second, NULL);
++ err = ksys_semtimedop(first, ptr,
++ (unsigned int)second, NULL);
+ goto out;
+ case SEMTIMEDOP:
+- err = sys_semtimedop(first, ptr, (unsigned int)second,
++ err = ksys_semtimedop(first, ptr, (unsigned int)second,
+ (const struct __kernel_timespec __user *)
+- (unsigned long) fifth);
++ (unsigned long) fifth);
+ goto out;
+ case SEMGET:
+- err = sys_semget(first, (int)second, (int)third);
++ err = ksys_semget(first, (int)second, (int)third);
+ goto out;
+ case SEMCTL: {
+- err = sys_semctl(first, second,
+- (int)third | IPC_64,
+- (unsigned long) ptr);
++ err = ksys_old_semctl(first, second,
++ (int)third | IPC_64,
++ (unsigned long) ptr);
+ goto out;
+ }
+ default:
+@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ if (call <= MSGCTL) {
+ switch (call) {
+ case MSGSND:
+- err = sys_msgsnd(first, ptr, (size_t)second,
++ err = ksys_msgsnd(first, ptr, (size_t)second,
+ (int)third);
+ goto out;
+ case MSGRCV:
+- err = sys_msgrcv(first, ptr, (size_t)second, fifth,
++ err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
+ (int)third);
+ goto out;
+ case MSGGET:
+- err = sys_msgget((key_t)first, (int)second);
++ err = ksys_msgget((key_t)first, (int)second);
+ goto out;
+ case MSGCTL:
+- err = sys_msgctl(first, (int)second | IPC_64, ptr);
++ err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
+ goto out;
+ default:
+ err = -ENOSYS;
+@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
+ goto out;
+ }
+ case SHMDT:
+- err = sys_shmdt(ptr);
++ err = ksys_shmdt(ptr);
+ goto out;
+ case SHMGET:
+- err = sys_shmget(first, (size_t)second, (int)third);
++ err = ksys_shmget(first, (size_t)second, (int)third);
+ goto out;
+ case SHMCTL:
+- err = sys_shmctl(first, (int)second | IPC_64, ptr);
++ err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
+ goto out;
+ default:
+ err = -ENOSYS;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 56e748a7679f..94df0868804b 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
+
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
++REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
+ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+ export REALMODE_CFLAGS
+
+diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
+index ad84239e595e..15255f388a85 100644
+--- a/arch/x86/boot/compressed/acpi.c
++++ b/arch/x86/boot/compressed/acpi.c
+@@ -44,17 +44,109 @@ static acpi_physical_address get_acpi_rsdp(void)
+ return addr;
+ }
+
+-/* Search EFI system tables for RSDP. */
+-static acpi_physical_address efi_get_rsdp_addr(void)
++/*
++ * Search EFI system tables for RSDP. If both ACPI_20_TABLE_GUID and
++ * ACPI_TABLE_GUID are found, take the former, which has more features.
++ */
++static acpi_physical_address
++__efi_get_rsdp_addr(unsigned long config_tables, unsigned int nr_tables,
++ bool efi_64)
+ {
+ acpi_physical_address rsdp_addr = 0;
+
+ #ifdef CONFIG_EFI
+- unsigned long systab, systab_tables, config_tables;
++ int i;
++
++ /* Get EFI tables from systab. */
++ for (i = 0; i < nr_tables; i++) {
++ acpi_physical_address table;
++ efi_guid_t guid;
++
++ if (efi_64) {
++ efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables + i;
++
++ guid = tbl->guid;
++ table = tbl->table;
++
++ if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
++ debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
++ return 0;
++ }
++ } else {
++ efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables + i;
++
++ guid = tbl->guid;
++ table = tbl->table;
++ }
++
++ if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
++ rsdp_addr = table;
++ else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
++ return table;
++ }
++#endif
++ return rsdp_addr;
++}
++
++/* EFI/kexec support is 64-bit only. */
++#ifdef CONFIG_X86_64
++static struct efi_setup_data *get_kexec_setup_data_addr(void)
++{
++ struct setup_data *data;
++ u64 pa_data;
++
++ pa_data = boot_params->hdr.setup_data;
++ while (pa_data) {
++ data = (struct setup_data *)pa_data;
++ if (data->type == SETUP_EFI)
++ return (struct efi_setup_data *)(pa_data + sizeof(struct setup_data));
++
++ pa_data = data->next;
++ }
++ return NULL;
++}
++
++static acpi_physical_address kexec_get_rsdp_addr(void)
++{
++ efi_system_table_64_t *systab;
++ struct efi_setup_data *esd;
++ struct efi_info *ei;
++ char *sig;
++
++ esd = (struct efi_setup_data *)get_kexec_setup_data_addr();
++ if (!esd)
++ return 0;
++
++ if (!esd->tables) {
++ debug_putstr("Wrong kexec SETUP_EFI data.\n");
++ return 0;
++ }
++
++ ei = &boot_params->efi_info;
++ sig = (char *)&ei->efi_loader_signature;
++ if (strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) {
++ debug_putstr("Wrong kexec EFI loader signature.\n");
++ return 0;
++ }
++
++ /* Get systab from boot params. */
++ systab = (efi_system_table_64_t *) (ei->efi_systab | ((__u64)ei->efi_systab_hi << 32));
++ if (!systab)
++ error("EFI system table not found in kexec boot_params.");
++
++ return __efi_get_rsdp_addr((unsigned long)esd->tables, systab->nr_tables, true);
++}
++#else
++static acpi_physical_address kexec_get_rsdp_addr(void) { return 0; }
++#endif /* CONFIG_X86_64 */
++
++static acpi_physical_address efi_get_rsdp_addr(void)
++{
++#ifdef CONFIG_EFI
++ unsigned long systab, config_tables;
+ unsigned int nr_tables;
+ struct efi_info *ei;
+ bool efi_64;
+- int size, i;
+ char *sig;
+
+ ei = &boot_params->efi_info;
+@@ -88,49 +180,20 @@ static acpi_physical_address efi_get_rsdp_addr(void)
+
+ config_tables = stbl->tables;
+ nr_tables = stbl->nr_tables;
+- size = sizeof(efi_config_table_64_t);
+ } else {
+ efi_system_table_32_t *stbl = (efi_system_table_32_t *)systab;
+
+ config_tables = stbl->tables;
+ nr_tables = stbl->nr_tables;
+- size = sizeof(efi_config_table_32_t);
+ }
+
+ if (!config_tables)
+ error("EFI config tables not found.");
+
+- /* Get EFI tables from systab. */
+- for (i = 0; i < nr_tables; i++) {
+- acpi_physical_address table;
+- efi_guid_t guid;
+-
+- config_tables += size;
+-
+- if (efi_64) {
+- efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables;
+-
+- guid = tbl->guid;
+- table = tbl->table;
+-
+- if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) {
+- debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n");
+- return 0;
+- }
+- } else {
+- efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables;
+-
+- guid = tbl->guid;
+- table = tbl->table;
+- }
+-
+- if (!(efi_guidcmp(guid, ACPI_TABLE_GUID)))
+- rsdp_addr = table;
+- else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID)))
+- return table;
+- }
++ return __efi_get_rsdp_addr(config_tables, nr_tables, efi_64);
++#else
++ return 0;
+ #endif
+- return rsdp_addr;
+ }
+
+ static u8 compute_checksum(u8 *buffer, u32 length)
+@@ -220,6 +283,14 @@ acpi_physical_address get_rsdp_addr(void)
+ if (!pa)
+ pa = boot_params->acpi_rsdp_addr;
+
++ /*
++ * Try to get EFI data from setup_data. This can happen when we're a
++ * kexec'ed kernel and kexec(1) has passed all the required EFI info to
++ * us.
++ */
++ if (!pa)
++ pa = kexec_get_rsdp_addr();
++
+ if (!pa)
+ pa = efi_get_rsdp_addr();
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 921c609c2af7..65d49452e6e0 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -333,6 +333,7 @@ struct kvm_mmu_page {
+ int root_count; /* Currently serving as active root */
+ unsigned int unsync_children;
+ struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
++ unsigned long mmu_valid_gen;
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
+
+ #ifdef CONFIG_X86_32
+@@ -851,6 +852,7 @@ struct kvm_arch {
+ unsigned long n_requested_mmu_pages;
+ unsigned long n_max_mmu_pages;
+ unsigned int indirect_shadow_pages;
++ unsigned long mmu_valid_gen;
+ struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
+ /*
+ * Hash table of struct kvm_mmu_page.
+diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
+index 64b973f0e985..4c407833faca 100644
+--- a/arch/x86/kernel/ima_arch.c
++++ b/arch/x86/kernel/ima_arch.c
+@@ -11,10 +11,11 @@ extern struct boot_params boot_params;
+ static enum efi_secureboot_mode get_sb_mode(void)
+ {
+ efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
++ efi_char16_t efi_SetupMode_name[] = L"SetupMode";
+ efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
+ efi_status_t status;
+ unsigned long size;
+- u8 secboot;
++ u8 secboot, setupmode;
+
+ size = sizeof(secboot);
+
+@@ -36,7 +37,14 @@ static enum efi_secureboot_mode get_sb_mode(void)
+ return efi_secureboot_mode_unknown;
+ }
+
+- if (secboot == 0) {
++ size = sizeof(setupmode);
++ status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
++ NULL, &size, &setupmode);
++
++ if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
++ setupmode = 0;
++
++ if (secboot == 0 || setupmode == 1) {
+ pr_info("ima: secureboot mode disabled\n");
+ return efi_secureboot_mode_disabled;
+ }
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 01f04db1fa61..66055ca29b6b 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -2066,6 +2066,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
+ if (!direct)
+ sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
+ set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
++
++ /*
++ * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
++ * depends on valid pages being added to the head of the list. See
++ * comments in kvm_zap_obsolete_pages().
++ */
+ list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+ kvm_mod_used_mmu_pages(vcpu->kvm, +1);
+ return sp;
+@@ -2215,7 +2221,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
+ #define for_each_valid_sp(_kvm, _sp, _gfn) \
+ hlist_for_each_entry(_sp, \
+ &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+- if ((_sp)->role.invalid) { \
++ if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
+ } else
+
+ #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
+@@ -2272,6 +2278,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+ static void mmu_audit_disable(void) { }
+ #endif
+
++static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
++{
++ return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
++}
++
+ static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
+ {
+@@ -2496,6 +2507,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
+ if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+ flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
+ }
++ sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
+ clear_page(sp->spt);
+ trace_kvm_mmu_get_page(sp, true);
+
+@@ -4229,6 +4241,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ return false;
+
+ if (cached_root_available(vcpu, new_cr3, new_role)) {
++ /*
++ * It is possible that the cached previous root page is
++ * obsolete because of a change in the MMU generation
++ * number. However, changing the generation number is
++ * accompanied by KVM_REQ_MMU_RELOAD, which will free
++ * the root set here and allocate a new one.
++ */
+ kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+ if (!skip_tlb_flush) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+@@ -5645,11 +5664,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
+ return alloc_mmu_pages(vcpu);
+ }
+
++
++static void kvm_zap_obsolete_pages(struct kvm *kvm)
++{
++ struct kvm_mmu_page *sp, *node;
++ LIST_HEAD(invalid_list);
++ int ign;
++
++restart:
++ list_for_each_entry_safe_reverse(sp, node,
++ &kvm->arch.active_mmu_pages, link) {
++ /*
++ * No obsolete valid page exists before a newly created page
++ * since active_mmu_pages is a FIFO list.
++ */
++ if (!is_obsolete_sp(kvm, sp))
++ break;
++
++ /*
++ * Do not repeatedly zap a root page to avoid unnecessary
++ * KVM_REQ_MMU_RELOAD; otherwise we may fail to make
++ * progress:
++ * vcpu 0 vcpu 1
++ * call vcpu_enter_guest():
++ * 1): handle KVM_REQ_MMU_RELOAD
++ * and require mmu-lock to
++ * load mmu
++ * repeat:
++ * 1): zap root page and
++ * send KVM_REQ_MMU_RELOAD
++ *
++ * 2): if (cond_resched_lock(mmu-lock))
++ *
++ * 2): hold mmu-lock and load mmu
++ *
++ * 3): see KVM_REQ_MMU_RELOAD bit
++ * on vcpu->requests is set
++ * then return 1 to call
++ * vcpu_enter_guest() again.
++ * goto repeat;
++ *
++ * Since we walk the list in reverse and invalid pages are
++ * moved to the head, skipping invalid pages lets us avoid
++ * walking the list indefinitely.
++ */
++ if (sp->role.invalid)
++ continue;
++
++ if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
++ cond_resched_lock(&kvm->mmu_lock);
++ goto restart;
++ }
++
++ if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
++ goto restart;
++ }
++
++ kvm_mmu_commit_zap_page(kvm, &invalid_list);
++}
++
++/*
++ * Fast invalidate all shadow pages and use lock-break technique
++ * to zap obsolete pages.
++ *
++ * This is required when a memslot is being deleted or the VM is
++ * being destroyed; in these cases, we must ensure that the KVM
++ * MMU does not use any resource of the slot being deleted (or of
++ * any slot) after this function returns.
++ */
++static void kvm_mmu_zap_all_fast(struct kvm *kvm)
++{
++ spin_lock(&kvm->mmu_lock);
++ kvm->arch.mmu_valid_gen++;
++
++ kvm_zap_obsolete_pages(kvm);
++ spin_unlock(&kvm->mmu_lock);
++}
++
+ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ struct kvm_page_track_notifier_node *node)
+ {
+- kvm_mmu_zap_all(kvm);
++ kvm_mmu_zap_all_fast(kvm);
+ }
+
+ void kvm_mmu_init_vm(struct kvm *kvm)
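A minimal userspace sketch of the mmu_valid_gen scheme introduced above (illustrative names only, not kernel code): bumping a single counter obsoletes every existing page in O(1), and because new pages are added at the list head, a reverse walk can stop at the first non-obsolete entry.

    #include <stdbool.h>
    #include <stdio.h>

    #define NPAGES 8

    struct page { unsigned long gen; };

    static unsigned long mmu_valid_gen;
    static struct page pages[NPAGES]; /* index 0 = head = newest page */
    static int npages;

    static bool is_obsolete(const struct page *p)
    {
        return p->gen != mmu_valid_gen;
    }

    static void add_page(void)
    {
        /* new pages go to the head, like list_add() on active_mmu_pages */
        for (int i = npages; i > 0; i--)
            pages[i] = pages[i - 1];
        pages[0].gen = mmu_valid_gen;
        npages++;
    }

    static void zap_obsolete(void)
    {
        /* reverse walk: oldest entries first, stop at first valid page */
        for (int i = npages - 1; i >= 0; i--) {
            if (!is_obsolete(&pages[i]))
                break;
            printf("zap slot %d (gen %lu)\n", i, pages[i].gen);
            npages--;
        }
    }

    int main(void)
    {
        add_page();
        add_page();          /* two pages at generation 0 */
        mmu_valid_gen++;     /* O(1): both are now obsolete */
        add_page();          /* head page at the new generation */
        zap_obsolete();      /* zaps the two old pages, keeps the new one */
        printf("%d page(s) left\n", npages);
        return 0;
    }

This is why the FIFO property called out in kvm_mmu_alloc_page() matters: without head insertion, the early break in the reverse walk would be unsound.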
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2c7daa3b968d..4ca86e70d3b4 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -7116,13 +7116,41 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+
+ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+ {
+- bool is_user, smap;
+-
+- is_user = svm_get_cpl(vcpu) == 3;
+- smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
++ unsigned long cr4 = kvm_read_cr4(vcpu);
++ bool smep = cr4 & X86_CR4_SMEP;
++ bool smap = cr4 & X86_CR4_SMAP;
++ bool is_user = svm_get_cpl(vcpu) == 3;
+
+ /*
+- * Detect and workaround Errata 1096 Fam_17h_00_0Fh
++ * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
++ *
++ * Errata:
++ * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
++ * it is possible that the CPU microcode implementing DecodeAssist will
++ * fail to read the bytes of the instruction which caused #NPF. In this
++ * case, the GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
++ * return 0 instead of the correct guest instruction bytes.
++ *
++ * This happens because CPU microcode reading instruction bytes
++ * uses a special opcode which attempts to read data using CPL=0
++ * privileges. The microcode reads CS:RIP and, if it hits an SMAP
++ * fault, gives up and returns no instruction bytes.
++ *
++ * Detection:
++ * We reach here when the CPU supports DecodeAssist, raised #NPF and
++ * returned 0 in the GuestIntrBytes field of the VMCB.
++ * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
++ * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
++ * when vCPU CPL==3 (because otherwise the guest would have taken
++ * an SMEP fault instead of #NPF).
++ * Otherwise (vCPU CR4.SMEP=0), the erratum can be triggered at any
++ * vCPU CPL. As most guests that enable SMAP also enable SMEP, use
++ * the above logic to minimize false positives in detecting the
++ * erratum while still preserving correctness in all cases.
++ *
++ * Workaround:
++ * To determine what instruction the guest was executing, the hypervisor
++ * will have to decode the instruction at the instruction pointer.
+ *
+ * In non SEV guest, hypervisor will be able to read the guest
+ * memory to decode the instruction pointer when insn_len is zero
+@@ -7133,11 +7161,11 @@ static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
+ * instruction pointer so we will not able to workaround it. Lets
+ * print the error and request to kill the guest.
+ */
+- if (is_user && smap) {
++ if (smap && (!smep || is_user)) {
+ if (!sev_guest(vcpu->kvm))
+ return true;
+
+- pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
++ pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ }
+
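As a quick check of the relaxed condition, here is a standalone sketch of just the detection predicate, printing its full truth table (hypothetical helper name, not the driver function):

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors the fixed test: smap && (!smep || is_user) */
    static bool may_hit_erratum_1096(bool smep, bool smap, bool is_user)
    {
        return smap && (!smep || is_user);
    }

    int main(void)
    {
        for (int smep = 0; smep <= 1; smep++)
            for (int smap = 0; smap <= 1; smap++)
                for (int user = 0; user <= 1; user++)
                    printf("SMEP=%d SMAP=%d CPL3=%d -> %s\n",
                           smep, smap, user,
                           may_hit_erratum_1096(smep, smap, user)
                           ? "suspect erratum" : "no workaround");
        return 0;
    }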
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b96723294b2f..74ac35bbf1ef 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4411,6 +4411,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ int len;
+ gva_t gva = 0;
+ struct vmcs12 *vmcs12;
++ struct x86_exception e;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+@@ -4451,7 +4452,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ vmx_instruction_info, true, len, &gva))
+ return 1;
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+- kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
++ if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
++ kvm_inject_page_fault(vcpu, &e);
+ }
+
+ return nested_vmx_succeed(vcpu);
+@@ -4706,13 +4708,11 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+
+ switch (type) {
+ case VMX_EPT_EXTENT_GLOBAL:
++ case VMX_EPT_EXTENT_CONTEXT:
+ /*
+- * TODO: track mappings and invalidate
+- * single context requests appropriately
++ * TODO: Sync the necessary shadow EPT roots here, rather than
++ * at the next emulated VM-entry.
+ */
+- case VMX_EPT_EXTENT_CONTEXT:
+- kvm_mmu_sync_roots(vcpu);
+- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ break;
+ default:
+ BUG_ON(1);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1f80fd560ede..4000bcff47b0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5265,6 +5265,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+ /* kvm_write_guest_virt_system can pull in tons of pages. */
+ vcpu->arch.l1tf_flush_l1d = true;
+
++ /*
++ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
++ * is returned, but our callers are not ready for that and they blindly
++ * call kvm_inject_page_fault. Ensure that they at least do not leak
++ * uninitialized kernel stack memory into cr2 and error code.
++ */
++ memset(exception, 0, sizeof(*exception));
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ PFERR_WRITE_MASK, exception);
+ }
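The memset() added above is a general out-parameter hygiene pattern. The sketch below (illustrative names only) shows how an early-return path would otherwise let a caller that blindly consumes the struct read uninitialized stack:

    #include <stdio.h>
    #include <string.h>

    struct fault_info {
        unsigned long addr;
        unsigned int error_code;
    };

    static int guest_write(int fail_early, struct fault_info *f)
    {
        memset(f, 0, sizeof(*f));   /* the fix: never leave *f stale */
        if (fail_early)
            return -1;              /* early exit, *f still well defined */
        f->addr = 0x1000;           /* normal path fills in real data */
        f->error_code = 2;
        return -1;                  /* pretend the write faulted */
    }

    int main(void)
    {
        struct fault_info f;        /* deliberately uninitialized */

        if (guest_write(1, &f))     /* caller "injects" unconditionally */
            printf("inject: addr=%#lx error_code=%u\n",
                   f.addr, f.error_code);
        return 0;
    }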
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 8901a1f89cf5..10fb42da0007 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -18,37 +18,40 @@ targets += purgatory.ro
+ KASAN_SANITIZE := n
+ KCOV_INSTRUMENT := n
+
++# These are adjustments to the compiler flags used for objects that
++# make up the standalone purgatory.ro
++
++PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
++PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
++
+ # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+ # in turn leaves some undefined symbols like __fentry__ in purgatory and not
+ # sure how to relocate those.
+ ifdef CONFIG_FUNCTION_TRACER
+-CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
+-CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
++PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
+ endif
+
+ ifdef CONFIG_STACKPROTECTOR
+-CFLAGS_REMOVE_sha256.o += -fstack-protector
+-CFLAGS_REMOVE_purgatory.o += -fstack-protector
+-CFLAGS_REMOVE_string.o += -fstack-protector
+-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
++PURGATORY_CFLAGS_REMOVE += -fstack-protector
+ endif
+
+ ifdef CONFIG_STACKPROTECTOR_STRONG
+-CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
+-CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
+-CFLAGS_REMOVE_string.o += -fstack-protector-strong
+-CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
++PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
+ endif
+
+ ifdef CONFIG_RETPOLINE
+-CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
+-CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
++PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
+ endif
+
++CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
++
++CFLAGS_REMOVE_sha256.o += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
++
++CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
++CFLAGS_string.o += $(PURGATORY_CFLAGS)
++
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index eaf3aa0cb803..2dc0123cbba1 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1820,12 +1820,63 @@ static inline struct kobject *get_glue_dir(struct device *dev)
+ */
+ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+ {
++ unsigned int ref;
++
+ /* see if we live in a "glue" directory */
+ if (!live_in_glue_dir(glue_dir, dev))
+ return;
+
+ mutex_lock(&gdp_mutex);
+- if (!kobject_has_children(glue_dir))
++ /**
++ * There is a race condition between removing glue directory
++ * and adding a new device under the glue directory.
++ *
++ * CPU1: CPU2:
++ *
++ * device_add()
++ * get_device_parent()
++ * class_dir_create_and_add()
++ * kobject_add_internal()
++ * create_dir() // create glue_dir
++ *
++ * device_add()
++ * get_device_parent()
++ * kobject_get() // get glue_dir
++ *
++ * device_del()
++ * cleanup_glue_dir()
++ * kobject_del(glue_dir)
++ *
++ * kobject_add()
++ * kobject_add_internal()
++ * create_dir() // in glue_dir
++ * sysfs_create_dir_ns()
++ * kernfs_create_dir_ns(sd)
++ *
++ * sysfs_remove_dir() // glue_dir->sd=NULL
++ * sysfs_put() // free glue_dir->sd
++ *
++ * // sd is freed
++ * kernfs_new_node(sd)
++ * kernfs_get(glue_dir)
++ * kernfs_add_one()
++ * kernfs_put()
++ *
++ * If CPU2 adds a new device under the glue dir before CPU1 removes
++ * its last child device, the glue_dir kobject reference count is
++ * increased to 2 in kobject_get(), and CPU2 has already called
++ * kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
++ * and sysfs_put(), which frees glue_dir->sd.
++ *
++ * CPU2 will then see a stale "empty" but still potentially used
++ * glue dir around in kernfs_new_node().
++ *
++ * To avoid this, we must also make sure that the kernfs_node for
++ * glue_dir is released on CPU1 only when the refcount of the
++ * glue_dir kobject is 1.
++ */
++ ref = kref_read(&glue_dir->kref);
++ if (!kobject_has_children(glue_dir) && !--ref)
+ kobject_del(glue_dir);
+ kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
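A simplified, single-threaded model of the guard added to cleanup_glue_dir() (made-up types, no locking): only tear the directory down when the reference we hold is the last one, so a racing device_add() that already grabbed the kobject never observes a half-deleted directory.

    #include <stdbool.h>
    #include <stdio.h>

    struct kobj {
        int kref;       /* stands in for kref_read(&glue_dir->kref) */
        bool deleted;
    };

    static void cleanup_glue_dir(struct kobj *glue_dir)
    {
        int ref = glue_dir->kref;

        if (!--ref)                         /* our ref is the only one left */
            glue_dir->deleted = true;       /* kobject_del() */
        glue_dir->kref--;                   /* kobject_put() */
    }

    int main(void)
    {
        struct kobj only_us = { .kref = 1 };
        struct kobj racing  = { .kref = 2 };  /* another CPU holds a ref */

        cleanup_glue_dir(&only_us);
        cleanup_glue_dir(&racing);
        printf("only_us deleted=%d, racing deleted=%d\n",
               only_us.deleted, racing.deleted);
        return 0;
    }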
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 6d61f5aafc78..7954a7924923 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -1162,10 +1162,6 @@ static int btusb_open(struct hci_dev *hdev)
+ }
+
+ data->intf->needs_remote_wakeup = 1;
+- /* device specific wakeup source enabled and required for USB
+- * remote wakeup while host is suspended
+- */
+- device_wakeup_enable(&data->udev->dev);
+
+ if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
+ goto done;
+@@ -1229,7 +1225,6 @@ static int btusb_close(struct hci_dev *hdev)
+ goto failed;
+
+ data->intf->needs_remote_wakeup = 0;
+- device_wakeup_disable(&data->udev->dev);
+ usb_autopm_put_interface(data->intf);
+
+ failed:
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 3a4961dc5831..77d1d3894f8d 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3020,15 +3020,49 @@ static int clk_flags_show(struct seq_file *s, void *data)
+ }
+ DEFINE_SHOW_ATTRIBUTE(clk_flags);
+
++static void possible_parent_show(struct seq_file *s, struct clk_core *core,
++ unsigned int i, char terminator)
++{
++ struct clk_core *parent;
++
++ /*
++ * Go through the following options to fetch a parent's name.
++ *
++ * 1. Fetch the registered parent clock and use its name
++ * 2. Use the global (fallback) name if specified
++ * 3. Use the local fw_name if provided
++ * 4. Fetch parent clock's clock-output-name if DT index was set
++ *
++ * This may still fail in some cases, such as when the parent is
++ * specified directly via a struct clk_hw pointer, but it isn't
++ * registered (yet).
++ */
++ parent = clk_core_get_parent_by_index(core, i);
++ if (parent)
++ seq_printf(s, "%s", parent->name);
++ else if (core->parents[i].name)
++ seq_printf(s, "%s", core->parents[i].name);
++ else if (core->parents[i].fw_name)
++ seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
++ else if (core->parents[i].index >= 0)
++ seq_printf(s, "%s",
++ of_clk_get_parent_name(core->of_node,
++ core->parents[i].index));
++ else
++ seq_puts(s, "(missing)");
++
++ seq_putc(s, terminator);
++}
++
+ static int possible_parents_show(struct seq_file *s, void *data)
+ {
+ struct clk_core *core = s->private;
+ int i;
+
+ for (i = 0; i < core->num_parents - 1; i++)
+- seq_printf(s, "%s ", core->parents[i].name);
++ possible_parent_show(s, core, i, ' ');
+
+- seq_printf(s, "%s\n", core->parents[i].name);
++ possible_parent_show(s, core, i, '\n');
+
+ return 0;
+ }
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index c61f4d3e52e2..2a841d38f8a7 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -52,10 +52,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
+ u32 delay_num = 0;
+
+ /* See the comment for rockchip_mmc_set_phase below */
+- if (!rate) {
+- pr_err("%s: invalid clk rate\n", __func__);
++ if (!rate)
+ return -EINVAL;
+- }
+
+ raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 710e09e28227..f9d7d6aaf3db 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -994,11 +994,13 @@ static void talitos_sg_unmap(struct device *dev,
+
+ static void ipsec_esp_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+- struct aead_request *areq)
++ struct aead_request *areq, bool encrypt)
+ {
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int authsize = crypto_aead_authsize(aead);
++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+ bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
+
+@@ -1007,7 +1009,7 @@ static void ipsec_esp_unmap(struct device *dev,
+ DMA_FROM_DEVICE);
+ unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
+
+- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
++ talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
+ areq->assoclen);
+
+ if (edesc->dma_len)
+@@ -1018,7 +1020,7 @@ static void ipsec_esp_unmap(struct device *dev,
+ unsigned int dst_nents = edesc->dst_nents ? : 1;
+
+ sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
+- areq->assoclen + areq->cryptlen - ivsize);
++ areq->assoclen + cryptlen - ivsize);
+ }
+ }
+
+@@ -1040,7 +1042,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+- ipsec_esp_unmap(dev, edesc, areq);
++ ipsec_esp_unmap(dev, edesc, areq, true);
+
+ /* copy the generated ICV to dst */
+ if (edesc->icv_ool) {
+@@ -1074,7 +1076,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+- ipsec_esp_unmap(dev, edesc, req);
++ ipsec_esp_unmap(dev, edesc, req, false);
+
+ if (!err) {
+ char icvdata[SHA512_DIGEST_SIZE];
+@@ -1120,7 +1122,7 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+- ipsec_esp_unmap(dev, edesc, req);
++ ipsec_esp_unmap(dev, edesc, req, false);
+
+ /* check ICV auth status */
+ if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
+@@ -1223,6 +1225,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ * fill in and submit ipsec_esp descriptor
+ */
+ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
++ bool encrypt,
+ void (*callback)(struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+@@ -1232,7 +1235,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ struct talitos_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct talitos_desc *desc = &edesc->desc;
+- unsigned int cryptlen = areq->cryptlen;
++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int tbl_off = 0;
+ int sg_count, ret;
+@@ -1359,7 +1362,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
+ if (ret != -EINPROGRESS) {
+- ipsec_esp_unmap(dev, edesc, areq);
++ ipsec_esp_unmap(dev, edesc, areq, encrypt);
+ kfree(edesc);
+ }
+ return ret;
+@@ -1473,9 +1476,10 @@ static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
+ unsigned int authsize = crypto_aead_authsize(authenc);
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
++ unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
+
+ return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
+- iv, areq->assoclen, areq->cryptlen,
++ iv, areq->assoclen, cryptlen,
+ authsize, ivsize, icv_stashing,
+ areq->base.flags, encrypt);
+ }
+@@ -1494,7 +1498,7 @@ static int aead_encrypt(struct aead_request *req)
+ /* set encrypt */
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
+
+- return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
++ return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
+ }
+
+ static int aead_decrypt(struct aead_request *req)
+@@ -1506,14 +1510,13 @@ static int aead_decrypt(struct aead_request *req)
+ struct talitos_edesc *edesc;
+ void *icvdata;
+
+- req->cryptlen -= authsize;
+-
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, req->iv, 1, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+- if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
++ (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
+ ((!edesc->src_nents && !edesc->dst_nents) ||
+ priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
+
+@@ -1524,7 +1527,8 @@ static int aead_decrypt(struct aead_request *req)
+
+ /* reset integrity check result bits */
+
+- return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
++ return ipsec_esp(edesc, req, false,
++ ipsec_esp_decrypt_hwauth_done);
+ }
+
+ /* Have to check the ICV with software */
+@@ -1540,7 +1544,7 @@ static int aead_decrypt(struct aead_request *req)
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
+
+- return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
++ return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
+ }
+
+ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+@@ -1591,6 +1595,18 @@ static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+ return ablkcipher_setkey(cipher, key, keylen);
+ }
+
++static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
++ const u8 *key, unsigned int keylen)
++{
++ if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
++ keylen == AES_KEYSIZE_256)
++ return ablkcipher_setkey(cipher, key, keylen);
++
++ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++
++ return -EINVAL;
++}
++
+ static void common_nonsnoop_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct ablkcipher_request *areq)
+@@ -1713,6 +1729,14 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
++ unsigned int blocksize =
++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
++
++ if (!areq->nbytes)
++ return 0;
++
++ if (areq->nbytes % blocksize)
++ return -EINVAL;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq, true);
+@@ -1730,6 +1754,14 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ struct talitos_edesc *edesc;
++ unsigned int blocksize =
++ crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
++
++ if (!areq->nbytes)
++ return 0;
++
++ if (areq->nbytes % blocksize)
++ return -EINVAL;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(areq, false);
+@@ -2752,6 +2784,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablkcipher_aes_setkey,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+@@ -2768,6 +2801,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablkcipher_aes_setkey,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+@@ -2778,13 +2812,13 @@ static struct talitos_alg_template driver_algs[] = {
+ .alg.crypto = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-talitos",
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+- .ivsize = AES_BLOCK_SIZE,
++ .setkey = ablkcipher_aes_setkey,
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
+@@ -2818,7 +2852,6 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+- .ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
+ }
+ },
+@@ -2854,7 +2887,6 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+- .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
+ }
+ },
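The recurring `areq->cryptlen - (encrypt ? 0 : authsize)` expression above replaces the old in-place `req->cryptlen -= authsize`. A tiny sketch (illustrative struct, not the crypto API) of why deriving the payload length locally is safer than mutating the request:

    #include <stdio.h>

    struct aead_req { unsigned int cryptlen; };

    /* on decrypt, the trailing authsize bytes are the ICV, not payload */
    static unsigned int payload_len(const struct aead_req *req, int encrypt,
                                    unsigned int authsize)
    {
        return req->cryptlen - (encrypt ? 0 : authsize);
    }

    int main(void)
    {
        struct aead_req req = { .cryptlen = 64 };

        printf("encrypt payload: %u\n", payload_len(&req, 1, 16));
        printf("decrypt payload: %u\n", payload_len(&req, 0, 16));
        printf("request untouched: %u\n", req.cryptlen);  /* still 64 */
        return 0;
    }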
+diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
+index ef93406ace1b..36ce11a67235 100644
+--- a/drivers/firmware/ti_sci.c
++++ b/drivers/firmware/ti_sci.c
+@@ -466,9 +466,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
+ struct ti_sci_xfer *xfer;
+ int ret;
+
+- /* No need to setup flags since it is expected to respond */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
+- 0x0, sizeof(struct ti_sci_msg_hdr),
++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
++ sizeof(struct ti_sci_msg_hdr),
+ sizeof(*rev_info));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+@@ -596,9 +596,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+- /* Response is expected, so need of any flags */
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
+- 0, sizeof(*req), sizeof(*resp));
++ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
++ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
+index b6a4efce7c92..be8590d386b1 100644
+--- a/drivers/gpio/gpio-mockup.c
++++ b/drivers/gpio/gpio-mockup.c
+@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
+ .read = gpio_mockup_debugfs_read,
+ .write = gpio_mockup_debugfs_write,
+ .llseek = no_llseek,
++ .release = single_release,
+ };
+
+ static void gpio_mockup_debugfs_setup(struct device *dev,
+diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
+index c9fc9e232aaf..4d5c285c46f8 100644
+--- a/drivers/gpio/gpiolib-acpi.c
++++ b/drivers/gpio/gpiolib-acpi.c
+@@ -7,6 +7,7 @@
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
++#include <linux/dmi.h>
+ #include <linux/errno.h>
+ #include <linux/gpio/consumer.h>
+ #include <linux/gpio/driver.h>
+@@ -19,6 +20,11 @@
+
+ #include "gpiolib.h"
+
++static int run_edge_events_on_boot = -1;
++module_param(run_edge_events_on_boot, int, 0444);
++MODULE_PARM_DESC(run_edge_events_on_boot,
++ "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
++
+ /**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+@@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+ event->irq_requested = true;
+
+ /* Make sure we trigger the initial state of edge-triggered IRQs */
+- value = gpiod_get_raw_value_cansleep(event->desc);
+- if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+- ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+- event->handler(event->irq, event);
++ if (run_edge_events_on_boot &&
++ (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
++ value = gpiod_get_raw_value_cansleep(event->desc);
++ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
++ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
++ event->handler(event->irq, event);
++ }
+ }
+
+ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+@@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
+ }
+ /* We must use _sync so that this runs after the first deferred_probe run */
+ late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
++
++static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
++ {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
++ }
++ },
++ {} /* Terminating entry */
++};
++
++static int acpi_gpio_setup_params(void)
++{
++ if (run_edge_events_on_boot < 0) {
++ if (dmi_check_system(run_edge_events_on_boot_blacklist))
++ run_edge_events_on_boot = 0;
++ else
++ run_edge_events_on_boot = 1;
++ }
++
++ return 0;
++}
++
++/* Directly after dmi_setup() which runs as core_initcall() */
++postcore_initcall(acpi_gpio_setup_params);
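The run_edge_events_on_boot parameter follows the common "-1 means auto" tristate pattern, resolved once at init from a DMI denylist; modeled below in plain userspace C (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static int run_edge_events_on_boot = -1;    /* -1=auto, 0=no, 1=yes */

    static void resolve_param(bool on_denylist)
    {
        if (run_edge_events_on_boot < 0)    /* user gave no explicit value */
            run_edge_events_on_boot = on_denylist ? 0 : 1;
    }

    int main(void)
    {
        resolve_param(true);    /* e.g. the MINIX Z83-4 DMI match */
        printf("run edge events on boot: %d\n", run_edge_events_on_boot);
        return 0;
    }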
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index f272b5143997..e806cd9a14ba 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -535,6 +535,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
+ return -EINVAL;
+
++ /*
++ * Do not allow both INPUT & OUTPUT flags to be set as they are
++ * contradictory.
++ */
++ if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
++ (lflags & GPIOHANDLE_REQUEST_OUTPUT))
++ return -EINVAL;
++
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ }
+
+ /* This is just wrong: we don't look for events on output lines */
+- if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
++ if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
++ (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
++ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+@@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+
+ if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+- if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
+- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+- if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
+- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
+ ret = gpiod_direction_input(desc);
+ if (ret)
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index d8a0bcd02f34..ffd95bfeaa94 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -90,6 +90,12 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+ };
+
++static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
++ .width = 720,
++ .height = 1280,
++ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
++};
++
+ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
+ .width = 800,
+ .height = 1280,
+@@ -123,6 +129,12 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_micropc,
++ }, { /* GPD MicroPC (later BIOS versions with proper DMI strings) */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"),
++ },
++ .driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /*
+ * GPD Pocket, note that the DMI data is less generic than
+ * it seems, devices with a board-vendor of "AMI Corporation"
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index d89120dcac67..8e6a7b8dffca 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -125,7 +125,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
+
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
+- limits.max_bpp = pipe_config->pipe_bpp;
++ /*
++ * FIXME: If all the streams can't fit into the link with
++ * their current pipe_bpp we should reduce pipe_bpp across
++ * the board until things start to fit. Until then we
++ * limit to <= 8bpc since that's what was hardcoded for all
++ * MST streams previously. This hack should be removed once
++ * we have the proper retry logic in place.
++ */
++ limits.max_bpp = min(pipe_config->pipe_bpp, 24);
+
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
+
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index edd57a5e0495..b50a7c3f22bf 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -294,11 +294,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+- if (!IS_COFFEELAKE(i915))
+- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+-
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
+index 477c0f766663..b609dc030d6c 100644
+--- a/drivers/gpu/drm/lima/lima_gem.c
++++ b/drivers/gpu/drm/lima/lima_gem.c
+@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
+ timeout = drm_timeout_abs_to_jiffies(timeout_ns);
+
+ ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+- if (ret == 0)
++ if (ret == -ETIME)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ return ret;
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index c021d4c8324f..7f5408cb2377 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -567,12 +567,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
++ of_node_put(node);
+ goto err_node;
+ }
+
+ ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+- if (ret)
++ if (ret) {
++ of_node_put(node);
+ goto err_node;
++ }
+
+ private->ddp_comp[comp_id] = comp;
+ }
+diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
+index d90427b93a51..2cccbcf5b53c 100644
+--- a/drivers/gpu/drm/meson/meson_plane.c
++++ b/drivers/gpu/drm/meson/meson_plane.c
+@@ -153,6 +153,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
++ case DRM_FORMAT_XBGR8888:
++ /* For XBGR, replace the pixel's alpha with 0xFF */
++ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN,
++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
++ OSD_COLOR_MATRIX_32_ABGR;
++ break;
+ case DRM_FORMAT_ARGB8888:
+ /* For ARGB, use the pixel's alpha */
+ writel_bits_relaxed(OSD_REPLACE_EN, 0,
+@@ -160,6 +167,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
+ OSD_COLOR_MATRIX_32_ARGB;
+ break;
++ case DRM_FORMAT_ABGR8888:
++ /* For ABGR, use the pixel's alpha */
++ writel_bits_relaxed(OSD_REPLACE_EN, 0,
++ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
++ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_32 |
++ OSD_COLOR_MATRIX_32_ABGR;
++ break;
+ case DRM_FORMAT_RGB888:
+ priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_24 |
+ OSD_COLOR_MATRIX_24_RGB;
+@@ -346,7 +360,9 @@ static const struct drm_plane_funcs meson_plane_funcs = {
+
+ static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_ARGB8888,
++ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
++ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ };
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 588907cc3b6b..6b90a40882f2 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -39,9 +39,16 @@
+ #define DFSDM_MAX_INT_OVERSAMPLING 256
+ #define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+-/* Max sample resolutions */
+-#define DFSDM_MAX_RES BIT(31)
+-#define DFSDM_DATA_RES BIT(23)
++/* Limit filter output resolution to 31 bits. (i.e. sample range is +/-2^30) */
++#define DFSDM_DATA_MAX BIT(30)
++/*
++ * Data are output as two's complement values in a 24-bit field.
++ * Data from the filters are in the range +/-2^(n-1).
++ * The maximum positive value 2^(n-1) cannot be coded in n-bit two's
++ * complement; an extra bit would be required to avoid wrap-around of
++ * the binary code for 2^(n-1). So the resolution of samples from the
++ * filter is actually limited to 23 bits.
++ */
++#define DFSDM_DATA_RES 24
+
+ /* Filter configuration */
+ #define DFSDM_CR1_CFG_MASK (DFSDM_CR1_RCH_MASK | DFSDM_CR1_RCONT_MASK | \
+@@ -181,14 +188,15 @@ static int stm32_dfsdm_get_jextsel(struct iio_dev *indio_dev,
+ return -EINVAL;
+ }
+
+-static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+- unsigned int fast, unsigned int oversamp)
++static int stm32_dfsdm_compute_osrs(struct stm32_dfsdm_filter *fl,
++ unsigned int fast, unsigned int oversamp)
+ {
+ unsigned int i, d, fosr, iosr;
+- u64 res;
+- s64 delta;
++ u64 res, max;
++ int bits, shift;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+@@ -207,11 +215,9 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+
+ /*
+ * Look for filter and integrator oversampling ratios which allow
+- * to reach 24 bits data output resolution.
+- * Leave as soon as if exact resolution if reached.
+- * Otherwise the higher resolution below 32 bits is kept.
++ * to maximize data output resolution.
+ */
+- fl->res = 0;
++ flo->res = 0;
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+@@ -236,32 +242,68 @@ static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+- if (res > DFSDM_MAX_RES)
++ if (res > DFSDM_DATA_MAX)
+ break;
+ }
+- if (res > DFSDM_MAX_RES)
++ if (res > DFSDM_DATA_MAX)
+ continue;
++
+ res = res * (u64)m * (u64)iosr;
+- if (res > DFSDM_MAX_RES)
++ if (res > DFSDM_DATA_MAX)
+ continue;
+
+- delta = res - DFSDM_DATA_RES;
+-
+- if (res >= fl->res) {
+- fl->res = res;
+- fl->fosr = fosr;
+- fl->iosr = iosr;
+- fl->fast = fast;
+- pr_debug("%s: fosr = %d, iosr = %d\n",
+- __func__, fl->fosr, fl->iosr);
++ if (res >= flo->res) {
++ flo->res = res;
++ flo->fosr = fosr;
++ flo->iosr = iosr;
++
++ bits = fls(flo->res);
++ /* 8 LSBs in data register contain chan info */
++ max = flo->res << 8;
++
++ /* if resolution is not a power of two */
++ if (flo->res > BIT(bits - 1))
++ bits++;
++ else
++ max--;
++
++ shift = DFSDM_DATA_RES - bits;
++ /*
++ * Compute right/left shift
++ * Right shift is performed by hardware
++ * when transferring samples to data register.
++ * Left shift is done by software on buffer
++ */
++ if (shift > 0) {
++ /* Resolution is lower than 24 bits */
++ flo->rshift = 0;
++ flo->lshift = shift;
++ } else {
++ /*
++ * If resolution is 24 bits or more,
++ * max positive value may be ambiguous
++ * (equal to max negative value as sign
++ * bit is dropped).
++ * Reduce resolution to 23 bits (rshift)
++ * to keep the sign on bit 23 and treat
++ * saturation before rescaling on 24
++ * bits (lshift).
++ */
++ flo->rshift = 1 - shift;
++ flo->lshift = 1;
++ max >>= flo->rshift;
++ }
++ flo->max = (s32)max;
++
++ pr_debug("%s: fast %d, fosr %d, iosr %d, res 0x%llx/%d bits, rshift %d, lshift %d\n",
++ __func__, fast, flo->fosr, flo->iosr,
++ flo->res, bits, flo->rshift,
++ flo->lshift);
+ }
+-
+- if (!delta)
+- return 0;
+ }
+ }
+
+- if (!fl->res)
++ if (!flo->res)
+ return -EINVAL;
+
+ return 0;
+@@ -384,6 +426,36 @@ static int stm32_dfsdm_filter_set_trig(struct stm32_dfsdm_adc *adc,
+ return 0;
+ }
+
++static int stm32_dfsdm_channels_configure(struct stm32_dfsdm_adc *adc,
++ unsigned int fl_id,
++ struct iio_trigger *trig)
++{
++ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
++ struct regmap *regmap = adc->dfsdm->regmap;
++ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
++ const struct iio_chan_spec *chan;
++ unsigned int bit;
++ int ret;
++
++ if (!flo->res)
++ return -EINVAL;
++
++ for_each_set_bit(bit, &adc->smask,
++ sizeof(adc->smask) * BITS_PER_BYTE) {
++ chan = indio_dev->channels + bit;
++
++ ret = regmap_update_bits(regmap,
++ DFSDM_CHCFGR2(chan->channel),
++ DFSDM_CHCFGR2_DTRBS_MASK,
++ DFSDM_CHCFGR2_DTRBS(flo->rshift));
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
+ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
+ unsigned int fl_id,
+ struct iio_trigger *trig)
+@@ -391,6 +463,7 @@ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[fl_id];
++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+ u32 cr1;
+ const struct iio_chan_spec *chan;
+ unsigned int bit, jchg = 0;
+@@ -398,13 +471,13 @@ static int stm32_dfsdm_filter_configure(struct stm32_dfsdm_adc *adc,
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+- DFSDM_FCR_IOSR(fl->iosr - 1));
++ DFSDM_FCR_IOSR(flo->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+- DFSDM_FCR_FOSR(fl->fosr - 1));
++ DFSDM_FCR_FOSR(flo->fosr - 1));
+ if (ret)
+ return ret;
+
+@@ -573,7 +646,7 @@ static int dfsdm_adc_set_samp_freq(struct iio_dev *indio_dev,
+ "Rate not accurate. requested (%u), actual (%u)\n",
+ sample_freq, spi_freq / oversamp);
+
+- ret = stm32_dfsdm_set_osrs(fl, 0, oversamp);
++ ret = stm32_dfsdm_compute_osrs(fl, 0, oversamp);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "No filter parameters that match!\n");
+ return ret;
+@@ -623,6 +696,10 @@ static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+
++ ret = stm32_dfsdm_channels_configure(adc, adc->fl_id, trig);
++ if (ret < 0)
++ return ret;
++
+ ret = stm32_dfsdm_start_channel(adc);
+ if (ret < 0)
+ return ret;
+@@ -729,6 +806,8 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
+ {
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
++ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
++ struct stm32_dfsdm_filter_osr *flo = &fl->flo;
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+@@ -751,10 +830,19 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+- u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
++ s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
+
+ /* Mask 8 LSB that contains the channel ID */
+- *buffer = (*buffer & 0xFFFFFF00) << 8;
++ *buffer &= 0xFFFFFF00;
++ /* Convert 2^(n-1) sample to 2^(n-1)-1 to avoid wrap-around */
++ if (*buffer > flo->max)
++ *buffer -= 1;
++ /*
++ * Samples from the filter are retrieved with 23-bit resolution
++ * or less. Shift left to align the MSB on 24 bits.
++ */
++ *buffer <<= flo->lshift;
++
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+@@ -1078,7 +1166,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+- ret = stm32_dfsdm_set_osrs(fl, 0, val);
++ ret = stm32_dfsdm_compute_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+ iio_device_release_direct_mode(indio_dev);
+@@ -1277,11 +1365,11 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+- ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+- ch->scan_type.sign = 'u';
++ ch->scan_type.shift = 8;
+ }
++ ch->scan_type.sign = 's';
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+
+@@ -1327,8 +1415,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+- ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+- adc->oversamp);
++ ret = stm32_dfsdm_compute_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
++ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
+index 8708394b0725..18b06ee6ed7b 100644
+--- a/drivers/iio/adc/stm32-dfsdm.h
++++ b/drivers/iio/adc/stm32-dfsdm.h
+@@ -243,19 +243,33 @@ enum stm32_dfsdm_sinc_order {
+ };
+
+ /**
+- * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
++ * struct stm32_dfsdm_filter_osr - DFSDM filter settings linked to oversampling
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+- * @ford: filter order
++ * @rshift: output sample right shift (hardware shift)
++ * @lshift: output sample left shift (software shift)
+ * @res: output sample resolution
++ * @max: output sample maximum positive value
++ */
++struct stm32_dfsdm_filter_osr {
++ unsigned int iosr;
++ unsigned int fosr;
++ unsigned int rshift;
++ unsigned int lshift;
++ u64 res;
++ s32 max;
++};
++
++/**
++ * struct stm32_dfsdm_filter - structure relative to stm32 FDSDM filter
++ * @ford: filter order
++ * @flo: filter oversampling structure
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+ struct stm32_dfsdm_filter {
+- unsigned int iosr;
+- unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+- u64 res;
++ struct stm32_dfsdm_filter_osr flo;
+ unsigned int sync_mode;
+ unsigned int fast;
+ };
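The per-sample fixup in stm32_dfsdm_dma_buffer_done() can be tried in isolation. This sketch mirrors the three steps (mask the channel bits, clamp the ambiguous 2^(n-1) code, re-align the sign on bit 23) with made-up max/lshift values, and shifts via an unsigned cast since left-shifting a negative signed value is undefined in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t fixup_sample(int32_t raw, int32_t max, unsigned int lshift)
    {
        raw &= 0xFFFFFF00;  /* 8 LSBs carry the channel ID */
        if (raw > max)      /* 2^(n-1) -> 2^(n-1)-1, avoid wrap-around */
            raw -= 1;
        /* align MSB/sign on 24 bits; cast avoids signed-shift UB */
        return (int32_t)((uint32_t)raw << lshift);
    }

    int main(void)
    {
        /* illustrative values only: 23-bit samples, 1-bit left shift */
        int32_t max = 0x3FFFFF00;

        printf("%#010x\n", (unsigned)fixup_sample(0x12345678, max, 1));
        printf("%#010x\n", (unsigned)fixup_sample((int32_t)0xFFFFFF42, max, 1));
        return 0;
    }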
+diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
+index 3c3ad42f22bf..c92b405b7646 100644
+--- a/drivers/isdn/capi/capi.c
++++ b/drivers/isdn/capi/capi.c
+@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
+ if (!cdev->ap.applid)
+ return -ENODEV;
+
++ if (count < CAPIMSG_BASELEN)
++ return -EINVAL;
++
+ skb = alloc_skb(count, GFP_USER);
+ if (!skb)
+ return -ENOMEM;
+@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
+ }
+ mlen = CAPIMSG_LEN(skb->data);
+ if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
+- if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
++ if (count < CAPI_DATA_B3_REQ_LEN ||
++ (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
+ CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
+
+ if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
++ if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
++ kfree_skb(skb);
++ return -EINVAL;
++ }
+ mutex_lock(&cdev->lock);
+ capincci_free(cdev, CAPIMSG_NCCI(skb->data));
+ mutex_unlock(&cdev->lock);
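The pattern of the capi_write() fix, namely validating that the user buffer is at least as long as the fixed header before reading any header field, in a standalone sketch with an invented constant:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MSG_BASELEN 8   /* invented; stands in for CAPIMSG_BASELEN */

    static int parse_msg(const uint8_t *buf, size_t count)
    {
        uint16_t len;

        if (count < MSG_BASELEN)    /* the fix: check before parsing */
            return -1;
        memcpy(&len, buf, sizeof(len)); /* only now touch header fields */
        printf("message len field: %u\n", len);
        return 0;
    }

    int main(void)
    {
        uint8_t short_buf[2] = { 0xff, 0xff };
        uint8_t good_buf[MSG_BASELEN] = { 0x10, 0x00 };

        printf("short: %d\n", parse_msg(short_buf, sizeof(short_buf)));
        printf("good:  %d\n", parse_msg(good_buf, sizeof(good_buf)));
        return 0;
    }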
+diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
+index 7e0d3a49c06d..bb31e13648d6 100644
+--- a/drivers/mmc/host/bcm2835.c
++++ b/drivers/mmc/host/bcm2835.c
+@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
+ struct dma_chan *terminate_chan = NULL;
+ struct mmc_request *mrq;
+
+- cancel_delayed_work_sync(&host->timeout_work);
++ cancel_delayed_work(&host->timeout_work);
+
+ mrq = host->mrq;
+
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index 9dc4548271b4..19944b0049db 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ mmc_hostname(host->mmc));
+ host->flags &= ~SDHCI_SIGNALING_330;
+ host->flags |= SDHCI_SIGNALING_180;
+- host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
+ host->mmc->caps2 |= MMC_CAP2_NO_SD;
+ host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
+ pci_write_config_dword(chip->pdev,
+@@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
+ const struct sdhci_pci_fixes sdhci_o2 = {
+ .probe = sdhci_pci_o2_probe,
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
+ .probe_slot = sdhci_pci_o2_probe_slot,
+ #ifdef CONFIG_PM_SLEEP
+ .resume = sdhci_pci_o2_resume,
+diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
+index c5ba13fae399..2f0b092d6dcc 100644
+--- a/drivers/mmc/host/tmio_mmc.h
++++ b/drivers/mmc/host/tmio_mmc.h
+@@ -163,6 +163,7 @@ struct tmio_mmc_host {
+ unsigned long last_req_ts;
+ struct mutex ios_lock; /* protect set_ios() context */
+ bool native_hotplug;
++ bool runtime_synced;
+ bool sdio_irq_enabled;
+
+ /* Mandatory callback */
+diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
+index 84cb7d2aacdf..29ec78486e69 100644
+--- a/drivers/mmc/host/tmio_mmc_core.c
++++ b/drivers/mmc/host/tmio_mmc_core.c
+@@ -1258,20 +1258,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
+ /* See if we also get DMA */
+ tmio_mmc_request_dma(_host, pdata);
+
+- pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
++ pm_runtime_get_sync(&pdev->dev);
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto remove_host;
+
+ dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
++ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+ remove_host:
++ pm_runtime_put_noidle(&pdev->dev);
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
+@@ -1282,12 +1284,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+ struct platform_device *pdev = host->pdev;
+ struct mmc_host *mmc = host->mmc;
+
++ pm_runtime_get_sync(&pdev->dev);
++
+ if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
+ sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+
+- if (!host->native_hotplug)
+- pm_runtime_get_sync(&pdev->dev);
+-
+ dev_pm_qos_hide_latency_limit(&pdev->dev);
+
+ mmc_remove_host(mmc);
+@@ -1296,6 +1297,8 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
+ tmio_mmc_release_dma(host);
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
++ if (host->native_hotplug)
++ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+@@ -1340,6 +1343,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
+ {
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
+
++ if (!host->runtime_synced) {
++ host->runtime_synced = true;
++ return 0;
++ }
++
+ tmio_mmc_clk_enable(host);
+ tmio_mmc_hw_reset(host->mmc);
+
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index 23fe19397315..d6a1354f4f62 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -853,19 +853,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
+ return mtk_nfc_write_page_raw(chip, NULL, 1, page);
+ }
+
+-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
++static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
++ u32 sectors)
+ {
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+ struct mtk_ecc_stats stats;
++ u32 reg_size = mtk_nand->fdm.reg_size;
+ int rc, i;
+
+ rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+ if (rc) {
+ memset(buf, 0xff, sectors * chip->ecc.size);
+ for (i = 0; i < sectors; i++)
+- memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
++ memset(oob_ptr(chip, start + i), 0xff, reg_size);
+ return 0;
+ }
+
+@@ -885,7 +887,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ u32 spare = mtk_nand->spare_per_sector;
+ u32 column, sectors, start, end, reg;
+ dma_addr_t addr;
+- int bitflips;
++ int bitflips = 0;
+ size_t len;
+ u8 *buf;
+ int rc;
+@@ -952,14 +954,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ if (rc < 0) {
+ dev_err(nfc->dev, "subpage done timeout\n");
+ bitflips = -EIO;
+- } else {
+- bitflips = 0;
+- if (!raw) {
+- rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+- bitflips = rc < 0 ? -ETIMEDOUT :
+- mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+- mtk_nfc_read_fdm(chip, start, sectors);
+- }
++ } else if (!raw) {
++ rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
++ bitflips = rc < 0 ? -ETIMEDOUT :
++ mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
++ mtk_nfc_read_fdm(chip, start, sectors);
+ }
+
+ dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index f7c049559c1a..f9f473ae4abe 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -36,6 +36,7 @@
+ #include <net/vxlan.h>
+ #include <net/mpls.h>
+ #include <net/xdp_sock.h>
++#include <net/xfrm.h>
+
+ #include "ixgbe.h"
+ #include "ixgbe_common.h"
+@@ -2621,7 +2622,7 @@ adjust_by_size:
+ /* 16K ints/sec to 9.2K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+- } else if (avg_wire_size <= 1980) {
++ } else if (avg_wire_size < 1968) {
+ /* 9.2K ints/sec to 8K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+@@ -2654,6 +2655,8 @@ adjust_by_size:
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ case IXGBE_LINK_SPEED_10_FULL:
++ if (avg_wire_size > 8064)
++ avg_wire_size = 8064;
+ itr += DIV_ROUND_UP(avg_wire_size,
+ IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
+ IXGBE_ITR_ADAPTIVE_MIN_INC;
+@@ -8691,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
+ #endif /* IXGBE_FCOE */
+
+ #ifdef CONFIG_IXGBE_IPSEC
+- if (secpath_exists(skb) &&
++ if (xfrm_offload(skb) &&
+ !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+ #endif
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index bfe95ce0bd7f..1f5fe115bd99 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -679,19 +679,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
+ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring *tx_ring, int napi_budget)
+ {
++ u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
+ unsigned int total_packets = 0, total_bytes = 0;
+- u32 i = tx_ring->next_to_clean, xsk_frames = 0;
+- unsigned int budget = q_vector->tx.work_limit;
+ struct xdp_umem *umem = tx_ring->xsk_umem;
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_bi;
+- bool xmit_done;
++ u32 xsk_frames = 0;
+
+- tx_bi = &tx_ring->tx_buffer_info[i];
+- tx_desc = IXGBE_TX_DESC(tx_ring, i);
+- i -= tx_ring->count;
++ tx_bi = &tx_ring->tx_buffer_info[ntc];
++ tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
+
+- do {
++ while (ntc != ntu) {
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+@@ -708,22 +706,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+
+ tx_bi++;
+ tx_desc++;
+- i++;
+- if (unlikely(!i)) {
+- i -= tx_ring->count;
++ ntc++;
++ if (unlikely(ntc == tx_ring->count)) {
++ ntc = 0;
+ tx_bi = tx_ring->tx_buffer_info;
+ tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
++ }
+
+- /* update budget accounting */
+- budget--;
+- } while (likely(budget));
+-
+- i += tx_ring->count;
+- tx_ring->next_to_clean = i;
++ tx_ring->next_to_clean = ntc;
+
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+@@ -735,8 +729,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
+ if (xsk_frames)
+ xsk_umem_complete_tx(umem, xsk_frames);
+
+- xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+- return budget > 0 && xmit_done;
++ return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+ }
+
+ int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
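The rewritten cleanup loop walks from next_to_clean to next_to_use with an explicit wrap, instead of budget arithmetic on an index that was biased negative. Reduced to its core (ring size invented):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8

    int main(void)
    {
        uint16_t ntc = 6, ntu = 2;      /* wrapped: entries 6, 7, 0, 1 */

        while (ntc != ntu) {            /* no budget: stop at next_to_use */
            printf("clean descriptor %u\n", ntc);
            if (++ntc == RING_SIZE)     /* explicit wrap-around */
                ntc = 0;
        }
        return 0;
    }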
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index d189ed247665..ac6c18821958 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -30,6 +30,7 @@
+ #include <linux/bpf.h>
+ #include <linux/bpf_trace.h>
+ #include <linux/atomic.h>
++#include <net/xfrm.h>
+
+ #include "ixgbevf.h"
+
+@@ -4158,7 +4159,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
+ first->protocol = vlan_get_protocol(skb);
+
+ #ifdef CONFIG_IXGBEVF_IPSEC
+- if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
++ if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ goto out_drop;
+ #endif
+ tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index c45ee6e3fe01..a094d7197015 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -356,8 +356,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
+ * Local device Link partner
+ * Pause AsymDir Pause AsymDir Result
+ * 1 X 1 X TX+RX
+- * 0 1 1 1 RX
+- * 1 1 0 1 TX
++ * 0 1 1 1 TX
++ * 1 1 0 1 RX
+ */
+ static void phylink_resolve_flow(struct phylink *pl,
+ struct phylink_link_state *state)
+@@ -378,7 +378,7 @@ static void phylink_resolve_flow(struct phylink *pl,
+ new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
+ else if (pause & MLO_PAUSE_ASYM)
+ new_pause = state->pause & MLO_PAUSE_SYM ?
+- MLO_PAUSE_RX : MLO_PAUSE_TX;
++ MLO_PAUSE_TX : MLO_PAUSE_RX;
+ } else {
+ new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
+ }
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 192ac47fd055..3f42cd433605 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -788,7 +788,8 @@ static void tun_detach_all(struct net_device *dev)
+ }
+
+ static int tun_attach(struct tun_struct *tun, struct file *file,
+- bool skip_filter, bool napi, bool napi_frags)
++ bool skip_filter, bool napi, bool napi_frags,
++ bool publish_tun)
+ {
+ struct tun_file *tfile = file->private_data;
+ struct net_device *dev = tun->dev;
+@@ -871,7 +872,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
+ * initialized tfile; otherwise we risk using half-initialized
+ * object.
+ */
+- rcu_assign_pointer(tfile->tun, tun);
++ if (publish_tun)
++ rcu_assign_pointer(tfile->tun, tun);
+ rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ tun->numqueues++;
+ tun_set_real_num_queues(tun);
+@@ -2731,7 +2733,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
+ ifr->ifr_flags & IFF_NAPI,
+- ifr->ifr_flags & IFF_NAPI_FRAGS);
++ ifr->ifr_flags & IFF_NAPI_FRAGS, true);
+ if (err < 0)
+ return err;
+
+@@ -2830,13 +2832,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+- ifr->ifr_flags & IFF_NAPI_FRAGS);
++ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+ if (err < 0)
+ goto err_free_flow;
+
+ err = register_netdevice(tun->dev);
+ if (err < 0)
+ goto err_detach;
+	/* free_netdev() won't check refcnt; to avoid a race
+	 * with dev_put() we need to publish tun after registration.
++ */
++ rcu_assign_pointer(tfile->tun, tun);
+ }
+
+ netif_carrier_on(tun->dev);
+@@ -2979,7 +2985,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
+ if (ret < 0)
+ goto unlock;
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+- tun->flags & IFF_NAPI_FRAGS);
++ tun->flags & IFF_NAPI_FRAGS, true);
+ } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
+ tun = rtnl_dereference(tfile->tun);
+ if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 8458e88c18e9..32f53de5b1fe 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
+ goto bad_desc;
+ }
+ skip:
+- if (rndis && header.usb_cdc_acm_descriptor &&
++	/* Communication class functions with bmCapabilities are not
++ * RNDIS. But some Wireless class RNDIS functions use
++ * bmCapabilities for their own purpose. The failsafe is
++ * therefore applied only to Communication class RNDIS
++ * functions. The rndis test is redundant, but a cheap
++ * optimization.
++ */
++ if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
++ header.usb_cdc_acm_descriptor &&
+ header.usb_cdc_acm_descriptor->bmCapabilities) {
+ dev_dbg(&intf->dev,
+ "ACM capabilities %02x, not really RNDIS?\n",
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+index 80e6b211f60b..8d7a47d1b205 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+@@ -77,11 +77,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
+ goto out;
+ }
+
+- mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
+- if (mvif->omac_idx < 0) {
++ idx = get_omac_idx(vif->type, dev->omac_mask);
++ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
++ mvif->omac_idx = idx;
+
+ /* TODO: DBDC support. Use band 0 and wmm 0 for now */
+ mvif->band_idx = 0;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+index ea67c6022fe6..dc1301effa24 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+@@ -1270,7 +1270,6 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL);
+ memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+- dev_kfree_skb(skb);
+
+ req.omac_idx = mvif->omac_idx;
+ req.enable = en;
+@@ -1281,6 +1280,7 @@ int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
+
++ dev_kfree_skb(skb);
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+index 40c0d536e20d..9d4426f6905f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
+ dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
+ }
+
++ if (is_mt7630(dev)) {
++ dev->mt76.cap.has_5ghz = false;
++ dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
++ }
++
+ if (!mt76x02_field_valid(nic_conf1 & 0xff))
+ nic_conf1 &= 0xff00;
+
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+index 621cd4ce69e2..5673dd858811 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+@@ -4156,24 +4156,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ /* Turn on tertiary LNAs */
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN,
+- rf->channel > 14);
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
+- rf->channel <= 14);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
+ /* fall-through */
+ case 2:
+ /* Turn on secondary LNAs */
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN,
+- rf->channel > 14);
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
+- rf->channel <= 14);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
+ /* fall-through */
+ case 1:
+ /* Turn on primary LNAs */
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN,
+- rf->channel > 14);
+- rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
+- rf->channel <= 14);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
++ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
+ break;
+ }
+
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
+index f5048d4b8cb6..760eaffeebd6 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
+@@ -645,7 +645,6 @@ fail_rx:
+ kfree(rsi_dev->tx_buffer);
+
+ fail_eps:
+- kfree(rsi_dev);
+
+ return status;
+ }
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 74c3df250d9c..9c8d619d5979 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -399,7 +399,8 @@ void __weak pcibios_free_irq(struct pci_dev *dev)
+ #ifdef CONFIG_PCI_IOV
+ static inline bool pci_device_can_probe(struct pci_dev *pdev)
+ {
+- return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe);
++ return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe ||
++ pdev->driver_override);
+ }
+ #else
+ static inline bool pci_device_can_probe(struct pci_dev *pdev)
+diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
+index 7a8cbfb5d213..d35a73a24b3c 100644
+--- a/drivers/platform/x86/pcengines-apuv2.c
++++ b/drivers/platform/x86/pcengines-apuv2.c
+@@ -93,7 +93,7 @@ struct gpiod_lookup_table gpios_led_table = {
+
+ static struct gpio_keys_button apu2_keys_buttons[] = {
+ {
+- .code = KEY_SETUP,
++ .code = KEY_RESTART,
+ .active_low = 1,
+ .desc = "front button",
+ .type = EV_KEY,
+diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
+index be802fd2182d..551ed44dd361 100644
+--- a/drivers/platform/x86/pmc_atom.c
++++ b/drivers/platform/x86/pmc_atom.c
+@@ -412,6 +412,14 @@ static const struct dmi_system_id critclk_systems[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
+ },
+ },
++ {
++ /* pmc_plt_clk* - are used for ethernet controllers */
++ .ident = "Beckhoff CB4063",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
++ DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
++ },
++ },
+ {
+ /* pmc_plt_clk* - are used for ethernet controllers */
+ .ident = "Beckhoff CB6263",
+diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
+index 6fa15b2d6fb3..866b4dd01da9 100644
+--- a/drivers/regulator/twl-regulator.c
++++ b/drivers/regulator/twl-regulator.c
+@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
+ 2500, 2750,
+ };
+
++/* 600mV to 1450mV in 12.5 mV steps */
++static const struct regulator_linear_range VDD1_ranges[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
++};
++
++/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
++static const struct regulator_linear_range VDD2_ranges[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
++ REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
++};
++
+ static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+ {
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+ }
+
+ static const struct regulator_ops twl4030smps_ops = {
++ .list_voltage = regulator_list_voltage_linear_range,
++
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+ };
+@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
+ }, \
+ }
+
+-#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
++#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
++ n_volt) \
+ static const struct twlreg_info TWL4030_INFO_##label = { \
+ .base = offset, \
+ .id = num, \
+@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
+ .owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
+ .of_map_mode = twl4030reg_map_mode, \
++ .n_voltages = n_volt, \
++ .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
++ .linear_ranges = label ## _ranges, \
+ }, \
+ }
+
+@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
+ TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
+ TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
+ TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
+-TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
+-TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
++TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
++TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
+ /* VUSBCP is managed *only* by the USB subchip */
+ TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
+ TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6c8297bcfeb7..1bfd7e34f31e 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ BTRFS_I(inode),
+ LOG_OTHER_INODE_ALL,
+ 0, LLONG_MAX, ctx);
+- iput(inode);
++ btrfs_add_delayed_iput(inode);
+ }
+ }
+ continue;
+@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
+ LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
+ if (ret) {
+- iput(inode);
++ btrfs_add_delayed_iput(inode);
+ continue;
+ }
+
+@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+- iput(inode);
++ btrfs_add_delayed_iput(inode);
+ continue;
+ }
+
+@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
+ }
+ path->slots[0]++;
+ }
+- iput(inode);
++ btrfs_add_delayed_iput(inode);
+ }
+
+ return ret;
+@@ -5689,7 +5689,7 @@ process_leaf:
+ }
+
+ if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
+- iput(di_inode);
++ btrfs_add_delayed_iput(di_inode);
+ break;
+ }
+
+@@ -5701,7 +5701,7 @@ process_leaf:
+ if (!ret &&
+ btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
+ ret = 1;
+- iput(di_inode);
++ btrfs_add_delayed_iput(di_inode);
+ if (ret)
+ goto next_dir_inode;
+ if (ctx->log_new_dentries) {
+@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
+ if (!ret && ctx && ctx->log_new_dentries)
+ ret = log_new_dir_dentries(trans, root,
+ BTRFS_I(dir_inode), ctx);
+- iput(dir_inode);
++ btrfs_add_delayed_iput(dir_inode);
+ if (ret)
+ goto out;
+ }
+@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
+ ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
+ LOG_INODE_EXISTS,
+ 0, LLONG_MAX, ctx);
+- iput(inode);
++ btrfs_add_delayed_iput(inode);
+ if (ret)
+ return ret;
+
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index f5a823cb0e43..e8e7b0e9532e 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -1158,8 +1158,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
+ * o exact match, i.e. the found zero-level znode contains key @key, then %1
+ * is returned and slot number of the matched branch is stored in @n;
+ * o not exact match, which means that zero-level znode does not contain
+- * @key, then %0 is returned and slot number of the closest branch is stored
+- * in @n;
++ * @key, then %0 is returned and slot number of the closest branch or %-1
++ *   is stored in @n; in this case, calling tnc_next() is mandatory.
+ * o @key is so small that it is even less than the lowest key of the
+ * leftmost zero-level node, then %0 is returned and %0 is stored in @n.
+ *
+@@ -1882,13 +1882,19 @@ int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
+
+ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_dent_node *dent, uint32_t cookie,
+- struct ubifs_znode **zn, int *n)
++ struct ubifs_znode **zn, int *n, int exact)
+ {
+ int err;
+ struct ubifs_znode *znode = *zn;
+ struct ubifs_zbranch *zbr;
+ union ubifs_key *dkey;
+
++ if (!exact) {
++ err = tnc_next(c, &znode, n);
++ if (err)
++ return err;
++ }
++
+ for (;;) {
+ zbr = &znode->zbranch[*n];
+ dkey = &zbr->key;
+@@ -1930,7 +1936,7 @@ static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
+ if (unlikely(err < 0))
+ goto out_unlock;
+
+- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
+
+ out_unlock:
+ mutex_unlock(&c->tnc_mutex);
+@@ -2723,7 +2729,7 @@ int ubifs_tnc_remove_dh(struct ubifs_info *c, const union ubifs_key *key,
+ if (unlikely(err < 0))
+ goto out_free;
+
+- err = search_dh_cookie(c, key, dent, cookie, &znode, &n);
++ err = search_dh_cookie(c, key, dent, cookie, &znode, &n, err);
+ if (err)
+ goto out_free;
+ }
+diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
+index 1e5d86ebdaeb..52bc8e487ef7 100644
+--- a/include/linux/phy_fixed.h
++++ b/include/linux/phy_fixed.h
+@@ -11,6 +11,7 @@ struct fixed_phy_status {
+ };
+
+ struct device_node;
++struct gpio_desc;
+
+ #if IS_ENABLED(CONFIG_FIXED_PHY)
+ extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 2bcef4c70183..4230b8532adb 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -1397,4 +1397,23 @@ static inline unsigned int ksys_personality(unsigned int personality)
+ return old;
+ }
+
++/* for __ARCH_WANT_SYS_IPC */
++long ksys_semtimedop(int semid, struct sembuf __user *tsops,
++ unsigned int nsops,
++ const struct __kernel_timespec __user *timeout);
++long ksys_semget(key_t key, int nsems, int semflg);
++long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
++long ksys_msgget(key_t key, int msgflg);
++long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
++long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
++ long msgtyp, int msgflg);
++long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
++ int msgflg);
++long ksys_shmget(key_t key, size_t size, int shmflg);
++long ksys_shmdt(char __user *shmaddr);
++long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
++long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
++ unsigned int nsops,
++ const struct old_timespec32 __user *timeout);
++
+ #endif
+diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
+index a87904daf103..ae31a7f87ec8 100644
+--- a/include/uapi/asm-generic/unistd.h
++++ b/include/uapi/asm-generic/unistd.h
+@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
+ __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
+ #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
+ #define __NR_semtimedop 192
+-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
++__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
+ #endif
+ #define __NR_semop 193
+ __SYSCALL(__NR_semop, sys_semop)
+diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
+index 4941628a4fb9..5ec88e7548a9 100644
+--- a/include/uapi/linux/isdn/capicmd.h
++++ b/include/uapi/linux/isdn/capicmd.h
+@@ -16,6 +16,7 @@
+ #define CAPI_MSG_BASELEN 8
+ #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2)
+ #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2)
++#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4)
+
+ /*----- CAPI commands -----*/
+ #define CAPI_ALERT 0x01
+diff --git a/ipc/util.h b/ipc/util.h
+index 0fcf8e719b76..5766c61aed0e 100644
+--- a/ipc/util.h
++++ b/ipc/util.h
+@@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd)
+ *cmd &= ~IPC_64;
+ return version;
+ }
+-#endif
+
+-/* for __ARCH_WANT_SYS_IPC */
+-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+- unsigned int nsops,
+- const struct __kernel_timespec __user *timeout);
+-long ksys_semget(key_t key, int nsems, int semflg);
+-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
+-long ksys_msgget(key_t key, int msgflg);
+-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+- long msgtyp, int msgflg);
+-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+- int msgflg);
+-long ksys_shmget(key_t key, size_t size, int shmflg);
+-long ksys_shmdt(char __user *shmaddr);
+-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+-
+-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
+-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+- unsigned int nsops,
+- const struct old_timespec32 __user *timeout);
+-#ifdef CONFIG_COMPAT
+ long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
+ long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
+ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
+@@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
+ long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, int msgflg);
+ long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
+-#endif /* CONFIG_COMPAT */
++
++#endif
+
+ #endif
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index bf9dbffd46b1..d2cba714d3ee 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -5213,8 +5213,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+ * if the parent has to be frozen, the child has too.
+ */
+ cgrp->freezer.e_freeze = parent->freezer.e_freeze;
+- if (cgrp->freezer.e_freeze)
++ if (cgrp->freezer.e_freeze) {
++ /*
++		 * Set the CGRP_FREEZE flag, so when a process is
++ * attached to the child cgroup, it will become frozen.
++ * At this point the new cgroup is unpopulated, so we can
++ * consider it frozen immediately.
++ */
++ set_bit(CGRP_FREEZE, &cgrp->flags);
+ set_bit(CGRP_FROZEN, &cgrp->flags);
++ }
+
+ spin_lock_irq(&css_set_lock);
+ for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
+index 95414ad3506a..98c04ca5fa43 100644
+--- a/kernel/irq/resend.c
++++ b/kernel/irq/resend.c
+@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
+ irq = find_first_bit(irqs_resend, nr_irqs);
+ clear_bit(irq, irqs_resend);
+ desc = irq_to_desc(irq);
++ if (!desc)
++ continue;
+ local_irq_disable();
+ desc->handle_irq(desc);
+ local_irq_enable();
+diff --git a/kernel/module.c b/kernel/module.c
+index 8431c3d47c97..dcf2cc656e7c 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -64,14 +64,9 @@
+
+ /*
+ * Modules' sections will be aligned on page boundaries
+- * to ensure complete separation of code and data, but
+- * only when CONFIG_STRICT_MODULE_RWX=y
++ * to ensure complete separation of code and data
+ */
+-#ifdef CONFIG_STRICT_MODULE_RWX
+ # define debug_align(X) ALIGN(X, PAGE_SIZE)
+-#else
+-# define debug_align(X) (X)
+-#endif
+
+ /* If this is set, the section belongs in the init part of the module */
+ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+@@ -1697,6 +1692,8 @@ static int add_usage_links(struct module *mod)
+ return ret;
+ }
+
++static void module_remove_modinfo_attrs(struct module *mod, int end);
++
+ static int module_add_modinfo_attrs(struct module *mod)
+ {
+ struct module_attribute *attr;
+@@ -1711,24 +1708,34 @@ static int module_add_modinfo_attrs(struct module *mod)
+ return -ENOMEM;
+
+ temp_attr = mod->modinfo_attrs;
+- for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
++ for (i = 0; (attr = modinfo_attrs[i]); i++) {
+ if (!attr->test || attr->test(mod)) {
+ memcpy(temp_attr, attr, sizeof(*temp_attr));
+ sysfs_attr_init(&temp_attr->attr);
+ error = sysfs_create_file(&mod->mkobj.kobj,
+ &temp_attr->attr);
++ if (error)
++ goto error_out;
+ ++temp_attr;
+ }
+ }
++
++ return 0;
++
++error_out:
++ if (i > 0)
++ module_remove_modinfo_attrs(mod, --i);
+ return error;
+ }
+
+-static void module_remove_modinfo_attrs(struct module *mod)
++static void module_remove_modinfo_attrs(struct module *mod, int end)
+ {
+ struct module_attribute *attr;
+ int i;
+
+ for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
++ if (end >= 0 && i > end)
++ break;
+ /* pick a field to test for end of list */
+ if (!attr->attr.name)
+ break;
+@@ -1816,7 +1823,7 @@ static int mod_sysfs_setup(struct module *mod,
+ return 0;
+
+ out_unreg_modinfo_attrs:
+- module_remove_modinfo_attrs(mod);
++ module_remove_modinfo_attrs(mod, -1);
+ out_unreg_param:
+ module_param_sysfs_remove(mod);
+ out_unreg_holders:
+@@ -1852,7 +1859,7 @@ static void mod_sysfs_fini(struct module *mod)
+ {
+ }
+
+-static void module_remove_modinfo_attrs(struct module *mod)
++static void module_remove_modinfo_attrs(struct module *mod, int end)
+ {
+ }
+
+@@ -1868,14 +1875,14 @@ static void init_param_lock(struct module *mod)
+ static void mod_sysfs_teardown(struct module *mod)
+ {
+ del_usage_links(mod);
+- module_remove_modinfo_attrs(mod);
++ module_remove_modinfo_attrs(mod, -1);
+ module_param_sysfs_remove(mod);
+ kobject_put(mod->mkobj.drivers_dir);
+ kobject_put(mod->holders_dir);
+ mod_sysfs_fini(mod);
+ }
+
+-#ifdef CONFIG_STRICT_MODULE_RWX
++#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
+ /*
+ * LKM RO/NX protection: protect module's text/ro-data
+ * from modification and any data from execution.
+@@ -1898,6 +1905,7 @@ static void frob_text(const struct module_layout *layout,
+ layout->text_size >> PAGE_SHIFT);
+ }
+
++#ifdef CONFIG_STRICT_MODULE_RWX
+ static void frob_rodata(const struct module_layout *layout,
+ int (*set_memory)(unsigned long start, int num_pages))
+ {
+@@ -1949,13 +1957,9 @@ void module_enable_ro(const struct module *mod, bool after_init)
+ set_vm_flush_reset_perms(mod->core_layout.base);
+ set_vm_flush_reset_perms(mod->init_layout.base);
+ frob_text(&mod->core_layout, set_memory_ro);
+- frob_text(&mod->core_layout, set_memory_x);
+
+ frob_rodata(&mod->core_layout, set_memory_ro);
+-
+ frob_text(&mod->init_layout, set_memory_ro);
+- frob_text(&mod->init_layout, set_memory_x);
+-
+ frob_rodata(&mod->init_layout, set_memory_ro);
+
+ if (after_init)
+@@ -2014,9 +2018,19 @@ void set_all_modules_text_ro(void)
+ }
+ mutex_unlock(&module_mutex);
+ }
+-#else
++#else /* !CONFIG_STRICT_MODULE_RWX */
+ static void module_enable_nx(const struct module *mod) { }
+-#endif
++#endif /* CONFIG_STRICT_MODULE_RWX */
++static void module_enable_x(const struct module *mod)
++{
++ frob_text(&mod->core_layout, set_memory_x);
++ frob_text(&mod->init_layout, set_memory_x);
++}
++#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
++static void module_enable_nx(const struct module *mod) { }
++static void module_enable_x(const struct module *mod) { }
++#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
++
+
+ #ifdef CONFIG_LIVEPATCH
+ /*
+@@ -3614,6 +3628,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
+
+ module_enable_ro(mod, false);
+ module_enable_nx(mod);
++ module_enable_x(mod);
+
+ /* Mark state as coming so strong_try_module_get() ignores us,
+ * but kallsyms etc. can see us. */
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 46686d0e3df8..8374b18ebe9a 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -1408,6 +1408,7 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
+ * should freak out.
+ */
+ WARN(1, "Z3fold is experiencing kref problems\n");
++ z3fold_page_unlock(zhdr);
+ return false;
+ }
+ z3fold_page_unlock(zhdr);
+@@ -1439,16 +1440,11 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+ zhdr = page_address(page);
+ pool = zhdr_to_pool(zhdr);
+
+- if (!trylock_page(page))
+- return -EAGAIN;
+-
+ if (!z3fold_page_trylock(zhdr)) {
+- unlock_page(page);
+ return -EAGAIN;
+ }
+ if (zhdr->mapped_count != 0) {
+ z3fold_page_unlock(zhdr);
+- unlock_page(page);
+ return -EBUSY;
+ }
+ if (work_pending(&zhdr->work)) {
+@@ -1494,7 +1490,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+ spin_unlock(&pool->lock);
+
+ page_mapcount_reset(page);
+- unlock_page(page);
+ put_page(page);
+ return 0;
+ }
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index bf6acd34234d..63f9c08625f0 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -437,7 +437,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+ struct nlmsghdr *nlh;
+ struct nlattr *nest;
+
+- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
++ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
+ if (!nlh)
+ return -EMSGSIZE;
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 29fcff2c3d51..2ff556906b5d 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8768,6 +8768,8 @@ int register_netdevice(struct net_device *dev)
+ ret = notifier_to_errno(ret);
+ if (ret) {
+ rollback_registered(dev);
++ rcu_barrier();
++
+ dev->reg_state = NETREG_UNREGISTERED;
+ }
+ /*
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c8cd99c3603f..74efd63f15e2 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3531,6 +3531,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ int pos;
+ int dummy;
+
++ if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
++ (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
++ /* gso_size is untrusted, and we have a frag_list with a linear
++ * non head_frag head.
++ *
++ * (we assume checking the first list_skb member suffices;
++	 * i.e. if either of the list_skb members has a non head_frag
++	 * head, then the first one does too).
++	 *
++	 * If head_skb's headlen does not fit the requested gso_size, it
++	 * means that the frag_list members do NOT terminate on exact
++	 * gso_size boundaries. Hence we cannot perform skb_frag_t page
++	 * sharing. Therefore we must fall back to copying the frag_list
++ * skbs; we do so by disabling SG.
++ */
++ if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
++ features &= ~NETIF_F_SG;
++ }
++
+ __skb_push(head_skb, doffset);
+ proto = skb_network_protocol(head_skb, &dummy);
+ if (unlikely(!proto))
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 8a4a45e7c29d..3b14de0e36d2 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -661,6 +661,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
+ struct sock *sk, u64 flags)
+ {
+ struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
++ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 key_size = map->key_size, hash;
+ struct bpf_htab_elem *elem, *elem_new;
+ struct bpf_htab_bucket *bucket;
+@@ -671,6 +672,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
+ WARN_ON_ONCE(!rcu_read_lock_held());
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
++ if (unlikely(icsk->icsk_ulp_data))
++ return -EINVAL;
+
+ link = sk_psock_init_link();
+ if (!link)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index d95ee40df6c2..21ed010d7551 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
+
+ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
+ {
+- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
++ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+ }
+
+ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
+diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
+index 87d2d8c1db7c..98ac32b49d8c 100644
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
+ return 0;
+ }
+
+-static void __net_init ping_v6_proc_exit_net(struct net *net)
++static void __net_exit ping_v6_proc_exit_net(struct net *net)
+ {
+ remove_proc_entry("icmp6", net->proc_net);
+ }
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 5f5a0a42ce60..6a6e403c71ac 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3841,13 +3841,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
+ struct fib6_config cfg = {
+ .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
+ .fc_ifindex = idev->dev->ifindex,
+- .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
++ .fc_flags = RTF_UP | RTF_NONEXTHOP,
+ .fc_dst = *addr,
+ .fc_dst_len = 128,
+ .fc_protocol = RTPROT_KERNEL,
+ .fc_nlinfo.nl_net = net,
+ .fc_ignore_dev_down = true,
+ };
++ struct fib6_info *f6i;
+
+ if (anycast) {
+ cfg.fc_type = RTN_ANYCAST;
+@@ -3857,7 +3858,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
+ cfg.fc_flags |= RTF_LOCAL;
+ }
+
+- return ip6_route_info_create(&cfg, gfp_flags, NULL);
++ f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
++ if (!IS_ERR(f6i))
++ f6i->dst_nocount = true;
++ return f6i;
+ }
+
+ /* remove deleted ip from prefsrc entries */
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 137db1cbde85..ac28f6a5d70e 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
+ * - updates to tree and tree walking are only done under the rtnl mutex.
+ */
+
++#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
++
+ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+ {
+ const struct netdev_queue *txq = q->dev_queue;
+@@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
+ q->q.qlen--;
+ }
+ } else {
+- skb = NULL;
++ skb = SKB_XOFF_MAGIC;
+ }
+ }
+
+@@ -253,8 +255,11 @@ validate:
+ return skb;
+
+ skb = qdisc_dequeue_skb_bad_txq(q);
+- if (unlikely(skb))
++ if (unlikely(skb)) {
++ if (skb == SKB_XOFF_MAGIC)
++ return NULL;
+ goto bulk;
++ }
+ skb = q->dequeue(q);
+ if (skb) {
+ bulk:
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index cee6971c1c82..23cd1c873a2c 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+ new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
+
+ non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
+- if (non_hh_quantum > INT_MAX)
++ if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
+ return -EINVAL;
+
+ sch_tree_lock(sch);
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 23af232c0a25..e2b4a440416b 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1336,7 +1336,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
+ return status;
+ }
+
+-static void __net_init sctp_ctrlsock_exit(struct net *net)
++static void __net_exit sctp_ctrlsock_exit(struct net *net)
+ {
+ /* Free the control endpoint. */
+ inet_ctl_sock_destroy(net->sctp.ctl_sock);
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index 1cf5bb5b73c4..e52b2128e43b 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
+ if (net->sctp.pf_enable &&
+ (transport->state == SCTP_ACTIVE) &&
+ (transport->error_count < transport->pathmaxrxt) &&
+- (transport->error_count > asoc->pf_retrans)) {
++ (transport->error_count > transport->pf_retrans)) {
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_PF,
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index f33aa9ee9e27..d0324796f0b3 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7176,7 +7176,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ val.spt_pathmaxrxt = trans->pathmaxrxt;
+ val.spt_pathpfthld = trans->pf_retrans;
+
+- return 0;
++ goto out;
+ }
+
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+@@ -7194,6 +7194,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ val.spt_pathmaxrxt = sp->pathmaxrxt;
+ }
+
++out:
+ if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index 61219f0b9677..836e629e8f4a 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
+ publ->key);
+ }
+
+- kfree_rcu(p, rcu);
++ if (p)
++ kfree_rcu(p, rcu);
+ }
+
+ /**
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-16 12:27 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-16 12:27 UTC (permalink / raw
To: gentoo-commits
commit: b4205fc874c8fda736c920edbb6ec18d708ecb78
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 16 12:26:54 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 16 12:26:54 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b4205fc8
Linux patch 5.2.15
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1014_linux-5.2.15.patch | 1595 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1599 insertions(+)
diff --git a/0000_README b/0000_README
index 6458e28..e8d3287 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch: 1013_linux-5.2.14.patch
From: https://www.kernel.org
Desc: Linux 5.2.14
+Patch: 1014_linux-5.2.15.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.15
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1014_linux-5.2.15.patch b/1014_linux-5.2.15.patch
new file mode 100644
index 0000000..19f9e8e
--- /dev/null
+++ b/1014_linux-5.2.15.patch
@@ -0,0 +1,1595 @@
+diff --git a/Makefile b/Makefile
+index d019994462ba..3c977aa66650 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index f0fbbf6a6a1f..4f9bfe9fd960 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
+ }
+ }
+
+-static bool tm_active_with_fp(struct task_struct *tsk)
+-{
+- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+- (tsk->thread.ckpt_regs.msr & MSR_FP);
+-}
+-
+-static bool tm_active_with_altivec(struct task_struct *tsk)
+-{
+- return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
+- (tsk->thread.ckpt_regs.msr & MSR_VEC);
+-}
+ #else
+ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
+-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
+ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+ bool strict_msr_control;
+@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
+
+ static int restore_fp(struct task_struct *tsk)
+ {
+- if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
++ if (tsk->thread.load_fp) {
+		load_fp_state(&current->thread.fp_state);
+ current->thread.load_fp++;
+ return 1;
+@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+
+ static int restore_altivec(struct task_struct *tsk)
+ {
+- if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+- (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
++ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
+ load_vr_state(&tsk->thread.vr_state);
+ tsk->thread.used_vr = 1;
+ tsk->thread.load_vec++;
+@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
+ if (!tsk->thread.regs)
+ return;
+
++ check_if_tm_restore_required(tsk);
++
+ usermsr = tsk->thread.regs->msr;
+
+ if ((usermsr & msr_all_available) == 0)
+ return;
+
+ msr_check_and_set(msr_all_available);
+- check_if_tm_restore_required(tsk);
+
+ WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
+
+diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
+index d4acf6fa0596..bf60983a58c7 100644
+--- a/arch/powerpc/mm/nohash/tlb.c
++++ b/arch/powerpc/mm/nohash/tlb.c
+@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
+ #ifdef CONFIG_PPC_FSL_BOOK3E
+ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
+ unsigned int num_cams;
+- int __maybe_unused cpu = smp_processor_id();
+ bool map = true;
+
+ /* use a quarter of the TLBCAM for bolted linear map */
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index cfe827cefad8..96d42f571a18 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
+ u8 new_irqs;
+ int level, i;
+ u8 invert_irq_mask[MAX_BANK];
+- int reg_direction[MAX_BANK];
++ u8 reg_direction[MAX_BANK];
+
+- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+- NBANK(chip));
++ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
+ if (chip->driver_data & PCA_PCAL) {
+ /* Enable latch on interrupt-enabled inputs */
+@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+ bool pending_seen = false;
+ bool trigger_seen = false;
+ u8 trigger[MAX_BANK];
+- int reg_direction[MAX_BANK];
++ u8 reg_direction[MAX_BANK];
+ int ret, i;
+
+ if (chip->driver_data & PCA_PCAL) {
+@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
+ return false;
+
+ /* Remove output pins from the equation */
+- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+- NBANK(chip));
++ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+ for (i = 0; i < NBANK(chip); i++)
+ cur_stat[i] &= reg_direction[i];
+
+@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
+ {
+ struct i2c_client *client = chip->client;
+ struct irq_chip *irq_chip = &chip->irq_chip;
+- int reg_direction[MAX_BANK];
++ u8 reg_direction[MAX_BANK];
+ int ret, i;
+
+ if (!client->irq)
+@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
+ * interrupt. We have to rely on the previous read for
+ * this purpose.
+ */
+- regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
+- NBANK(chip));
++ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+ for (i = 0; i < NBANK(chip); i++)
+ chip->irq_stat[i] &= reg_direction[i];
+ mutex_init(&chip->irq_lock);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 13d6bd4e17b2..cf748b80e640 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2510,6 +2510,13 @@ enum i915_power_well_id {
+ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
+
+ #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
++#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
++#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
++#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
++#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
++#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
++#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
++#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+ #define RING_MAX_NONPRIV_SLOTS 12
+
+ #define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
+index ae40a8679314..fd5236da039f 100644
+--- a/drivers/gpu/drm/i915/intel_cdclk.c
++++ b/drivers/gpu/drm/i915/intel_cdclk.c
+@@ -2269,6 +2269,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+ min_cdclk = max(2 * 96000, min_cdclk);
+
++ /*
++ * "For DP audio configuration, cdclk frequency shall be set to
++ * meet the following requirements:
++ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
++ * 270 | 320 or higher
++ * 162 | 200 or higher"
++ */
++ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
++ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
++ min_cdclk = max(crtc_state->port_clock, min_cdclk);
++
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000KHz.
+diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
+index 841b8e515f4d..edd57a5e0495 100644
+--- a/drivers/gpu/drm/i915/intel_workarounds.c
++++ b/drivers/gpu/drm/i915/intel_workarounds.c
+@@ -981,7 +981,7 @@ bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
+ }
+
+ static void
+-whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
+ {
+ struct i915_wa wa = {
+ .reg = reg
+@@ -990,9 +990,16 @@ whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
++ wa.reg.reg |= flags;
+ _wa_add(wal, &wa);
+ }
+
++static void
++whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
++{
++ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
++}
++
+ static void gen9_whitelist_build(struct i915_wa_list *w)
+ {
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
+@@ -1005,56 +1012,131 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
+ whitelist_reg(w, GEN8_HDC_CHICKEN1);
+ }
+
+-static void skl_whitelist_build(struct i915_wa_list *w)
++static void skl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+
+-static void bxt_whitelist_build(struct i915_wa_list *w)
++static void bxt_whitelist_build(struct intel_engine_cs *engine)
+ {
+- gen9_whitelist_build(w);
++ if (engine->class != RENDER_CLASS)
++ return;
++
++ gen9_whitelist_build(&engine->whitelist);
+ }
+
+-static void kbl_whitelist_build(struct i915_wa_list *w)
++static void kbl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ whitelist_reg(w, GEN8_L3SQCREG4);
+ }
+
+-static void glk_whitelist_build(struct i915_wa_list *w)
++static void glk_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
+
+ /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+ }
+
+-static void cfl_whitelist_build(struct i915_wa_list *w)
++static void cfl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ gen9_whitelist_build(w);
++
++ /*
++ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
++ *
++	 * This covers 4 registers which are next to one another:
++ * - PS_INVOCATION_COUNT
++ * - PS_INVOCATION_COUNT_UDW
++ * - PS_DEPTH_COUNT
++ * - PS_DEPTH_COUNT_UDW
++ */
++ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++ RING_FORCE_TO_NONPRIV_RD |
++ RING_FORCE_TO_NONPRIV_RANGE_4);
+ }
+
+-static void cnl_whitelist_build(struct i915_wa_list *w)
++static void cnl_whitelist_build(struct intel_engine_cs *engine)
+ {
++ struct i915_wa_list *w = &engine->whitelist;
++
++ if (engine->class != RENDER_CLASS)
++ return;
++
+ /* WaEnablePreemptionGranularityControlByUMD:cnl */
+ whitelist_reg(w, GEN8_CS_CHICKEN1);
+ }
+
+-static void icl_whitelist_build(struct i915_wa_list *w)
++static void icl_whitelist_build(struct intel_engine_cs *engine)
+ {
+- /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+- whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
++ struct i915_wa_list *w = &engine->whitelist;
+
+- /* WaAllowUMDToModifySamplerMode:icl */
+- whitelist_reg(w, GEN10_SAMPLER_MODE);
++ switch (engine->class) {
++ case RENDER_CLASS:
++ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
++ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+- /* WaEnableStateCacheRedirectToCS:icl */
+- whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++ /* WaAllowUMDToModifySamplerMode:icl */
++ whitelist_reg(w, GEN10_SAMPLER_MODE);
++
++ /* WaEnableStateCacheRedirectToCS:icl */
++ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
++
++ /*
++ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
++ *
++	 * This covers 4 registers which are next to one another:
++ * - PS_INVOCATION_COUNT
++ * - PS_INVOCATION_COUNT_UDW
++ * - PS_DEPTH_COUNT
++ * - PS_DEPTH_COUNT_UDW
++ */
++ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
++ RING_FORCE_TO_NONPRIV_RD |
++ RING_FORCE_TO_NONPRIV_RANGE_4);
++ break;
++
++ case VIDEO_DECODE_CLASS:
++ /* hucStatusRegOffset */
++ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ /* hucUKernelHdrInfoRegOffset */
++ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ /* hucStatus2RegOffset */
++ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
++ RING_FORCE_TO_NONPRIV_RD);
++ break;
++
++ default:
++ break;
++ }
+ }
+
+ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+@@ -1062,24 +1144,22 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
+
+- GEM_BUG_ON(engine->id != RCS0);
+-
+ wa_init_start(w, "whitelist");
+
+ if (IS_GEN(i915, 11))
+- icl_whitelist_build(w);
++ icl_whitelist_build(engine);
+ else if (IS_CANNONLAKE(i915))
+- cnl_whitelist_build(w);
++ cnl_whitelist_build(engine);
+ else if (IS_COFFEELAKE(i915))
+- cfl_whitelist_build(w);
++ cfl_whitelist_build(engine);
+ else if (IS_GEMINILAKE(i915))
+- glk_whitelist_build(w);
++ glk_whitelist_build(engine);
+ else if (IS_KABYLAKE(i915))
+- kbl_whitelist_build(w);
++ kbl_whitelist_build(engine);
+ else if (IS_BROXTON(i915))
+- bxt_whitelist_build(w);
++ bxt_whitelist_build(engine);
+ else if (IS_SKYLAKE(i915))
+- skl_whitelist_build(w);
++ skl_whitelist_build(engine);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
+ else
+@@ -1167,8 +1247,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+- GEN7_DISABLE_DEMAND_PREFETCH |
+- GEN7_DISABLE_SAMPLER_PREFETCH);
++ GEN7_DISABLE_DEMAND_PREFETCH);
++
++ /* Wa_1606682166:icl */
++ wa_write_or(wal,
++ GEN7_SARCHKMD,
++ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN_RANGE(i915, 9, 11)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+index 84a2f243ed9b..4695f1c8e33f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+ MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+ MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
++MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+index 59e9d05ab928..0af048d1a815 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ kfree(reply);
+-
++ reply = NULL;
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+ /* A checkpoint occurred. Retry. */
+ continue;
+@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ kfree(reply);
+-
++ reply = NULL;
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
+ /* A checkpoint occurred. Retry. */
+ continue;
+@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ break;
+ }
+
+- if (retries == RETRIES) {
+- kfree(reply);
++ if (!reply)
+ return -EINVAL;
+- }
+
+ *msg_len = reply_len;
+ *msg = reply;
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 7c8cfb149da0..5c0d90418e8c 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1830,23 +1830,13 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+ }
+
+ while (qp->s_last != qp->s_acked) {
+- u32 s_last;
+-
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+ trdma_clean_swqe(qp, wqe);
+- rvt_qp_wqe_unreserve(qp, wqe);
+- s_last = qp->s_last;
+- trace_hfi1_qp_send_completion(qp, wqe, s_last);
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+- rvt_qp_swqe_complete(qp,
++ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+@@ -1890,19 +1880,9 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ trace_hfi1_rc_completion(qp, wqe->lpsn);
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+- u32 s_last;
+-
+ trdma_clean_swqe(qp, wqe);
+- rvt_put_qp_swqe(qp, wqe);
+- rvt_qp_wqe_unreserve(qp, wqe);
+- s_last = qp->s_last;
+- trace_hfi1_qp_send_completion(qp, wqe, s_last);
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_qp_swqe_complete(qp,
++ trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_hfi1_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index 2ac4c67f5ba1..8d9a94d6f685 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -921,20 +921,11 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
+ rvt_add_retry_timer(qp);
+
+ while (qp->s_last != qp->s_acked) {
+- u32 s_last;
+-
+ wqe = rvt_get_swqe_ptr(qp, qp->s_last);
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
+- s_last = qp->s_last;
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+- rvt_qp_swqe_complete(qp,
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_qib_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+@@ -972,21 +963,12 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
+ * is finished.
+ */
+ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
+- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
+- u32 s_last;
+-
+- rvt_put_qp_swqe(qp, wqe);
+- s_last = qp->s_last;
+- if (++s_last >= qp->s_size)
+- s_last = 0;
+- qp->s_last = s_last;
+- /* see post_send() */
+- barrier();
+- rvt_qp_swqe_complete(qp,
++ qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
++ rvt_qp_complete_swqe(qp,
+ wqe,
+ ib_qib_wc_opcode[wqe->wr.opcode],
+ IB_WC_SUCCESS);
+- } else
++ else
+ this_cpu_inc(*ibp->rvp.rc_delayed_comp);
+
+ qp->s_retry = qp->s_retry_cnt;
+diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
+index c5a50614a6c6..cb9e171d7e7b 100644
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -1856,10 +1856,9 @@ static inline int rvt_qp_is_avail(
+
+ /* see rvt_qp_wqe_unreserve() */
+ smp_mb__before_atomic();
+- reserved_used = atomic_read(&qp->s_reserved_used);
+ if (unlikely(reserved_op)) {
+ /* see rvt_qp_wqe_unreserve() */
+- smp_mb__before_atomic();
++ reserved_used = atomic_read(&qp->s_reserved_used);
+ if (reserved_used >= rdi->dparms.reserved_operations)
+ return -ENOMEM;
+ return 0;
+@@ -1867,14 +1866,13 @@ static inline int rvt_qp_is_avail(
+ /* non-reserved operations */
+ if (likely(qp->s_avail))
+ return 0;
+- slast = READ_ONCE(qp->s_last);
++ /* See rvt_qp_complete_swqe() */
++ slast = smp_load_acquire(&qp->s_last);
+ if (qp->s_head >= slast)
+ avail = qp->s_size - (qp->s_head - slast);
+ else
+ avail = slast - qp->s_head;
+
+- /* see rvt_qp_wqe_unreserve() */
+- smp_mb__before_atomic();
+ reserved_used = atomic_read(&qp->s_reserved_used);
+ avail = avail - 1 -
+ (rdi->dparms.reserved_operations - reserved_used);
+@@ -2667,27 +2665,16 @@ void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+ {
+ u32 old_last, last;
+- struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
++ struct rvt_dev_info *rdi;
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
++ rdi = ib_to_rvt(qp->ibqp.device);
+
+- last = qp->s_last;
+- old_last = last;
+- trace_rvt_qp_send_completion(qp, wqe, last);
+- if (++last >= qp->s_size)
+- last = 0;
+- trace_rvt_qp_send_completion(qp, wqe, last);
+- qp->s_last = last;
+- /* See post_send() */
+- barrier();
+- rvt_put_qp_swqe(qp, wqe);
+-
+- rvt_qp_swqe_complete(qp,
+- wqe,
+- rdi->wc_opcode[wqe->wr.opcode],
+- status);
+-
++ old_last = qp->s_last;
++ trace_rvt_qp_send_completion(qp, wqe, old_last);
++ last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
++ status);
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 773f5fdad25f..5cf3247e8afb 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -35,7 +35,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/sched/clock.h>
+ #include <linux/rculist.h>
+-
++#include <linux/delay.h>
+ #include <trace/events/bcache.h>
+
+ /*
+@@ -655,7 +655,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
+ up(&b->io_mutex);
+ }
+
++retry:
++ /*
++ * BTREE_NODE_dirty might be cleared in btree_flush_write() by
++ * __bch_btree_node_write(). To avoid an extra flush, acquire
++ * b->write_lock before checking the BTREE_NODE_dirty bit.
++ */
+ mutex_lock(&b->write_lock);
++ /*
++ * If this btree node is selected in btree_flush_write() by journal
++ * code, delay and retry until the node is flushed by the journal code
++ * and the BTREE_NODE_journal_flush bit is cleared by btree_flush_write().
++ */
++ if (btree_node_journal_flush(b)) {
++ pr_debug("bnode %p is flushing by journal, retry", b);
++ mutex_unlock(&b->write_lock);
++ udelay(1);
++ goto retry;
++ }
++
+ if (btree_node_dirty(b))
+ __bch_btree_node_write(b, &cl);
+ mutex_unlock(&b->write_lock);
+@@ -778,10 +796,15 @@ void bch_btree_cache_free(struct cache_set *c)
+ while (!list_empty(&c->btree_cache)) {
+ b = list_first_entry(&c->btree_cache, struct btree, list);
+
+- if (btree_node_dirty(b))
++ /*
++ * This function is called by cache_set_free(); there is no I/O
++ * request on the cache at this point, so it is unnecessary to
++ * acquire b->write_lock before clearing BTREE_NODE_dirty here.
++ */
++ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+- clear_bit(BTREE_NODE_dirty, &b->flags);
+-
++ clear_bit(BTREE_NODE_dirty, &b->flags);
++ }
+ mca_data_free(b);
+ }
+
+@@ -1067,11 +1090,25 @@ static void btree_node_free(struct btree *b)
+
+ BUG_ON(b == b->c->root);
+
++retry:
+ mutex_lock(&b->write_lock);
++ /*
++ * If the btree node is selected and being flushed in
++ * btree_flush_write(), delay and retry until the
++ * BTREE_NODE_journal_flush bit is cleared; only then is it safe to
++ * free the btree node here. Otherwise the free would race with the
++ * journal flush.
++ */
++ if (btree_node_journal_flush(b)) {
++ mutex_unlock(&b->write_lock);
++ pr_debug("bnode %p journal_flush set, retry", b);
++ udelay(1);
++ goto retry;
++ }
+
+- if (btree_node_dirty(b))
++ if (btree_node_dirty(b)) {
+ btree_complete_write(b, btree_current_write(b));
+- clear_bit(BTREE_NODE_dirty, &b->flags);
++ clear_bit(BTREE_NODE_dirty, &b->flags);
++ }
+
+ mutex_unlock(&b->write_lock);
+
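
A note on the pattern above: mca_reap() (and btree_node_free() below) now
backs off while btree_flush_write() owns a node: take the lock, test the
flag, and if the flusher is still active, drop the lock and retry after a
short delay. A minimal userspace sketch of that lock/test/backoff shape,
with pthreads and C11 atomics standing in for the kernel primitives (all
names here are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <unistd.h>

    struct node {
        pthread_mutex_t write_lock;
        atomic_bool journal_flush;   /* like BTREE_NODE_journal_flush */
        bool dirty;                  /* like BTREE_NODE_dirty */
    };

    /* Retry until the concurrent flusher releases the node; only then
     * is it safe to write it out (or free it) ourselves.
     */
    void reap_node(struct node *b)
    {
        for (;;) {
            pthread_mutex_lock(&b->write_lock);
            if (!atomic_load(&b->journal_flush))
                break;                        /* lock held, flusher done */
            pthread_mutex_unlock(&b->write_lock);
            usleep(1);                        /* like udelay(1) above */
        }
        if (b->dirty) {
            /* ... write the node out ... */
            b->dirty = false;
        }
        pthread_mutex_unlock(&b->write_lock);
    }

The atomic_bool mirrors the kernel's set_bit()/test_bit() on b->flags.
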
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index d1c72ef64edf..76cfd121a486 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -158,11 +158,13 @@ enum btree_flags {
+ BTREE_NODE_io_error,
+ BTREE_NODE_dirty,
+ BTREE_NODE_write_idx,
++ BTREE_NODE_journal_flush,
+ };
+
+ BTREE_FLAG(io_error);
+ BTREE_FLAG(dirty);
+ BTREE_FLAG(write_idx);
++BTREE_FLAG(journal_flush);
+
+ static inline struct btree_write *btree_current_write(struct btree *b)
+ {
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index cae2aff5e27a..33556acdcf9c 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -405,6 +405,7 @@ static void btree_flush_write(struct cache_set *c)
+ retry:
+ best = NULL;
+
++ mutex_lock(&c->bucket_lock);
+ for_each_cached_btree(b, c, i)
+ if (btree_current_write(b)->journal) {
+ if (!best)
+@@ -417,9 +418,14 @@ retry:
+ }
+
+ b = best;
++ if (b)
++ set_btree_node_journal_flush(b);
++ mutex_unlock(&c->bucket_lock);
++
+ if (b) {
+ mutex_lock(&b->write_lock);
+ if (!btree_current_write(b)->journal) {
++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ /* We raced */
+ atomic_long_inc(&c->retry_flush_write);
+@@ -427,6 +433,7 @@ retry:
+ }
+
+ __bch_btree_node_write(b, NULL);
++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
+ mutex_unlock(&b->write_lock);
+ }
+ }
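
This is the other half of that handshake: btree_flush_write() picks a node
under c->bucket_lock, marks it busy before dropping that lock, and clears
the mark under the node's own write_lock once the write (or the detected
race) is resolved. A hedged userspace sketch of the select/mark/operate
ordering, continuing the illustrative types from the previous sketch:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct node {
        pthread_mutex_t write_lock;
        atomic_bool journal_flush;
        bool needs_flush;
    };

    /* Select under the global lock and mark the pick busy before
     * letting go, so concurrent reapers back off until we finish.
     */
    void flush_one(pthread_mutex_t *cache_lock, struct node **nodes, size_t n)
    {
        struct node *best = NULL;

        pthread_mutex_lock(cache_lock);
        for (size_t i = 0; i < n; i++)
            if (nodes[i]->needs_flush)
                best = nodes[i];              /* selection policy elided */
        if (best)
            atomic_store(&best->journal_flush, true);
        pthread_mutex_unlock(cache_lock);

        if (!best)
            return;

        pthread_mutex_lock(&best->write_lock);
        if (best->needs_flush) {
            /* ... write the node ... */
            best->needs_flush = false;
        }                                     /* else: we raced, nothing to do */
        atomic_store(&best->journal_flush, false);
        pthread_mutex_unlock(&best->write_lock);
    }
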
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index b3a130a9ee23..1604f512c7bd 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -883,7 +883,7 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
+
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
+- return sdhci_runtime_resume_host(c->host);
++ return sdhci_runtime_resume_host(c->host, 0);
+ }
+
+ #endif
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index c391510e9ef4..776a94216248 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1705,7 +1705,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
+ }
+
+- err = sdhci_runtime_resume_host(host);
++ err = sdhci_runtime_resume_host(host, 0);
+ if (err)
+ goto disable_ipg_clk;
+
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index d4993582f0f6..e7d1920729fb 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -289,7 +289,7 @@ static int sdhci_at91_runtime_resume(struct device *dev)
+ }
+
+ out:
+- return sdhci_runtime_resume_host(host);
++ return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif /* CONFIG_PM */
+
+diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
+index 4154ee11b47d..267b90374fa4 100644
+--- a/drivers/mmc/host/sdhci-pci-core.c
++++ b/drivers/mmc/host/sdhci-pci-core.c
+@@ -167,7 +167,7 @@ static int sdhci_pci_runtime_suspend_host(struct sdhci_pci_chip *chip)
+
+ err_pci_runtime_suspend:
+ while (--i >= 0)
+- sdhci_runtime_resume_host(chip->slots[i]->host);
++ sdhci_runtime_resume_host(chip->slots[i]->host, 0);
+ return ret;
+ }
+
+@@ -181,7 +181,7 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
+ if (!slot)
+ continue;
+
+- ret = sdhci_runtime_resume_host(slot->host);
++ ret = sdhci_runtime_resume_host(slot->host, 0);
+ if (ret)
+ return ret;
+ }
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 3ddecf479295..e55037ceda73 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -554,7 +554,7 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
+ if (!IS_ERR(pxa->clk_core))
+ clk_prepare_enable(pxa->clk_core);
+
+- return sdhci_runtime_resume_host(host);
++ return sdhci_runtime_resume_host(host, 0);
+ }
+ #endif
+
+diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
+index 8e4a8ba33f05..f5753aef7151 100644
+--- a/drivers/mmc/host/sdhci-s3c.c
++++ b/drivers/mmc/host/sdhci-s3c.c
+@@ -745,7 +745,7 @@ static int sdhci_s3c_runtime_resume(struct device *dev)
+ clk_prepare_enable(busclk);
+ if (ourhost->cur_clk >= 0)
+ clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]);
+- ret = sdhci_runtime_resume_host(host);
++ ret = sdhci_runtime_resume_host(host, 0);
+ return ret;
+ }
+ #endif
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index fc892a8d882f..53f3af53b3fb 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -497,7 +497,7 @@ static int sdhci_sprd_runtime_resume(struct device *dev)
+ return ret;
+ }
+
+- sdhci_runtime_resume_host(host);
++ sdhci_runtime_resume_host(host, 1);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
+index 8a18f14cf842..1dea1ba66f7b 100644
+--- a/drivers/mmc/host/sdhci-xenon.c
++++ b/drivers/mmc/host/sdhci-xenon.c
+@@ -638,7 +638,7 @@ static int xenon_runtime_resume(struct device *dev)
+ priv->restore_needed = false;
+ }
+
+- ret = sdhci_runtime_resume_host(host);
++ ret = sdhci_runtime_resume_host(host, 0);
+ if (ret)
+ goto out;
+ return 0;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 59acf8e3331e..a5dc5aae973e 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3320,7 +3320,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
+
+-int sdhci_runtime_resume_host(struct sdhci_host *host)
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
+ {
+ struct mmc_host *mmc = host->mmc;
+ unsigned long flags;
+@@ -3331,7 +3331,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
+ host->ops->enable_dma(host);
+ }
+
+- sdhci_init(host, 0);
++ sdhci_init(host, soft_reset);
+
+ if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
+ mmc->ios.power_mode != MMC_POWER_OFF) {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 199712e7adbb..d2c7c9c436c9 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -781,7 +781,7 @@ void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
+ int sdhci_suspend_host(struct sdhci_host *host);
+ int sdhci_resume_host(struct sdhci_host *host);
+ int sdhci_runtime_suspend_host(struct sdhci_host *host);
+-int sdhci_runtime_resume_host(struct sdhci_host *host);
++int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset);
+ #endif
+
+ void sdhci_cqe_enable(struct mmc_host *mmc);
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 6a3076881321..8d47ad61bac3 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -132,6 +132,7 @@ struct airq_info {
+ struct airq_iv *aiv;
+ };
+ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
++static DEFINE_MUTEX(airq_areas_lock);
+
+ #define CCW_CMD_SET_VQ 0x13
+ #define CCW_CMD_VDEV_RESET 0x33
+@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ unsigned long bit, flags;
+
+ for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
++ mutex_lock(&airq_areas_lock);
+ if (!airq_areas[i])
+ airq_areas[i] = new_airq_info();
+ info = airq_areas[i];
++ mutex_unlock(&airq_areas_lock);
+ if (!info)
+ return 0;
+ write_lock_irqsave(&info->lock, flags);
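
The virtio_ccw fix is a plain serialize-the-lazy-allocation pattern:
without airq_areas_lock, two CPUs hitting the same empty slot could both
call new_airq_info() and leak one of the results. A small userspace
sketch of the same idea (hypothetical names):

    #include <pthread.h>
    #include <stdlib.h>

    #define MAX_AREAS 64

    struct area { int id; };

    static struct area *areas[MAX_AREAS];
    static pthread_mutex_t areas_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Allocate-on-first-use, serialized so that two callers racing on
     * the same empty slot cannot both allocate.
     */
    struct area *get_area(int i)
    {
        struct area *a;

        pthread_mutex_lock(&areas_lock);
        if (!areas[i])
            areas[i] = calloc(1, sizeof(*areas[i]));
        a = areas[i];
        pthread_mutex_unlock(&areas_lock);
        return a;                   /* NULL only if allocation failed */
    }
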
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index ceec8d5985d4..5faae96735e6 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -13,6 +13,7 @@
+ #include <linux/usb/of.h>
+ #include <linux/clk.h>
+ #include <linux/pinctrl/consumer.h>
++#include <linux/pm_qos.h>
+
+ #include "ci.h"
+ #include "ci_hdrc_imx.h"
+@@ -63,6 +64,11 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
+ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
++ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
++ CI_HDRC_PMQOS,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -72,6 +78,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
+ { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
++ { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+@@ -93,6 +100,8 @@ struct ci_hdrc_imx_data {
+ struct clk *clk_ahb;
+ struct clk *clk_per;
+ /* --------------------------------- */
++ struct pm_qos_request pm_qos_req;
++ const struct ci_hdrc_imx_platform_flag *plat_data;
+ };
+
+ /* Common functions shared by usbmisc drivers */
+@@ -309,6 +318,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ if (!data)
+ return -ENOMEM;
+
++ data->plat_data = imx_platform_flag;
++ pdata.flags |= imx_platform_flag->flags;
+ platform_set_drvdata(pdev, data);
+ data->usbmisc_data = usbmisc_get_init_data(dev);
+ if (IS_ERR(data->usbmisc_data))
+@@ -369,6 +380,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ }
+ }
+ }
++
++ if (pdata.flags & CI_HDRC_PMQOS)
++ pm_qos_add_request(&data->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY, 0);
++
+ ret = imx_get_clks(dev);
+ if (ret)
+ goto disable_hsic_regulator;
+@@ -396,7 +412,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ usb_phy_init(pdata.usb_phy);
+ }
+
+- pdata.flags |= imx_platform_flag->flags;
+ if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
+ data->supports_runtime_pm = true;
+
+@@ -438,7 +453,11 @@ err_clk:
+ imx_disable_unprepare_clks(dev);
+ disable_hsic_regulator:
+ if (data->hsic_pad_regulator)
+- ret = regulator_disable(data->hsic_pad_regulator);
++ /* don't overwrite original ret (cf. EPROBE_DEFER) */
++ regulator_disable(data->hsic_pad_regulator);
++ if (pdata.flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
++ data->ci_pdev = NULL;
+ return ret;
+ }
+
+@@ -451,12 +470,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ }
+- ci_hdrc_remove_device(data->ci_pdev);
++ if (data->ci_pdev)
++ ci_hdrc_remove_device(data->ci_pdev);
+ if (data->override_phy_control)
+ usb_phy_shutdown(data->phy);
+- imx_disable_unprepare_clks(&pdev->dev);
+- if (data->hsic_pad_regulator)
+- regulator_disable(data->hsic_pad_regulator);
++ if (data->ci_pdev) {
++ imx_disable_unprepare_clks(&pdev->dev);
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
++ if (data->hsic_pad_regulator)
++ regulator_disable(data->hsic_pad_regulator);
++ }
+
+ return 0;
+ }
+@@ -480,6 +504,9 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
+ }
+
+ imx_disable_unprepare_clks(dev);
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_remove_request(&data->pm_qos_req);
++
+ data->in_lpm = true;
+
+ return 0;
+@@ -497,6 +524,10 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
+ return 0;
+ }
+
++ if (data->plat_data->flags & CI_HDRC_PMQOS)
++ pm_qos_add_request(&data->pm_qos_req,
++ PM_QOS_CPU_DMA_LATENCY, 0);
++
+ ret = imx_prepare_enable_clks(dev);
+ if (ret)
+ return ret;
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index d8b67e150b12..b7a5727d0c8a 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -763,6 +763,10 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
+ .compatible = "fsl,imx7d-usbmisc",
+ .data = &imx7d_usbmisc_ops,
+ },
++ {
++ .compatible = "fsl,imx7ulp-usbmisc",
++ .data = &imx7d_usbmisc_ops,
++ },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, usbmisc_imx_dt_ids);
+diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
+index 9e90e969af55..7804869c6a31 100644
+--- a/drivers/vhost/test.c
++++ b/drivers/vhost/test.c
+@@ -22,6 +22,12 @@
+ * Using this limit prevents one virtqueue from starving others. */
+ #define VHOST_TEST_WEIGHT 0x80000
+
++/* Max number of packets transferred before requeueing the job.
++ * Using this limit prevents one virtqueue from starving others with
++ * packets.
++ */
++#define VHOST_TEST_PKT_WEIGHT 256
++
+ enum {
+ VHOST_TEST_VQ = 0,
+ VHOST_TEST_VQ_MAX = 1,
+@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
+ }
+ vhost_add_used_and_signal(&n->dev, vq, head, 0);
+ total_len += len;
+- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
+- vhost_poll_queue(&vq->poll);
++ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
+ break;
+- }
+ }
+
+ mutex_unlock(&vq->mutex);
+@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
+ dev = &n->dev;
+ vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
+ n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
+- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
++ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
++ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
+
+ f->private_data = n;
+
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index e995c12d8e24..fcd8bf2846fc 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -2072,7 +2072,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
+ /* If this is an input descriptor, increment that count. */
+ if (access == VHOST_ACCESS_WO) {
+ *in_num += ret;
+- if (unlikely(log)) {
++ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+ ++*log_num;
+@@ -2215,7 +2215,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+- if (unlikely(log)) {
++ if (unlikely(log && ret)) {
+ log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
+ log[*log_num].len = vhost32_to_cpu(vq, desc.len);
+ ++*log_num;
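
The vhost_test conversion above replaces a private byte limit with the
common vhost_exceeds_weight() accounting, which bounds both packets and
bytes handled per worker invocation. The fairness pattern looks roughly
like the following sketch; the helper names are made up, only the shape
matches:

    #include <stddef.h>

    #define BYTE_WEIGHT 0x80000     /* like VHOST_TEST_WEIGHT */
    #define PKT_WEIGHT  256         /* like VHOST_TEST_PKT_WEIGHT */

    struct vq;

    size_t pop_len(struct vq *vq);  /* returns 0 when the queue is empty */
    void requeue(struct vq *vq);    /* schedule this queue to run again */

    /* Drain a queue, but stop after a fixed packet/byte budget so one
     * busy queue cannot starve the others sharing this worker.
     */
    void handle_vq(struct vq *vq)
    {
        size_t total_len = 0;
        int pkts = 0;

        for (;;) {
            size_t len = pop_len(vq);

            if (!len)
                return;             /* drained; no requeue needed */
            total_len += len;
            if (++pkts >= PKT_WEIGHT || total_len >= BYTE_WEIGHT) {
                requeue(vq);        /* yield to the other virtqueues */
                return;
            }
        }
    }
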
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index db337e53aab3..93900ff87df7 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -3591,6 +3591,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+ TASK_UNINTERRUPTIBLE);
+ }
+
++static void end_extent_buffer_writeback(struct extent_buffer *eb)
++{
++ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
++ smp_mb__after_atomic();
++ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
++}
++
+ /*
+ * Lock eb pages and flush the bio if we can't get the locks
+ *
+@@ -3662,8 +3669,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
+
+ if (!trylock_page(p)) {
+ if (!flush) {
+- ret = flush_write_bio(epd);
+- if (ret < 0) {
++ int err;
++
++ err = flush_write_bio(epd);
++ if (err < 0) {
++ ret = err;
+ failed_page_nr = i;
+ goto err_unlock;
+ }
+@@ -3678,16 +3688,23 @@ err_unlock:
+ /* Unlock already locked pages */
+ for (i = 0; i < failed_page_nr; i++)
+ unlock_page(eb->pages[i]);
++ /*
++ * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
++ * Also set EXTENT_BUFFER_DIRTY back so future write attempts on this
++ * eb can be made, undoing everything done above.
++ */
++ btrfs_tree_lock(eb);
++ spin_lock(&eb->refs_lock);
++ set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
++ end_extent_buffer_writeback(eb);
++ spin_unlock(&eb->refs_lock);
++ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
++ fs_info->dirty_metadata_batch);
++ btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
++ btrfs_tree_unlock(eb);
+ return ret;
+ }
+
+-static void end_extent_buffer_writeback(struct extent_buffer *eb)
+-{
+- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+- smp_mb__after_atomic();
+- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+-}
+-
+ static void set_btree_ioerr(struct page *page)
+ {
+ struct extent_buffer *eb = (struct extent_buffer *)page->private;
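
The new btrfs error path has to undo state that lock_extent_buffer_for_io()
had already committed to: mark the buffer dirty again, clear the writeback
bit, and wake any waiters. In userspace terms the clear-then-wake idiom
(a condition variable standing in for wake_up_bit()) looks like this
sketch, assuming a single lock protects both flags:

    #include <pthread.h>
    #include <stdbool.h>

    struct buf {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool writeback;             /* like EXTENT_BUFFER_WRITEBACK */
        bool dirty;                 /* like EXTENT_BUFFER_DIRTY */
    };

    /* Error path: revert the state transition so later writers retry,
     * and wake anyone blocked waiting for writeback to end.
     */
    void writeback_failed(struct buf *b)
    {
        pthread_mutex_lock(&b->lock);
        b->dirty = true;            /* future attempts can redo the work */
        b->writeback = false;       /* like end_extent_buffer_writeback() */
        pthread_cond_broadcast(&b->cond);
        pthread_mutex_unlock(&b->lock);
    }
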
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 911e05af671e..edd89b7c8f18 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -61,6 +61,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_OVERRIDE_PHY_CONTROL BIT(12) /* Glue layer manages phy */
+ #define CI_HDRC_REQUIRES_ALIGNED_DMA BIT(13)
+ #define CI_HDRC_IMX_IS_HSIC BIT(14)
++#define CI_HDRC_PMQOS BIT(15)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
+index 68e38c20afc0..85544777587d 100644
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -540,7 +540,7 @@ static inline void rvt_qp_wqe_reserve(
+ /**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+- * @wqe - the send wqe
++ * @flags - send wqe flags
+ *
+ * This decrements the reserve use count.
+ *
+@@ -552,11 +552,9 @@ static inline void rvt_qp_wqe_reserve(
+ * the compiler does not juggle the order of the s_last
+ * ring index and the decrementing of s_reserved_used.
+ */
+-static inline void rvt_qp_wqe_unreserve(
+- struct rvt_qp *qp,
+- struct rvt_swqe *wqe)
++static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
+ {
+- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
++ if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
+ atomic_dec(&qp->s_reserved_used);
+ /* ensure no compiler re-ordering up to the s_last change */
+ smp_mb__after_atomic();
+@@ -565,42 +563,6 @@ static inline void rvt_qp_wqe_unreserve(
+
+ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
+
+-/**
+- * rvt_qp_swqe_complete() - insert send completion
+- * @qp - the qp
+- * @wqe - the send wqe
+- * @status - completion status
+- *
+- * Insert a send completion into the completion
+- * queue if the qp indicates it should be done.
+- *
+- * See IBTA 10.7.3.1 for info on completion
+- * control.
+- */
+-static inline void rvt_qp_swqe_complete(
+- struct rvt_qp *qp,
+- struct rvt_swqe *wqe,
+- enum ib_wc_opcode opcode,
+- enum ib_wc_status status)
+-{
+- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
+- return;
+- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+- status != IB_WC_SUCCESS) {
+- struct ib_wc wc;
+-
+- memset(&wc, 0, sizeof(wc));
+- wc.wr_id = wqe->wr.wr_id;
+- wc.status = status;
+- wc.opcode = opcode;
+- wc.qp = &qp->ibqp;
+- wc.byte_len = wqe->length;
+- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
+- status != IB_WC_SUCCESS);
+- }
+-}
+-
+ /*
+ * Compare the lower 24 bits of the msn values.
+ * Returns an integer <, ==, or > than zero.
+@@ -737,6 +699,79 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+ }
+
++/**
++ * rvt_qp_swqe_incr - increment ring index
++ * @qp: the qp
++ * @val: the starting value
++ *
++ * Return: the new value wrapping as appropriate
++ */
++static inline u32
++rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
++{
++ if (++val >= qp->s_size)
++ val = 0;
++ return val;
++}
++
++/**
++ * rvt_qp_complete_swqe - insert send completion
++ * @qp - the qp
++ * @wqe - the send wqe
++ * @opcode - wc operation (driver dependent)
++ * @status - completion status
++ *
++ * Update the s_last information, and then insert a send
++ * completion into the completion
++ * queue if the qp indicates it should be done.
++ *
++ * See IBTA 10.7.3.1 for info on completion
++ * control.
++ *
++ * Return: new last
++ */
++static inline u32
++rvt_qp_complete_swqe(struct rvt_qp *qp,
++ struct rvt_swqe *wqe,
++ enum ib_wc_opcode opcode,
++ enum ib_wc_status status)
++{
++ bool need_completion;
++ u64 wr_id;
++ u32 byte_len, last;
++ int flags = wqe->wr.send_flags;
++
++ rvt_qp_wqe_unreserve(qp, flags);
++ rvt_put_qp_swqe(qp, wqe);
++
++ need_completion =
++ !(flags & RVT_SEND_RESERVE_USED) &&
++ (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
++ (flags & IB_SEND_SIGNALED) ||
++ status != IB_WC_SUCCESS);
++ if (need_completion) {
++ wr_id = wqe->wr.wr_id;
++ byte_len = wqe->length;
++ /* above fields required before writing s_last */
++ }
++ last = rvt_qp_swqe_incr(qp, qp->s_last);
++ /* see rvt_qp_is_avail() */
++ smp_store_release(&qp->s_last, last);
++ if (need_completion) {
++ struct ib_wc w = {
++ .wr_id = wr_id,
++ .status = status,
++ .opcode = opcode,
++ .qp = &qp->ibqp,
++ .byte_len = byte_len,
++ };
++
++ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &w,
++ status != IB_WC_SUCCESS);
++ }
++ return last;
++}
++
+ extern const int ib_rvt_state_ops[];
+
+ struct rvt_dev_info;
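
The heart of this rdmavt refactor is a memory-ordering contract:
rvt_qp_complete_swqe() publishes the new s_last with smp_store_release(),
and rvt_qp_is_avail() observes it with smp_load_acquire(), replacing the
old barrier() plus plain store. A compilable userspace analogue of that
producer/consumer pairing, with C11 atomics standing in for the kernel
helpers (all names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SIZE 64

    struct ring {
        _Atomic uint32_t last;      /* consumer index, like qp->s_last */
        uint32_t head;              /* producer index, like qp->s_head */
        uint64_t entries[RING_SIZE];
    };

    /* Consumer: read the retired entry first, then publish the new
     * index with a release store so those reads cannot sink below it.
     */
    void ring_complete(struct ring *r)
    {
        uint32_t last = atomic_load_explicit(&r->last, memory_order_relaxed);

        /* ... consume r->entries[last] ... */
        if (++last >= RING_SIZE)
            last = 0;
        atomic_store_explicit(&r->last, last, memory_order_release);
    }

    /* Producer: the acquire load pairs with the release store above,
     * exactly as the comment in rvt_qp_is_avail() notes.
     */
    uint32_t ring_avail(struct ring *r)
    {
        uint32_t last = atomic_load_explicit(&r->last, memory_order_acquire);
        uint32_t avail;

        if (r->head >= last)
            avail = RING_SIZE - (r->head - last);
        else
            avail = last - r->head;
        return avail - 1;           /* keep one slot open, as the driver does */
    }
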
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 9ecf1e4c624b..b07672e793a8 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4449,6 +4449,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+ if (likely(cfs_rq->runtime_remaining > 0))
+ return;
+
++ if (cfs_rq->throttled)
++ return;
+ /*
+ * if we're unable to extend our runtime we resched so that the active
+ * hierarchy can be throttled
+@@ -4652,6 +4654,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
+ if (!cfs_rq_throttled(cfs_rq))
+ goto next;
+
++ /* By the above check, this should never be true */
++ SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
++
+ runtime = -cfs_rq->runtime_remaining + 1;
+ if (runtime > remaining)
+ runtime = remaining;
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 240ed70912d6..d78938e3e008 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
+ * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+- * @tvlv_len: tvlv length of the previously considered OGM
++ * @ogm_packet: potential OGM in buffer
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
+-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+- __be16 tvlv_len)
++static bool
++batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
++ const struct batadv_ogm_packet *ogm_packet)
+ {
+ int next_buff_pos = 0;
+
+- next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+- next_buff_pos += ntohs(tvlv_len);
++ /* check if there is enough space for the header */
++ next_buff_pos += buff_pos + sizeof(*ogm_packet);
++ if (next_buff_pos > packet_len)
++ return false;
++
++ /* check if there is enough space for the optional TVLV */
++ next_buff_pos += ntohs(ogm_packet->tvlv_len);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
+@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+
+ /* adjust all flags and log packets */
+ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+- batadv_ogm_packet->tvlv_len)) {
++ batadv_ogm_packet)) {
+ /* we might have aggregated direct link packets with an
+ * ordinary base packet
+ */
+@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
+
+ /* unpack the aggregated packets and process them one by one */
+ while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
+- ogm_packet->tvlv_len)) {
++ ogm_packet)) {
+ batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
+
+ ogm_offset += BATADV_OGM_HLEN;
+diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
+index a67720fad46c..fdbd9f4c976b 100644
+--- a/net/batman-adv/netlink.c
++++ b/net/batman-adv/netlink.c
+@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
+ {
+ struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
+
+- return attr ? nla_get_u32(attr) : 0;
++ return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
+ }
+
+ /**
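
Both batman-adv fixes apply the same rule: prove the buffer holds a
complete header before reading a length field out of it (the OGM's
tvlv_len above, or a netlink attribute's payload below). A self-contained
sketch of the bounds check, using a made-up record format:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct rec_hdr {
        uint8_t  type;
        uint8_t  pad;
        uint16_t payload_len;       /* like tvlv_len (host order here) */
    };

    /* Return true only if buf[pos..] holds a full header *and* its
     * declared payload; the header-size check comes first so that
     * payload_len is never read out of bounds.
     */
    bool rec_fits(const uint8_t *buf, size_t buf_len, size_t pos)
    {
        struct rec_hdr h;

        if (pos > buf_len || buf_len - pos < sizeof(h))
            return false;           /* no room for the header itself */
        memcpy(&h, buf + pos, sizeof(h));
        return buf_len - pos - sizeof(h) >= h.payload_len;
    }
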
+diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
+index 92390d457567..18e6546b4467 100644
+--- a/sound/pci/hda/hda_auto_parser.c
++++ b/sound/pci/hda/hda_auto_parser.c
+@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ while (id >= 0) {
+ const struct hda_fixup *fix = codec->fixup_list + id;
+
++ if (++depth > 10)
++ break;
+ if (fix->chained_before)
+ apply_fixup(codec, fix->chain_id, action, depth + 1);
+
+@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
+ }
+ if (!fix->chained || fix->chained_before)
+ break;
+- if (++depth > 10)
+- break;
+ id = fix->chain_id;
+ }
+ }
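
Moving the depth check to the top of the loop means every fixup reached,
whether iteratively or through the chained_before recursion, counts
against the limit, so a cyclic chain_id table can no longer loop forever.
A sketch of the bounded chain walk (simplified, illustrative fields only):

    #define MAX_DEPTH 10

    struct fixup {
        int chain_id;               /* index of the next fixup, or -1 */
        int chained;                /* follow chain_id after applying? */
    };

    /* Walk a fixup chain, bailing out after MAX_DEPTH hops so a
     * misconfigured table containing a cycle terminates anyway.
     */
    void apply_chain(const struct fixup *table, int id, int depth)
    {
        while (id >= 0) {
            const struct fixup *fix = &table[id];

            if (++depth > MAX_DEPTH)
                break;              /* checked before any work is done */
            /* ... apply *fix ... */
            if (!fix->chained)
                break;
            id = fix->chain_id;
        }
    }
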
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 5bf24fb819d2..10d502328b76 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
+ if (spec->init_hook)
+ spec->init_hook(codec);
+
+- snd_hda_apply_verbs(codec);
++ if (!spec->skip_verbs)
++ snd_hda_apply_verbs(codec);
+
+ init_multi_out(codec);
+ init_extra_out(codec);
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 5f199dcb0d18..fb9f1a90238b 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -243,6 +243,7 @@ struct hda_gen_spec {
+ unsigned int indep_hp_enabled:1; /* independent HP enabled */
+ unsigned int have_aamix_ctl:1;
+ unsigned int hp_mic_jack_modes:1;
++ unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
+
+ /* additional mute flags (only effective with auto_mute_via_amp=1) */
+ u64 mute_bits;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index e333b3e30e31..c1ddfd2fac52 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
+ if (spec->init_hook)
+ spec->init_hook(codec);
+
++ spec->gen.skip_verbs = 1; /* applied in below */
+ snd_hda_gen_init(codec);
+ alc_fix_pll(codec);
+ alc_auto_init_amp(codec, spec->init_amp);
++ snd_hda_apply_verbs(codec); /* apply verbs here after own init */
+
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+
+@@ -5797,6 +5799,7 @@ enum {
+ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+ ALC299_FIXUP_PREDATOR_SPK,
++ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ { }
+ }
+ },
++ [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
++ { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
++ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++ SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
+ static const struct hda_device_id snd_hda_id_realtek[] = {
+ HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
++ HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-10 11:15 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-10 11:15 UTC (permalink / raw
To: gentoo-commits
commit: 4395dd51acf5698749593ea693441291af71e1de
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 10 11:14:46 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 10 11:14:46 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4395dd51
Linux patch 5.2.14
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 12 +
1013_linux-5.2.14.patch | 3717 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3729 insertions(+)
diff --git a/0000_README b/0000_README
index 374124c..6458e28 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,18 @@ Patch: 1010_linux-5.2.11.patch
From: https://www.kernel.org
Desc: Linux 5.2.11
+Patch: 1011_linux-5.2.12.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.12
+
+Patch: 1012_linux-5.2.13.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.13
+
+Patch: 1013_linux-5.2.14.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.14
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1013_linux-5.2.14.patch b/1013_linux-5.2.14.patch
new file mode 100644
index 0000000..0c47490
--- /dev/null
+++ b/1013_linux-5.2.14.patch
@@ -0,0 +1,3717 @@
+diff --git a/Makefile b/Makefile
+index 288284de8858..d019994462ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
+index f8debf7aeb4c..76e1edf5bf12 100644
+--- a/arch/x86/boot/compressed/pgtable_64.c
++++ b/arch/x86/boot/compressed/pgtable_64.c
+@@ -73,6 +73,8 @@ static unsigned long find_trampoline_placement(void)
+
+ /* Find the first usable memory region under bios_start. */
+ for (i = boot_params->e820_entries - 1; i >= 0; i--) {
++ unsigned long new = bios_start;
++
+ entry = &boot_params->e820_table[i];
+
+ /* Skip all entries above bios_start. */
+@@ -85,15 +87,20 @@ static unsigned long find_trampoline_placement(void)
+
+ /* Adjust bios_start to the end of the entry if needed. */
+ if (bios_start > entry->addr + entry->size)
+- bios_start = entry->addr + entry->size;
++ new = entry->addr + entry->size;
+
+ /* Keep bios_start page-aligned. */
+- bios_start = round_down(bios_start, PAGE_SIZE);
++ new = round_down(new, PAGE_SIZE);
+
+ /* Skip the entry if it's too small. */
+- if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
++ if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
+ continue;
+
++ /* Protect against underflow. */
++ if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
++ break;
++
++ bios_start = new;
+ break;
+ }
+
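
The added guard is an unsigned-underflow check: if new is smaller than
TRAMPOLINE_32BIT_SIZE, the subtraction wraps around to a huge value, and
the comparison against the original bios_start catches that. Reduced to
its essentials (the constant is a placeholder, not the real size):

    #include <stdbool.h>

    #define TRAMPOLINE_SIZE 0x2000UL    /* placeholder value */

    /* Accept new_addr as a placement only if subtracting the
     * trampoline size cannot wrap: after an unsigned underflow the
     * result would exceed any sane upper bound.
     */
    bool placement_ok(unsigned long new_addr, unsigned long upper_bound)
    {
        return new_addr - TRAMPOLINE_SIZE <= upper_bound;
    }
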
+diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
+index b16a6c7da6eb..f497697aa15d 100644
+--- a/arch/x86/include/asm/bootparam_utils.h
++++ b/arch/x86/include/asm/bootparam_utils.h
+@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
++ BOOT_PARAM_PRESERVE(secure_boot),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_table),
+ BOOT_PARAM_PRESERVE(eddbuf),
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 97c3a1c9502e..2f067b443326 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1152,10 +1152,6 @@ void clear_local_APIC(void)
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+ v = apic_read(APIC_LVT1);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+- if (!x2apic_enabled()) {
+- v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+- apic_write(APIC_LDR, v);
+- }
+ if (maxlvt >= 4) {
+ v = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
+diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
+index aff1d22223bd..ee25e6ae1a09 100644
+--- a/drivers/bluetooth/btqca.c
++++ b/drivers/bluetooth/btqca.c
+@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
+ return 0;
+ }
+
++int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
++{
++ struct sk_buff *skb;
++ int err;
++
++ bt_dev_dbg(hdev, "QCA pre shutdown cmd");
++
++ skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
++ NULL, HCI_INIT_TIMEOUT);
++ if (IS_ERR(skb)) {
++ err = PTR_ERR(skb);
++ bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
++ return err;
++ }
++
++ kfree_skb(skb);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
++
+ static void qca_tlv_check_data(struct rome_config *config,
+ const struct firmware *fw)
+ {
+@@ -350,6 +371,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ return err;
+ }
+
++ /* Give the controller some time to get ready to receive the NVM */
++ msleep(10);
++
+ /* Download NVM configuration */
+ config.type = TLV_TYPE_NVM;
+ if (qca_is_wcn399x(soc_type))
+diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
+index e9c999959603..f2a9e576a86c 100644
+--- a/drivers/bluetooth/btqca.h
++++ b/drivers/bluetooth/btqca.h
+@@ -13,6 +13,7 @@
+ #define EDL_PATCH_TLV_REQ_CMD (0x1E)
+ #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
+ #define MAX_SIZE_PER_TLV_SEGMENT (243)
++#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
+
+ #define EDL_CMD_REQ_RES_EVT (0x00)
+ #define EDL_PATCH_VER_RES_EVT (0x19)
+@@ -130,6 +131,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
+ enum qca_btsoc_type soc_type, u32 soc_ver);
+ int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
+ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
++int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
+ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+ {
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+@@ -161,4 +163,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+ {
+ return false;
+ }
++
++static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
++{
++ return -EOPNOTSUPP;
++}
+ #endif
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index f41fb2c02e4f..d88b024eaf56 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -1319,6 +1319,9 @@ static int qca_power_off(struct hci_dev *hdev)
+ {
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
++ /* Perform pre shutdown command */
++ qca_send_pre_shutdown_cmd(hdev);
++
+ qca_power_shutdown(hu);
+ return 0;
+ }
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 87b410d6e51d..3a4961dc5831 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
+ return NULL;
+ }
+
++#ifdef CONFIG_OF
++static int of_parse_clkspec(const struct device_node *np, int index,
++ const char *name, struct of_phandle_args *out_args);
++static struct clk_hw *
++of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
++#else
++static inline int of_parse_clkspec(const struct device_node *np, int index,
++ const char *name,
++ struct of_phandle_args *out_args)
++{
++ return -ENOENT;
++}
++static inline struct clk_hw *
++of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
++{
++ return ERR_PTR(-ENOENT);
++}
++#endif
++
+ /**
+ * clk_core_get - Find the clk_core parent of a clk
+ * @core: clk to find parent of
+@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
+ * };
+ *
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
+- * exist in the provider. -EINVAL when the name can't be found. NULL when the
+- * provider knows about the clk but it isn't provided on this system.
++ * exist in the provider or the name can't be found in the DT node or
++ * in a clkdev lookup. NULL when the provider knows about the clk but it
++ * isn't provided on this system.
+ * A valid clk_core pointer when the clk can be found in the provider.
+ */
+ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+ struct device *dev = core->dev;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct device_node *np = core->of_node;
++ struct of_phandle_args clkspec;
+
+- if (np && (name || index >= 0))
+- hw = of_clk_get_hw(np, index, name);
+-
+- /*
+- * If the DT search above couldn't find the provider or the provider
+- * didn't know about this clk, fallback to looking up via clkdev based
+- * clk_lookups
+- */
+- if (PTR_ERR(hw) == -ENOENT && name)
++ if (np && (name || index >= 0) &&
++ !of_parse_clkspec(np, index, name, &clkspec)) {
++ hw = of_clk_get_hw_from_clkspec(&clkspec);
++ of_node_put(clkspec.np);
++ } else if (name) {
++ /*
++ * If the DT search above couldn't find the provider fallback to
++ * looking up via clkdev based clk_lookups.
++ */
+ hw = clk_find_hw(dev_id, name);
++ }
+
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
+ parent = ERR_PTR(-EPROBE_DEFER);
+ } else {
+ parent = clk_core_get(core, index);
+- if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
++ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
+ parent = clk_core_lookup(entry->name);
+ }
+
+@@ -1635,7 +1657,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
+ break;
+
+ /* Fallback to comparing globally unique names */
+- if (!strcmp(parent->name, core->parents[i].name))
++ if (core->parents[i].name &&
++ !strcmp(parent->name, core->parents[i].name))
+ break;
+ }
+
+diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
+index 91db7894125d..65c82d922b05 100644
+--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
++++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
+@@ -14,7 +14,7 @@
+ #include "clk-exynos5-subcmu.h"
+
+ static struct samsung_clk_provider *ctx;
+-static const struct exynos5_subcmu_info *cmu;
++static const struct exynos5_subcmu_info **cmu;
+ static int nr_cmus;
+
+ static void exynos5_subcmu_clk_save(void __iomem *base,
+@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
+ * when OF-core populates all device-tree nodes.
+ */
+ void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
+- const struct exynos5_subcmu_info *_cmu)
++ const struct exynos5_subcmu_info **_cmu)
+ {
+ ctx = _ctx;
+ cmu = _cmu;
+ nr_cmus = _nr_cmus;
+
+ for (; _nr_cmus--; _cmu++) {
+- exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
+- _cmu->nr_gate_clks);
+- exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
+- _cmu->nr_suspend_regs);
++ exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
++ (*_cmu)->nr_gate_clks);
++ exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
++ (*_cmu)->nr_suspend_regs);
+ }
+ }
+
+@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
+ if (of_property_read_string(np, "label", &name) < 0)
+ continue;
+ for (i = 0; i < nr_cmus; i++)
+- if (strcmp(cmu[i].pd_name, name) == 0)
++ if (strcmp(cmu[i]->pd_name, name) == 0)
+ exynos5_clk_register_subcmu(&pdev->dev,
+- &cmu[i], np);
++ cmu[i], np);
+ }
+ return 0;
+ }
+diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
+index 755ee8aaa3de..9ae5356f25aa 100644
+--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
++++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
+@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
+ };
+
+ void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
+- const struct exynos5_subcmu_info *cmu);
++ const struct exynos5_subcmu_info **cmu);
+
+ #endif
+diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
+index f2b896881768..931c70a4da19 100644
+--- a/drivers/clk/samsung/clk-exynos5250.c
++++ b/drivers/clk/samsung/clk-exynos5250.c
+@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
+ .pd_name = "DISP1",
+ };
+
++static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
++ &exynos5250_disp_subcmu,
++};
++
+ static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
+ /* sorted in descending order */
+ /* PLL_36XX_RATE(rate, m, p, s, k) */
+@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
+
+ samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
+ ARRAY_SIZE(exynos5250_clk_regs));
+- exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
++ exynos5250_subcmus);
+
+ samsung_clk_of_add_provider(np, ctx);
+
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 12d800fd9528..893697e00d2a 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -524,8 +524,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
+ GATE_BUS_TOP, 24, 0, 0),
+ GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
+ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
+- GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+- SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
+@@ -567,8 +565,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
+
+ static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
+ GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
++ /* Maudio Block */
+ GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
++ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
++ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+ };
+
+ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
+@@ -867,9 +870,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
+ /* GSCL Block */
+ DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
+
+- /* MSCL Block */
+- DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+-
+ /* PSGEN */
+ DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
+ DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
+@@ -994,12 +994,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+ GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
+ GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
+
+- /* Maudio Block */
+- GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+- GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+- GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+- GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+-
+ /* FSYS Block */
+ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
+@@ -1139,17 +1133,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
+ GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
+ GATE_IP_GSCL1, 17, 0, 0),
+
+- /* MSCL Block */
+- GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+- GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+- GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+- GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+- GATE_IP_MSCL, 8, 0, 0),
+- GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+- GATE_IP_MSCL, 9, 0, 0),
+- GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+- GATE_IP_MSCL, 10, 0, 0),
+-
+ /* ISP */
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
+ GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
+@@ -1232,32 +1215,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
+ { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
+ };
+
+-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
+- {
+- .div_clks = exynos5x_disp_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
+- .gate_clks = exynos5x_disp_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
+- .suspend_regs = exynos5x_disp_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+- .pd_name = "DISP",
+- }, {
+- .div_clks = exynos5x_gsc_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
+- .gate_clks = exynos5x_gsc_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
+- .suspend_regs = exynos5x_gsc_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+- .pd_name = "GSC",
+- }, {
+- .div_clks = exynos5x_mfc_div_clks,
+- .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
+- .gate_clks = exynos5x_mfc_gate_clks,
+- .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
+- .suspend_regs = exynos5x_mfc_suspend_regs,
+- .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+- .pd_name = "MFC",
+- },
++static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
++ /* MSCL Block */
++ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
++ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
++ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
++ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
++ GATE_IP_MSCL, 8, 0, 0),
++ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
++ GATE_IP_MSCL, 9, 0, 0),
++ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
++ GATE_IP_MSCL, 10, 0, 0),
++};
++
++static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
++ DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
++};
++
++static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
++ { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
++ { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
++ { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
++};
++
++static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
++ GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
++ SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
++ GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
++ GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
++ GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
++};
++
++static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
++ { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
++};
++
++static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
++ .div_clks = exynos5x_disp_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
++ .gate_clks = exynos5x_disp_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
++ .suspend_regs = exynos5x_disp_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
++ .pd_name = "DISP",
++};
++
++static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
++ .div_clks = exynos5x_gsc_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
++ .gate_clks = exynos5x_gsc_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
++ .suspend_regs = exynos5x_gsc_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
++ .pd_name = "GSC",
++};
++
++static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
++ .div_clks = exynos5x_mfc_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
++ .gate_clks = exynos5x_mfc_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
++ .suspend_regs = exynos5x_mfc_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
++ .pd_name = "MFC",
++};
++
++static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
++ .div_clks = exynos5x_mscl_div_clks,
++ .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
++ .gate_clks = exynos5x_mscl_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
++ .suspend_regs = exynos5x_mscl_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
++ .pd_name = "MSC",
++};
++
++static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
++ .gate_clks = exynos5800_mau_gate_clks,
++ .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
++ .suspend_regs = exynos5800_mau_suspend_regs,
++ .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
++ .pd_name = "MAU",
++};
++
++static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
++ &exynos5x_disp_subcmu,
++ &exynos5x_gsc_subcmu,
++ &exynos5x_mfc_subcmu,
++ &exynos5x_mscl_subcmu,
++};
++
++static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
++ &exynos5x_disp_subcmu,
++ &exynos5x_gsc_subcmu,
++ &exynos5x_mfc_subcmu,
++ &exynos5x_mscl_subcmu,
++ &exynos5800_mau_subcmu,
+ };
+
+ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
+@@ -1475,11 +1529,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
+ samsung_clk_extended_sleep_init(reg_base,
+ exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
+ exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
+- if (soc == EXYNOS5800)
++
++ if (soc == EXYNOS5800) {
+ samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
+ ARRAY_SIZE(exynos5800_clk_regs));
+- exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+- exynos5x_subcmus);
++
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
++ exynos5800_subcmus);
++ } else {
++ exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
++ exynos5x_subcmus);
++ }
+
+ samsung_clk_of_add_provider(np, ctx);
+ }
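
Switching exynos5_subcmus_init() to an array of pointers lets the
Exynos5800 list reuse the common descriptors and append its MAU-only
entry without duplicating any data. The composition pattern, stripped
down to illustrative fields:

    #include <stddef.h>

    struct subcmu_info {
        const char *pd_name;
        /* ... clock tables elided ... */
    };

    static const struct subcmu_info disp = { .pd_name = "DISP" };
    static const struct subcmu_info mau  = { .pd_name = "MAU" };

    /* Pointer arrays let SoC variants share descriptors: the wider
     * list is the common list plus one extra entry, with no copies.
     */
    static const struct subcmu_info *common_subcmus[]  = { &disp };
    static const struct subcmu_info *variant_subcmus[] = { &disp, &mau };

    void subcmus_init(const struct subcmu_info **cmus, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            /* register everything described by cmus[i] */
            (void)cmus[i]->pd_name;
        }
    }
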
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 7f9f75201138..f272b5143997 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1373,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ if (status)
+ goto err_remove_from_list;
+
+- status = gpiochip_irqchip_init_valid_mask(chip);
+- if (status)
+- goto err_remove_from_list;
+-
+ status = gpiochip_alloc_valid_mask(chip);
+ if (status)
+- goto err_remove_irqchip_mask;
+-
+- status = gpiochip_add_irqchip(chip, lock_key, request_key);
+- if (status)
+- goto err_free_gpiochip_mask;
++ goto err_remove_from_list;
+
+ status = of_gpiochip_add(chip);
+ if (status)
+- goto err_remove_chip;
++ goto err_free_gpiochip_mask;
+
+ status = gpiochip_init_valid_mask(chip);
+ if (status)
+@@ -1413,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+
+ machine_gpiochip_add(chip);
+
++ status = gpiochip_irqchip_init_valid_mask(chip);
++ if (status)
++ goto err_remove_acpi_chip;
++
++ status = gpiochip_add_irqchip(chip, lock_key, request_key);
++ if (status)
++ goto err_remove_irqchip_mask;
++
+ /*
+ * By first adding the chardev, and then adding the device,
+ * we get a device node entry in sysfs under
+@@ -1424,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ if (gpiolib_initialized) {
+ status = gpiochip_setup_dev(gdev);
+ if (status)
+- goto err_remove_acpi_chip;
++ goto err_remove_irqchip;
+ }
+ return 0;
+
++err_remove_irqchip:
++ gpiochip_irqchip_remove(chip);
++err_remove_irqchip_mask:
++ gpiochip_irqchip_free_valid_mask(chip);
+ err_remove_acpi_chip:
+ acpi_gpiochip_remove(chip);
+ err_remove_of_chip:
+ gpiochip_free_hogs(chip);
+ of_gpiochip_remove(chip);
+-err_remove_chip:
+- gpiochip_irqchip_remove(chip);
+ err_free_gpiochip_mask:
+ gpiochip_free_valid_mask(chip);
+-err_remove_irqchip_mask:
+- gpiochip_irqchip_free_valid_mask(chip);
+ err_remove_from_list:
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&gdev->list);
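
The gpiolib change is really about keeping the error-unwind ladder a
mirror image of the setup order: the irqchip now comes up last, so it is
torn down first on failure. The general goto-ladder idiom, as a
standalone sketch with placeholder setup/teardown steps:

    int setup_a(void);  void teardown_a(void);
    int setup_b(void);  void teardown_b(void);
    int setup_c(void);  void teardown_c(void);

    /* Each failure label undoes exactly the steps that already
     * succeeded, in reverse order. Reordering a setup step means
     * reordering its teardown label too; getting that wrong is the
     * class of bug fixed above.
     */
    int register_all(void)
    {
        int err;

        err = setup_a();
        if (err)
            return err;
        err = setup_b();
        if (err)
            goto undo_a;
        err = setup_c();
        if (err)
            goto undo_b;
        return 0;

    undo_b:
        teardown_b();
    undo_a:
        teardown_a();
        return err;
    }
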
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fe028561dc0e..bc40d6eabce7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1192,6 +1192,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
++ if (p->post_deps)
++ return -EINVAL;
++
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+@@ -1215,8 +1218,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
+
+
+ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+- struct amdgpu_cs_chunk
+- *chunk)
++ struct amdgpu_cs_chunk *chunk)
+ {
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+@@ -1226,6 +1228,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
++ if (p->post_deps)
++ return -EINVAL;
++
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
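Both new checks reject a duplicate syncobj-out chunk before it can overwrite, and thereby leak, the array allocated for the first one. A hypothetical standalone sketch of the same guard:

    #include <errno.h>
    #include <stdlib.h>

    struct parser {
        int *post_deps;
        size_t num_post_deps;
    };

    static int process_out_dep_chunk(struct parser *p, size_t num_deps)
    {
        /* A second chunk of this type would clobber p->post_deps and
         * leak the first allocation, so refuse it up front. */
        if (p->post_deps)
            return -EINVAL;

        p->post_deps = calloc(num_deps, sizeof(*p->post_deps));
        if (!p->post_deps)
            return -ENOMEM;
        p->num_post_deps = num_deps;
        return 0;
    }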
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+index 95fdbd0fbcac..c021d4c8324f 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+@@ -213,6 +213,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *pdev;
+ struct device_node *np;
++ struct device *dma_dev;
+ int ret;
+
+ if (!iommu_present(&platform_bus_type))
+@@ -275,7 +276,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ goto err_component_unbind;
+ }
+
+- private->dma_dev = &pdev->dev;
++ dma_dev = &pdev->dev;
++ private->dma_dev = dma_dev;
++
++ /*
++ * Configure the DMA segment size to make sure we get contiguous IOVA
++ * when importing PRIME buffers.
++ */
++ if (!dma_dev->dma_parms) {
++ private->dma_parms_allocated = true;
++ dma_dev->dma_parms =
++ devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
++ GFP_KERNEL);
++ }
++ if (!dma_dev->dma_parms) {
++ ret = -ENOMEM;
++ goto err_component_unbind;
++ }
++
++ ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
++ if (ret) {
++ dev_err(dma_dev, "Failed to set DMA segment size\n");
++ goto err_unset_dma_parms;
++ }
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+@@ -285,13 +308,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
+ drm->irq_enabled = true;
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret < 0)
+- goto err_component_unbind;
++ goto err_unset_dma_parms;
+
+ drm_kms_helper_poll_init(drm);
+ drm_mode_config_reset(drm);
+
+ return 0;
+
++err_unset_dma_parms:
++ if (private->dma_parms_allocated)
++ dma_dev->dma_parms = NULL;
+ err_component_unbind:
+ component_unbind_all(drm->dev, drm);
+ err_config_cleanup:
+@@ -302,9 +328,14 @@ err_config_cleanup:
+
+ static void mtk_drm_kms_deinit(struct drm_device *drm)
+ {
++ struct mtk_drm_private *private = drm->dev_private;
++
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+
++ if (private->dma_parms_allocated)
++ private->dma_dev->dma_parms = NULL;
++
+ component_unbind_all(drm->dev, drm);
+ drm_mode_config_cleanup(drm);
+ }
+@@ -320,6 +351,18 @@ static const struct file_operations mtk_drm_fops = {
+ .compat_ioctl = drm_compat_ioctl,
+ };
+
++/*
++ * We need to override this because the device used to import the memory is
++ * not dev->dev, as drm_gem_prime_import() expects.
++ */
++struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
++ struct dma_buf *dma_buf)
++{
++ struct mtk_drm_private *private = dev->dev_private;
++
++ return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
++}
++
+ static struct drm_driver mtk_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+@@ -331,7 +374,7 @@ static struct drm_driver mtk_drm_driver = {
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+- .gem_prime_import = drm_gem_prime_import,
++ .gem_prime_import = mtk_drm_gem_prime_import,
+ .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
+ .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+index 598ff3e70446..e03fea12ff59 100644
+--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
++++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+@@ -51,6 +51,8 @@ struct mtk_drm_private {
+ } commit;
+
+ struct drm_atomic_state *suspend_state;
++
++ bool dma_parms_allocated;
+ };
+
+ extern struct platform_driver mtk_ddp_driver;
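Condensed from the hunks above, the essential sequence is: allocate dev->dma_parms when the platform has not provided it, raise the maximum DMA segment size so imported PRIME buffers map to one contiguous IOVA range, and remember whether the allocation was ours so teardown can undo it. A kernel-style sketch using only the calls from the patch (error unwinding omitted; not compilable outside the tree):

    struct device *dma_dev = &pdev->dev;

    if (!dma_dev->dma_parms) {
        private->dma_parms_allocated = true;  /* undo on deinit */
        dma_dev->dma_parms = devm_kzalloc(drm->dev,
                                          sizeof(*dma_dev->dma_parms),
                                          GFP_KERNEL);
        if (!dma_dev->dma_parms)
            return -ENOMEM;
    }

    /* Without dma_parms the default max segment size is 64K, and sg
     * tables for large buffers get split into many segments. */
    return dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));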
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 8bbe3d0cbe5d..8fd44407a0df 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -1152,8 +1152,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
+
+ INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
+
+- cp2112_gpio_direction_input(gc, d->hwirq);
+-
+ if (!dev->gpio_poll) {
+ dev->gpio_poll = true;
+ schedule_delayed_work(&dev->gpio_poll_worker, 0);
+@@ -1201,6 +1199,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
+ return PTR_ERR(dev->desc[pin]);
+ }
+
++ ret = cp2112_gpio_direction_input(&dev->gc, pin);
++ if (ret < 0) {
++ dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
++ goto err_desc;
++ }
++
+ ret = gpiochip_lock_as_irq(&dev->gc, pin);
+ if (ret) {
+ dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index 1065692f90e2..5792a104000a 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -24,6 +24,7 @@
+ #define ICL_MOBILE_DEVICE_ID 0x34FC
+ #define SPT_H_DEVICE_ID 0xA135
+ #define CML_LP_DEVICE_ID 0x02FC
++#define EHL_Ax_DEVICE_ID 0x4BB3
+
+ #define REVISION_ID_CHT_A0 0x6
+ #define REVISION_ID_CHT_Ax_SI 0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 17ae49fba920..8cce3cfe28e0 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+ {0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 19f1730a4f24..a68d0ccf67a4 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
+ if (ret)
+ goto err;
+
+- cma_configfs_init();
++ ret = cma_configfs_init();
++ if (ret)
++ goto err_ib;
+
+ return 0;
+
++err_ib:
++ ib_unregister_client(&cma_client);
+ err:
+ unregister_netdevice_notifier(&cma_nb);
+ ib_sa_unregister_client(&sa_client);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 48b04d2f175f..60c8f76aab33 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+ spin_unlock_irqrestore(&cmdq->lock, flags);
+ return -EBUSY;
+ }
++
++ size = req->cmd_size;
++	/* Change the cmd_size to the number of 16-byte cmdq units.
++	 * req->cmd_size is modified here.
++ */
++ bnxt_qplib_set_cmd_slots(req);
++
+ memset(resp, 0, sizeof(*resp));
+ crsqe->resp = (struct creq_qp_event *)resp;
+ crsqe->resp->cookie = req->cookie;
+@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+
+ cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
+ preq = (u8 *)req;
+- size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(cmdq->prod, cmdq);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+index 2138533bb642..dfeadc192e17 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -55,9 +55,7 @@
+ do { \
+ memset(&(req), 0, sizeof((req))); \
+ (req).opcode = CMDQ_BASE_OPCODE_##CMD; \
+- (req).cmd_size = (sizeof((req)) + \
+- BNXT_QPLIB_CMDQE_UNITS - 1) / \
+- BNXT_QPLIB_CMDQE_UNITS; \
++ (req).cmd_size = sizeof((req)); \
+ (req).flags = cpu_to_le16(cmd_flags); \
+ } while (0)
+
+@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
+ BNXT_QPLIB_CMDQE_UNITS);
+ }
+
++/* Convert cmd_size from bytes to the number of CMDQE slots, rounded up */
++static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
++{
++ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
++ BNXT_QPLIB_CMDQE_UNITS;
++}
++
+ #define MAX_CMDQ_IDX(depth) ((depth) - 1)
+
+ static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
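bnxt_qplib_set_cmd_slots() is ordinary round-up division from bytes to 16-byte command slots, now applied once inside __send_message() instead of at every macro expansion site. A standalone check of the arithmetic, assuming the 16-byte unit size of BNXT_QPLIB_CMDQE_UNITS:

    #include <assert.h>

    #define CMDQE_UNITS 16U

    static unsigned int bytes_to_slots(unsigned int bytes)
    {
        /* round up: a partial slot still occupies a full one */
        return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
    }

    int main(void)
    {
        assert(bytes_to_slots(1)  == 1);
        assert(bytes_to_slots(16) == 1);
        assert(bytes_to_slots(17) == 2);
        return 0;
    }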
+diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
+index 93613e5def9b..986c12153e62 100644
+--- a/drivers/infiniband/hw/hfi1/fault.c
++++ b/drivers/infiniband/hw/hfi1/fault.c
+@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ if (!data)
+ return -ENOMEM;
+ copy = min(len, datalen - 1);
+- if (copy_from_user(data, buf, copy))
+- return -EFAULT;
++ if (copy_from_user(data, buf, copy)) {
++ ret = -EFAULT;
++ goto free_data;
++ }
+
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+- return ret;
++ goto free_data;
+ ptr = data;
+ token = ptr;
+ for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
+@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
+ ret = len;
+
+ debugfs_file_put(file->f_path.dentry);
++free_data:
+ kfree(data);
+ return ret;
+ }
+@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ return -ENOMEM;
+ ret = debugfs_file_get(file->f_path.dentry);
+ if (unlikely(ret))
+- return ret;
++ goto free_data;
+ bit = find_first_bit(fault->opcodes, bitsize);
+ while (bit < bitsize) {
+ zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
+@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
+ data[size - 1] = '\n';
+ data[size] = '\0';
+ ret = simple_read_from_buffer(buf, len, pos, data, size);
++free_data:
+ kfree(data);
+ return ret;
+ }
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 68c951491a08..57079110af9b 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1677,8 +1677,6 @@ tx_err:
+ tx_buf_size, DMA_TO_DEVICE);
+ kfree(tun_qp->tx_ring[i].buf.addr);
+ }
+- kfree(tun_qp->tx_ring);
+- tun_qp->tx_ring = NULL;
+ i = MLX4_NUM_TUNNEL_BUFS;
+ err:
+ while (i > 0) {
+@@ -1687,6 +1685,8 @@ err:
+ rx_buf_size, DMA_FROM_DEVICE);
+ kfree(tun_qp->ring[i].addr);
+ }
++ kfree(tun_qp->tx_ring);
++ tun_qp->tx_ring = NULL;
+ kfree(tun_qp->ring);
+ tun_qp->ring = NULL;
+ return -ENOMEM;
+diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
+index 8e457e50f837..770e36d0c66f 100644
+--- a/drivers/input/serio/hyperv-keyboard.c
++++ b/drivers/input/serio/hyperv-keyboard.c
+@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
+
+ static void hv_kbd_on_channel_callback(void *context)
+ {
++ struct vmpacket_descriptor *desc;
+ struct hv_device *hv_dev = context;
+- void *buffer;
+- int bufferlen = 0x100; /* Start with sensible size */
+ u32 bytes_recvd;
+ u64 req_id;
+- int error;
+
+- buffer = kmalloc(bufferlen, GFP_ATOMIC);
+- if (!buffer)
+- return;
+-
+- while (1) {
+- error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
+- &bytes_recvd, &req_id);
+- switch (error) {
+- case 0:
+- if (bytes_recvd == 0) {
+- kfree(buffer);
+- return;
+- }
+-
+- hv_kbd_handle_received_packet(hv_dev, buffer,
+- bytes_recvd, req_id);
+- break;
++ foreach_vmbus_pkt(desc, hv_dev->channel) {
++ bytes_recvd = desc->len8 * 8;
++ req_id = desc->trans_id;
+
+- case -ENOBUFS:
+- kfree(buffer);
+- /* Handle large packet */
+- bufferlen = bytes_recvd;
+- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
+- if (!buffer)
+- return;
+- break;
+- }
++ hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
++ req_id);
+ }
+ }
+
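The rewrite removes the grow-the-buffer retry loop entirely: foreach_vmbus_pkt() walks the descriptors in place on the VMBus ring, so the callback no longer needs a GFP_ATOMIC allocation that can fail. Reduced to its core (handle_packet() is a hypothetical stand-in for the driver's handler):

    struct vmpacket_descriptor *desc;

    foreach_vmbus_pkt(desc, channel) {
        u32 bytes = desc->len8 * 8;  /* len8 counts 8-byte units */

        handle_packet(desc, bytes, desc->trans_id);
    }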
+diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
+index 74e4364bc9fb..09113b9ad679 100644
+--- a/drivers/mmc/core/mmc_ops.c
++++ b/drivers/mmc/core/mmc_ops.c
+@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
+ if (index == EXT_CSD_SANITIZE_START)
+ cmd.sanitize_busy = true;
+
+- err = mmc_wait_for_cmd(host, &cmd, 0);
++ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+ if (err)
+ goto out;
+
+diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
+index 73632b843749..b821c9e1604c 100644
+--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
++++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
+@@ -10,7 +10,7 @@
+
+ #include "cavium_ptp.h"
+
+-#define DRV_NAME "Cavium PTP Driver"
++#define DRV_NAME "cavium_ptp"
+
+ #define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+ #define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index fcf20a8f92d9..6a823710987d 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -239,8 +239,10 @@ int octeon_setup_iq(struct octeon_device *oct,
+ }
+
+ oct->num_iqs++;
+- if (oct->fn_list.enable_io_queues(oct))
++ if (oct->fn_list.enable_io_queues(oct)) {
++ octeon_delete_instr_queue(oct, iq_no);
+ return 1;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+index 02959035ed3f..d692251ee252 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+- if (err)
++ if (err) {
++ kvfree(t);
+ return err;
++ }
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ kvfree(t);
+diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
+index d654c234aaf7..c5be4ebd8437 100644
+--- a/drivers/net/ethernet/ibm/ibmveth.c
++++ b/drivers/net/ethernet/ibm/ibmveth.c
+@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ struct net_device *netdev;
+ struct ibmveth_adapter *adapter;
+ unsigned char *mac_addr_p;
+- unsigned int *mcastFilterSize_p;
++ __be32 *mcastFilterSize_p;
+ long ret;
+ unsigned long ret_attr;
+
+@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+ return -EINVAL;
+ }
+
+- mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
+- VETH_MCAST_FILTER_SIZE, NULL);
++ mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
++ VETH_MCAST_FILTER_SIZE,
++ NULL);
+ if (!mcastFilterSize_p) {
+ dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
+ "attribute\n");
+@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
+
+ adapter->vdev = dev;
+ adapter->netdev = netdev;
+- adapter->mcastFilterSize = *mcastFilterSize_p;
++ adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
+ adapter->pool_config = 0;
+
+ netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
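The type change matters on little-endian hosts: the firmware attribute is big-endian, so reading it through a plain unsigned int * yields a byte-swapped filter size. A userspace sketch of the same fix, with ntohl() standing in for the kernel's be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* simulate the raw big-endian attribute as read from firmware */
        uint32_t raw_be = htonl(256);

        /* convert to host byte order before using the value */
        uint32_t filter_size = ntohl(raw_be);

        printf("mcast filter size: %u\n", filter_size);
        return 0;
    }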
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 3da680073265..cebd20f3128d 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
++ dma_unmap_single(dev, tx_buff->indir_dma,
++ sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
+ } else {
+ tx_buff->num_entries = num_entries;
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
+ union sub_crq *next;
+ int index;
+ int i, j;
+- u8 *first;
+
+ restart_loop:
+ while (pending_scrq(adapter, scrq)) {
+@@ -2818,14 +2819,6 @@ restart_loop:
+
+ txbuff->data_dma[j] = 0;
+ }
+- /* if sub_crq was sent indirectly */
+- first = &txbuff->indir_arr[0].generic.first;
+- if (*first == IBMVNIC_CRQ_CMD) {
+- dma_unmap_single(dev, txbuff->indir_dma,
+- sizeof(txbuff->indir_arr),
+- DMA_TO_DEVICE);
+- *first = 0;
+- }
+
+ if (txbuff->last_frag) {
+ dev_kfree_skb_any(txbuff->skb);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 57fd9ee6de66..f7c049559c1a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -7893,11 +7893,8 @@ static void ixgbe_service_task(struct work_struct *work)
+ return;
+ }
+ if (ixgbe_check_fw_error(adapter)) {
+- if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+- rtnl_lock();
++ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ unregister_netdev(adapter->netdev);
+- rtnl_unlock();
+- }
+ ixgbe_service_event_complete(adapter);
+ return;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index c1caf14bc334..c7f86453c638 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -80,17 +80,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+ sq->sqn, err);
+- return err;
++ goto out;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR)
+- return 0;
++ goto out;
+
+ mlx5e_tx_disable_queue(sq->txq);
+
+ err = mlx5e_wait_for_sq_flush(sq);
+ if (err)
+- return err;
++ goto out;
+
+ /* At this point, no new packets will arrive from the stack as TXQ is
+ * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
+@@ -99,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
+
+ err = mlx5e_sq_to_ready(sq, state);
+ if (err)
+- return err;
++ goto out;
+
+ mlx5e_reset_txqsq_cc_pc(sq);
+ sq->stats->recover++;
++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ mlx5e_activate_txqsq(sq);
+
+ return 0;
++out:
++ clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
++ return err;
+ }
+
+ static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 882d26b8095d..bbdfdaf06391 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1279,7 +1279,6 @@ err_free_txqsq:
+ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
+ {
+ sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
+- clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+ set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+index d8b7fba96d58..337b0cbfd153 100644
+--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
++++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ * setup (if available). */
+ status = myri10ge_request_irq(mgp);
+ if (status != 0)
+- goto abort_with_firmware;
++ goto abort_with_slices;
+ myri10ge_free_irq(mgp);
+
+ /* Save configuration space to be restored if the
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+index 1fbfeb43c538..f5ebd9403d72 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
+@@ -1280,9 +1280,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+- !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+- nfp_flower_internal_port_can_offload(app, netdev)))
++ if ((f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
++ !nfp_flower_internal_port_can_offload(app, netdev)) ||
++ (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
++ nfp_flower_internal_port_can_offload(app, netdev)))
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+index 8c67505865a4..43faad1893f7 100644
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -329,13 +329,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
+
+ flow.daddr = *(__be32 *)n->primary_key;
+
+- /* Only concerned with route changes for representors. */
+- if (!nfp_netdev_is_nfp_repr(n->dev))
+- return NOTIFY_DONE;
+-
+ app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
+ app = app_priv->app;
+
++ if (!nfp_netdev_is_nfp_repr(n->dev) &&
++ !nfp_flower_internal_port_can_offload(app, n->dev))
++ return NOTIFY_DONE;
++
+ /* Only concerned with changes to routes already added to NFP. */
+ if (!nfp_tun_has_route(app, flow.daddr))
+ return NOTIFY_DONE;
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index ef8f08931fe8..6cacd5e893ac 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1,7 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Renesas Ethernet AVB device driver
+ *
+- * Copyright (C) 2014-2015 Renesas Electronics Corporation
++ * Copyright (C) 2014-2019 Renesas Electronics Corporation
+ * Copyright (C) 2015 Renesas Solutions Corp.
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
+ *
+@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
+ kfree(ts_skb);
+ if (tag == tfa_tag) {
+ skb_tstamp_tx(skb, &shhwtstamps);
++ dev_consume_skb_any(skb);
+ break;
++ } else {
++ dev_kfree_skb_any(skb);
+ }
+ }
+ ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
+@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ }
+ goto unmap;
+ }
+- ts_skb->skb = skb;
++ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
+ /* Clear the timestamp list */
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
++ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
+
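The ravb change is a reference-counting fix: the skb placed on the timestamp list is pinned with skb_get(), and every exit path now drops exactly one reference, so the skb can neither be freed underneath the list nor leaked at close. The resulting ownership rules, as annotated kernel-style fragments:

    ts_skb->skb = skb_get(skb);   /* enqueue: extra ref held by the list */

    dev_consume_skb_any(skb);     /* matched tag: timestamp delivered */

    dev_kfree_skb_any(skb);       /* stale tag: treated as a drop */

    kfree_skb(ts_skb->skb);       /* close: flush entries still queued */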
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 4644b2aeeba1..e2e469c37a4d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+- if (!ldo) {
+- dev_err(dev, "no regulator found\n");
+- return -1;
+- }
++ if (!ldo)
++ return 0;
+
+ if (enable) {
+ ret = regulator_enable(ldo);
+diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
+index c50a9772f4af..3b5a26b05295 100644
+--- a/drivers/net/ethernet/toshiba/tc35815.c
++++ b/drivers/net/ethernet/toshiba/tc35815.c
+@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_skbs[cur_bd].skb_dma,
+ RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+- if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
++ if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
+ memmove(skb->data, skb->data - NET_IP_ALIGN,
+ pkt_len);
+ data = skb_put(skb, pkt_len);
+diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
+index 78a7de3fb622..c62f474b6d08 100644
+--- a/drivers/net/ethernet/tundra/tsi108_eth.c
++++ b/drivers/net/ethernet/tundra/tsi108_eth.c
+@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
+ static void tsi108_stat_carry(struct net_device *dev)
+ {
+ struct tsi108_prv_data *data = netdev_priv(dev);
++ unsigned long flags;
+ u32 carry1, carry2;
+
+- spin_lock_irq(&data->misclock);
++ spin_lock_irqsave(&data->misclock, flags);
+
+ carry1 = TSI_READ(TSI108_STAT_CARRY1);
+ carry2 = TSI_READ(TSI108_STAT_CARRY2);
+@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
+ TSI108_STAT_TXPAUSEDROP_CARRY,
+ &data->tx_pause_drop);
+
+- spin_unlock_irq(&data->misclock);
++ spin_unlock_irqrestore(&data->misclock, flags);
+ }
+
+ /* Read a stat counter atomically with respect to carries.
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 3544e1991579..e8fce6d715ef 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
+ struct rtnl_link_stats64 *t)
+ {
+ struct net_device_context *ndev_ctx = netdev_priv(net);
+- struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
++ struct netvsc_device *nvdev;
+ struct netvsc_vf_pcpu_stats vf_tot;
+ int i;
+
++ rcu_read_lock();
++
++ nvdev = rcu_dereference(ndev_ctx->nvdev);
+ if (!nvdev)
+- return;
++ goto out;
+
+ netdev_stats_to_stats64(t, &net->stats);
+
+@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
+ t->rx_packets += packets;
+ t->multicast += multicast;
+ }
++out:
++ rcu_read_unlock();
+ }
+
+ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
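rcu_dereference_rtnl() is only valid when RTNL is actually held, and .ndo_get_stats64 can run without it; the fix switches to an explicit RCU read-side critical section. The minimal shape of such a reader (kernel-style; protected_ptr and use() are illustrative):

    struct foo *p;

    rcu_read_lock();
    p = rcu_dereference(protected_ptr);
    if (p)
        use(p);        /* must not sleep inside the read section */
    rcu_read_unlock();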
+diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
+index 58bb25e4af10..7935593debb1 100644
+--- a/drivers/net/phy/phy-c45.c
++++ b/drivers/net/phy/phy-c45.c
+@@ -523,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
+ }
+ EXPORT_SYMBOL_GPL(genphy_c45_read_status);
+
++/**
++ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
++ * @phydev: target phy_device struct
++ *
++ * Description: If auto-negotiation is enabled, we configure the
++ * advertising, and then restart auto-negotiation. If it is not
++ * enabled, then we force a configuration.
++ */
++int genphy_c45_config_aneg(struct phy_device *phydev)
++{
++ bool changed = false;
++ int ret;
++
++ if (phydev->autoneg == AUTONEG_DISABLE)
++ return genphy_c45_pma_setup_forced(phydev);
++
++ ret = genphy_c45_an_config_aneg(phydev);
++ if (ret < 0)
++ return ret;
++ if (ret > 0)
++ changed = true;
++
++ return genphy_c45_check_and_restart_aneg(phydev, changed);
++}
++EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
++
+ /* The gen10g_* functions are the old Clause 45 stub */
+
+ int gen10g_config_aneg(struct phy_device *phydev)
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index e8885429293a..57b337687782 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -499,7 +499,7 @@ static int phy_config_aneg(struct phy_device *phydev)
+ * allowed to call genphy_config_aneg()
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+- return -EOPNOTSUPP;
++ return genphy_c45_config_aneg(phydev);
+
+ return genphy_config_aneg(phydev);
+ }
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index 5519248a791e..32b08b18e120 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
+ }
+ if (!timeout) {
+ dev_err(&udev->dev, "firmware not ready in time\n");
+- return -ETIMEDOUT;
++ ret = -ETIMEDOUT;
++ goto err;
+ }
+
+ /* enable ethernet mode (?) */
+diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
+index d62b6706a537..fc5895f85cee 100644
+--- a/drivers/net/usb/kalmia.c
++++ b/drivers/net/usb/kalmia.c
+@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
+ usb_buf, 24);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(usb_buf, init_msg_2, 12);
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
+ usb_buf, 28);
+ if (status != 0)
+- return status;
++ goto out;
+
+ memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
+-
++out:
+ kfree(usb_buf);
+ return status;
+ }
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 3d92ea6fcc02..f033fee225a1 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
+ ret = register_netdev(netdev);
+ if (ret != 0) {
+ netif_err(dev, probe, netdev, "couldn't register the device\n");
+- goto out3;
++ goto out4;
+ }
+
+ usb_set_intfdata(intf, dev);
+@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
+
+ ret = lan78xx_phy_init(dev);
+ if (ret < 0)
+- goto out4;
++ goto out5;
+
+ return 0;
+
+-out4:
++out5:
+ unregister_netdev(netdev);
++out4:
++ usb_free_urb(dev->urb_intr);
+ out3:
+ lan78xx_unbind(dev, intf);
+ out2:
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index e0dcb681cfe5..1a7b7bd412f9 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3987,8 +3987,7 @@ static int rtl8152_close(struct net_device *netdev)
+ #ifdef CONFIG_PM_SLEEP
+ unregister_pm_notifier(&tp->pm_notifier);
+ #endif
+- if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+- napi_disable(&tp->napi);
++ napi_disable(&tp->napi);
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+@@ -5310,7 +5309,6 @@ static int rtl8152_probe(struct usb_interface *intf,
+ return 0;
+
+ out1:
+- netif_napi_del(&tp->napi);
+ usb_set_intfdata(intf, NULL);
+ out:
+ free_netdev(netdev);
+@@ -5328,7 +5326,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
+ if (udev->state == USB_STATE_NOTATTACHED)
+ set_bit(RTL8152_UNPLUG, &tp->flags);
+
+- netif_napi_del(&tp->napi);
+ unregister_netdev(tp->netdev);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ tp->rtl_ops.unload(tp);
+diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
+index e9fc168bb734..489cba9b284d 100644
+--- a/drivers/net/wimax/i2400m/fw.c
++++ b/drivers/net/wimax/i2400m/fw.c
+@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
+ }
+ result = i2400m_barker_db_add(barker);
+ if (result < 0)
+- goto error_add;
++ goto error_parse_add;
+ }
+ kfree(options_orig);
+ }
+ return 0;
+
++error_parse_add:
+ error_parse:
++ kfree(options_orig);
+ error_add:
+ kfree(i2400m_barker_db);
+ return result;
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 601509b3251a..963b4c6309b9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2549,6 +2549,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+ goto out_free;
+ }
+
++ if (!(ctrl->ops->flags & NVME_F_FABRICS))
++ ctrl->cntlid = le16_to_cpu(id->cntlid);
++
+ if (!ctrl->identified) {
+ int i;
+
+@@ -2649,7 +2652,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
+ goto out_free;
+ }
+ } else {
+- ctrl->cntlid = le16_to_cpu(id->cntlid);
+ ctrl->hmpre = le32_to_cpu(id->hmpre);
+ ctrl->hmmin = le32_to_cpu(id->hmmin);
+ ctrl->hmminds = le32_to_cpu(id->hmminds);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 747c0d4f9ff5..304aa8a65f2f 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -420,6 +420,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ }
+
++ synchronize_srcu(&ns->head->srcu);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
+
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 784a2e76a1b0..c5f60f95e8db 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -640,6 +640,7 @@ struct qeth_seqno {
+ struct qeth_reply {
+ struct list_head list;
+ struct completion received;
++ spinlock_t lock;
+ int (*callback)(struct qeth_card *, struct qeth_reply *,
+ unsigned long);
+ u32 seqno;
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index b1823d75dd35..6b8f99e7d8a8 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -548,6 +548,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+ if (reply) {
+ refcount_set(&reply->refcnt, 1);
+ init_completion(&reply->received);
++ spin_lock_init(&reply->lock);
+ }
+ return reply;
+ }
+@@ -832,6 +833,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
+
+ if (!reply->callback) {
+ rc = 0;
++ goto no_callback;
++ }
++
++ spin_lock_irqsave(&reply->lock, flags);
++ if (reply->rc) {
++ /* Bail out when the requestor has already left: */
++ rc = reply->rc;
+ } else {
+ if (cmd) {
+ reply->offset = (u16)((char *)cmd - (char *)iob->data);
+@@ -840,7 +848,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
+ rc = reply->callback(card, reply, (unsigned long)iob);
+ }
+ }
++ spin_unlock_irqrestore(&reply->lock, flags);
+
++no_callback:
+ if (rc <= 0)
+ qeth_notify_reply(reply, rc);
+ qeth_put_reply(reply);
+@@ -1880,6 +1890,16 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
+
+ qeth_dequeue_reply(card, reply);
++
++ if (reply_cb) {
++ /* Wait until the callback for a late reply has completed: */
++ spin_lock_irq(&reply->lock);
++ if (rc)
++ /* Zap any callback that's still pending: */
++ reply->rc = rc;
++ spin_unlock_irq(&reply->lock);
++ }
++
+ if (!rc)
+ rc = reply->rc;
+ qeth_put_reply(reply);
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index aafcffaa25f7..4604e1bc334c 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -822,6 +822,7 @@ struct lpfc_hba {
+ uint32_t cfg_cq_poll_threshold;
+ uint32_t cfg_cq_max_proc_limit;
+ uint32_t cfg_fcp_cpu_map;
++ uint32_t cfg_fcp_mq_threshold;
+ uint32_t cfg_hdw_queue;
+ uint32_t cfg_irq_chann;
+ uint32_t cfg_suppress_rsp;
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index d4c65e2109e2..353da12d797b 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -5640,6 +5640,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
+ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
+ "Embed NVME Command in WQE");
+
++/*
++ * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
++ * the driver will advertise it supports to the SCSI layer.
++ *
++ * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
++ * 1-128 = Manually specify the maximum nr_hw_queue value to be set.
++ *
++ * Value range is [0,128]. Default value is 8.
++ */
++LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
++ LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
++ "Set the number of SCSI Queues advertised");
++
+ /*
+ * lpfc_hdw_queue: Set the number of Hardware Queues the driver
+ * will advertise it supports to the NVME and SCSI layers. This also
+@@ -5961,6 +5974,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
+ &dev_attr_lpfc_cq_poll_threshold,
+ &dev_attr_lpfc_cq_max_proc_limit,
+ &dev_attr_lpfc_fcp_cpu_map,
++ &dev_attr_lpfc_fcp_mq_threshold,
+ &dev_attr_lpfc_hdw_queue,
+ &dev_attr_lpfc_irq_chann,
+ &dev_attr_lpfc_suppress_rsp,
+@@ -7042,6 +7056,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
+ /* Initialize first burst. Target vs Initiator are different. */
+ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
+ lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
++ lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
+ lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
+ lpfc_irq_chann_init(phba, lpfc_irq_chann);
+ lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index eaaef682de25..2fd8f15f9997 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -4308,10 +4308,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+ shost->max_cmd_len = 16;
+
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+- if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
+- shost->nr_hw_queues = phba->cfg_hdw_queue;
+- else
+- shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
++ if (!phba->cfg_fcp_mq_threshold ||
++ phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
++ phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
++
++ shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
++ phba->cfg_fcp_mq_threshold);
+
+ shost->dma_boundary =
+ phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
+diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
+index 8e4fd1a98023..986594ec40e2 100644
+--- a/drivers/scsi/lpfc/lpfc_sli4.h
++++ b/drivers/scsi/lpfc/lpfc_sli4.h
+@@ -44,6 +44,11 @@
+ #define LPFC_HBA_HDWQ_MAX 128
+ #define LPFC_HBA_HDWQ_DEF 0
+
++/* FCP MQ queue count limiting */
++#define LPFC_FCP_MQ_THRESHOLD_MIN 0
++#define LPFC_FCP_MQ_THRESHOLD_MAX 128
++#define LPFC_FCP_MQ_THRESHOLD_DEF 8
++
+ /* Common buffer size to accommodate SCSI and NVME IO buffers */
+ #define LPFC_COMMON_IO_BUF_SZ 768
+
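The lpfc_create_port() hunk first clamps the module parameter (0, or anything above the hardware queue count, falls back to cfg_hdw_queue) and then caps the advertised queue count at twice the number of NUMA nodes. The same computation as a standalone sketch:

    #include <stdio.h>

    static int nr_hw_queues(int threshold, int hdw_queue, int nodes)
    {
        if (!threshold || threshold > hdw_queue)
            threshold = hdw_queue;  /* 0 means "use the HW queue count" */
        return (2 * nodes < threshold) ? 2 * nodes : threshold;
    }

    int main(void)
    {
        printf("%d\n", nr_hw_queues(8, 16, 2));   /* -> 4 */
        printf("%d\n", nr_hw_queues(0, 16, 16));  /* -> 16 */
        return 0;
    }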
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 8d560c562e9c..6b7b390b2e52 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
+ vha->gnl.ldma);
+
++ vha->gnl.l = NULL;
++
+ vfree(vha->scan.l);
+
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index d056f5e7cf93..794478e5f7ec 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3440,6 +3440,12 @@ skip_dpc:
+ return 0;
+
+ probe_failed:
++ if (base_vha->gnl.l) {
++ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
++ base_vha->gnl.l, base_vha->gnl.ldma);
++ base_vha->gnl.l = NULL;
++ }
++
+ if (base_vha->timer_active)
+ qla2x00_stop_timer(base_vha);
+ base_vha->flags.online = 0;
+@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ if (!atomic_read(&pdev->enable_cnt)) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+-
++ base_vha->gnl.l = NULL;
+ scsi_host_put(base_vha->host);
+ kfree(ha);
+ pci_set_drvdata(pdev, NULL);
+@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ dma_free_coherent(&ha->pdev->dev,
+ base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+
++ base_vha->gnl.l = NULL;
++
+ vfree(base_vha->scan.l);
+
+ if (IS_QLAFX00(ha))
+@@ -4817,6 +4825,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
++ vha->gnl.l = NULL;
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index b43d6385a1a0..95b2371fb67b 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ struct se_cmd *se_cmd = cmd->se_cmd;
+ struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+- uint32_t read_len = se_cmd->data_length;
++ uint32_t read_len;
+
+ /*
+ * cmd has been completed already from timeout, just reclaim
+ * data area space and free cmd
+ */
+- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
++ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
++ WARN_ON_ONCE(se_cmd);
+ goto out;
++ }
+
+ list_del_init(&cmd->queue_entry);
+
+@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ goto done;
+ }
+
++ read_len = se_cmd->data_length;
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
+ */
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ list_del_init(&cmd->queue_entry);
++ cmd->se_cmd = NULL;
+ } else {
+ list_del_init(&cmd->queue_entry);
+ idr_remove(&udev->commands, id);
+@@ -2024,6 +2028,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+
+ idr_remove(&udev->commands, i);
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
++ WARN_ON(!cmd->se_cmd);
+ list_del_init(&cmd->queue_entry);
+ if (err_level == 1) {
+ /*
+diff --git a/fs/afs/cell.c b/fs/afs/cell.c
+index a2a87117d262..fd5133e26a38 100644
+--- a/fs/afs/cell.c
++++ b/fs/afs/cell.c
+@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+ cell = rcu_dereference_raw(net->ws_cell);
+ if (cell) {
+ afs_get_cell(cell);
++ ret = 0;
+ break;
+ }
+ ret = -EDESTADDRREQ;
+@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
+
+ done_seqretry(&net->cells_lock, seq);
+
++ if (ret != 0 && cell)
++ afs_put_cell(net, cell);
++
+ return ret == 0 ? cell : ERR_PTR(ret);
+ }
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 9620f19308f5..9bd5c067d55d 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -960,7 +960,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+ inode ? AFS_FS_I(inode) : NULL);
+ } else {
+ trace_afs_lookup(dvnode, &dentry->d_name,
+- inode ? AFS_FS_I(inode) : NULL);
++ IS_ERR_OR_NULL(inode) ? NULL
++ : AFS_FS_I(inode));
+ }
+ return d;
+ }
+diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
+index 18722aaeda33..a1baf3f1f14d 100644
+--- a/fs/afs/yfsclient.c
++++ b/fs/afs/yfsclient.c
+@@ -2155,7 +2155,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
+ key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+ size = round_up(acl->size, 4);
+- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
++ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
+ sizeof(__be32) * 2 +
+ sizeof(struct yfs_xdr_YFSFid) +
+ sizeof(__be32) + size,
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 7754d7679122..622467e47cde 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1305,6 +1305,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ {
+ struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->vfs_inode;
++ struct ceph_buffer *old_blob = NULL;
+ struct cap_msg_args arg;
+ int held, revoking;
+ int wake = 0;
+@@ -1369,7 +1370,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ ci->i_requested_max_size = arg.max_size;
+
+ if (flushing & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ arg.xattr_version = ci->i_xattrs.version;
+ arg.xattr_buf = ci->i_xattrs.blob;
+ } else {
+@@ -1404,6 +1405,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
++
+ ret = send_cap_msg(&arg);
+ if (ret < 0) {
+ dout("error sending cap msg, must requeue %p\n", inode);
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 3c7a32779574..ca3821b0309f 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -743,6 +743,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ int issued, new_issued, info_caps;
+ struct timespec64 mtime, atime, ctime;
+ struct ceph_buffer *xattr_blob = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ struct ceph_string *pool_ns = NULL;
+ struct ceph_cap *new_cap = NULL;
+ int err = 0;
+@@ -883,7 +884,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
+ le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = xattr_blob;
+ if (xattr_blob)
+ memcpy(ci->i_xattrs.blob->vec.iov_base,
+@@ -1023,8 +1024,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ out:
+ if (new_cap)
+ ceph_put_cap(mdsc, new_cap);
+- if (xattr_blob)
+- ceph_buffer_put(xattr_blob);
++ ceph_buffer_put(old_blob);
++ ceph_buffer_put(xattr_blob);
+ ceph_put_string(pool_ns);
+ return err;
+ }
+diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
+index 72c6c022f02b..213bc1475e91 100644
+--- a/fs/ceph/snap.c
++++ b/fs/ceph/snap.c
+@@ -464,6 +464,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap_snap *capsnap;
+ struct ceph_snap_context *old_snapc, *new_snapc;
++ struct ceph_buffer *old_blob = NULL;
+ int used, dirty;
+
+ capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+@@ -540,7 +541,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
+ capsnap->gid = inode->i_gid;
+
+ if (dirty & CEPH_CAP_XATTR_EXCL) {
+- __ceph_build_xattrs_blob(ci);
++ old_blob = __ceph_build_xattrs_blob(ci);
+ capsnap->xattr_blob =
+ ceph_buffer_get(ci->i_xattrs.blob);
+ capsnap->xattr_version = ci->i_xattrs.version;
+@@ -583,6 +584,7 @@ update_snapc:
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
++ ceph_buffer_put(old_blob);
+ kfree(capsnap);
+ ceph_put_snap_context(old_snapc);
+ }
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 1d313d0536f9..38b42d7594b6 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -924,7 +924,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
+ int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
+ ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
+ extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
+-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
++extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+ extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
+ extern void __init ceph_xattr_init(void);
+ extern void ceph_xattr_exit(void);
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0619adbcbe14..9772db01720b 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -752,12 +752,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
+
+ /*
+ * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+- * and swap into place.
++ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
++ * that it can be freed by the caller as the i_ceph_lock is likely to be
++ * held.
+ */
+-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
++struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ {
+ struct rb_node *p;
+ struct ceph_inode_xattr *xattr = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ void *dest;
+
+ dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+@@ -788,12 +791,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+ dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
+
+ if (ci->i_xattrs.blob)
+- ceph_buffer_put(ci->i_xattrs.blob);
++ old_blob = ci->i_xattrs.blob;
+ ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = NULL;
+ ci->i_xattrs.dirty = false;
+ ci->i_xattrs.version++;
+ }
++
++ return old_blob;
+ }
+
+ static inline int __get_request_mask(struct inode *in) {
+@@ -1028,6 +1033,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_cap_flush *prealloc_cf = NULL;
++ struct ceph_buffer *old_blob = NULL;
+ int issued;
+ int err;
+ int dirty = 0;
+@@ -1101,13 +1107,15 @@ retry:
+ struct ceph_buffer *blob;
+
+ spin_unlock(&ci->i_ceph_lock);
+- dout(" preaallocating new blob size=%d\n", required_blob_size);
++ ceph_buffer_put(old_blob); /* Shouldn't be required */
++ dout(" pre-allocating new blob size=%d\n", required_blob_size);
+ blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
+ if (!blob)
+ goto do_sync_unlocked;
+ spin_lock(&ci->i_ceph_lock);
++ /* prealloc_blob can't be released while holding i_ceph_lock */
+ if (ci->i_xattrs.prealloc_blob)
+- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
++ old_blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = blob;
+ goto retry;
+ }
+@@ -1123,6 +1131,7 @@ retry:
+ }
+
+ spin_unlock(&ci->i_ceph_lock);
++ ceph_buffer_put(old_blob);
+ if (lock_snap_rwsem)
+ up_read(&mdsc->snap_rwsem);
+ if (dirty)
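All of the ceph call sites above now follow one discipline: detach the old buffer while i_ceph_lock is held, but drop the last reference only after the spinlock is released, because ceph_buffer_put() may free memory and freeing must not happen under a spinlock. The pattern in isolation, as a userspace sketch with a mutex standing in for i_ceph_lock:

    #include <pthread.h>
    #include <stdlib.h>

    struct inode_info {
        pthread_mutex_t lock;
        void *blob;
    };

    static void replace_blob(struct inode_info *ci, void *new_blob)
    {
        void *old_blob;

        pthread_mutex_lock(&ci->lock);
        old_blob = ci->blob;    /* detach under the lock... */
        ci->blob = new_blob;
        pthread_mutex_unlock(&ci->lock);

        free(old_blob);         /* ...release only after dropping it */
    }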
+diff --git a/fs/read_write.c b/fs/read_write.c
+index c543d965e288..e8b0f1192a3a 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -1776,10 +1776,7 @@ static int generic_remap_check_len(struct inode *inode_in,
+ return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
+ }
+
+-/*
+- * Read a page's worth of file data into the page cache. Return the page
+- * locked.
+- */
++/* Read a page's worth of file data into the page cache. */
+ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ {
+ struct page *page;
+@@ -1791,10 +1788,32 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+ put_page(page);
+ return ERR_PTR(-EIO);
+ }
+- lock_page(page);
+ return page;
+ }
+
++/*
++ * Lock two pages, ensuring that we lock in offset order if the pages are from
++ * the same file.
++ */
++static void vfs_lock_two_pages(struct page *page1, struct page *page2)
++{
++ /* Always lock in order of increasing index. */
++ if (page1->index > page2->index)
++ swap(page1, page2);
++
++ lock_page(page1);
++ if (page1 != page2)
++ lock_page(page2);
++}
++
++/* Unlock two pages, being careful not to unlock the same page twice. */
++static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
++{
++ unlock_page(page1);
++ if (page1 != page2)
++ unlock_page(page2);
++}
++
+ /*
+ * Compare extents of two files to see if they are the same.
+ * Caller must have locked both inodes to prevent write races.
+@@ -1832,10 +1851,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ dest_page = vfs_dedupe_get_page(dest, destoff);
+ if (IS_ERR(dest_page)) {
+ error = PTR_ERR(dest_page);
+- unlock_page(src_page);
+ put_page(src_page);
+ goto out_error;
+ }
++
++ vfs_lock_two_pages(src_page, dest_page);
++
++ /*
++ * Now that we've locked both pages, make sure they're still
++ * mapped to the file data we're interested in. If not,
++ * someone is invalidating pages on us and we lose.
++ */
++ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
++ src_page->mapping != src->i_mapping ||
++ dest_page->mapping != dest->i_mapping) {
++ same = false;
++ goto unlock;
++ }
++
+ src_addr = kmap_atomic(src_page);
+ dest_addr = kmap_atomic(dest_page);
+
+@@ -1847,8 +1880,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+
+ kunmap_atomic(dest_addr);
+ kunmap_atomic(src_addr);
+- unlock_page(dest_page);
+- unlock_page(src_page);
++unlock:
++ vfs_unlock_two_pages(src_page, dest_page);
+ put_page(dest_page);
+ put_page(src_page);
+
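vfs_lock_two_pages() prevents an ABBA deadlock between two dedupe threads by imposing one global lock order (ascending page index) and by never locking the same page twice; the PageUptodate/mapping re-checks after locking guard against the pages having been invalidated between lookup and lock. The ordering idea in portable C, using addresses instead of indices (lock_two() and the mutexes are illustrative):

    #include <pthread.h>
    #include <stdint.h>

    static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        /* One global order for everyone: no thread can hold a while
         * waiting for b if another holds b while waiting for a. */
        if ((uintptr_t)a > (uintptr_t)b) {
            pthread_mutex_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(a);
        if (a != b)             /* same lock passed twice: take it once */
            pthread_mutex_lock(b);
    }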
+diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
+index 5e58bb29b1a3..11cdc7c60480 100644
+--- a/include/linux/ceph/buffer.h
++++ b/include/linux/ceph/buffer.h
+@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
+
+ static inline void ceph_buffer_put(struct ceph_buffer *b)
+ {
+- kref_put(&b->kref, ceph_buffer_release);
++ if (b)
++ kref_put(&b->kref, ceph_buffer_release);
+ }
+
+ extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
+diff --git a/include/linux/gpio.h b/include/linux/gpio.h
+index 39745b8bdd65..b3115d1a7d49 100644
+--- a/include/linux/gpio.h
++++ b/include/linux/gpio.h
+@@ -240,30 +240,6 @@ static inline int irq_to_gpio(unsigned irq)
+ return -EINVAL;
+ }
+
+-static inline int
+-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+- unsigned int gpio_offset, unsigned int pin_offset,
+- unsigned int npins)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline int
+-gpiochip_add_pingroup_range(struct gpio_chip *chip,
+- struct pinctrl_dev *pctldev,
+- unsigned int gpio_offset, const char *pin_group)
+-{
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-
+-static inline void
+-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+-{
+- WARN_ON(1);
+-}
+-
+ static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+ {
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 6424586fe2d6..7c5a9fb9c9f4 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -1108,6 +1108,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
+ int genphy_c45_read_mdix(struct phy_device *phydev);
+ int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+ int genphy_c45_read_status(struct phy_device *phydev);
++int genphy_c45_config_aneg(struct phy_device *phydev);
+
+ /* The gen10g_* functions are the old Clause 45 stub */
+ int gen10g_config_aneg(struct phy_device *phydev);
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index c61a1bf4e3de..3a1a72990fce 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -15,6 +15,7 @@
+ struct tcf_idrinfo {
+ struct mutex lock;
+ struct idr action_idr;
++ struct net *net;
+ };
+
+ struct tc_action_ops;
+@@ -108,7 +109,7 @@ struct tc_action_net {
+ };
+
+ static inline
+-int tc_action_net_init(struct tc_action_net *tn,
++int tc_action_net_init(struct net *net, struct tc_action_net *tn,
+ const struct tc_action_ops *ops)
+ {
+ int err = 0;
+@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
+ if (!tn->idrinfo)
+ return -ENOMEM;
+ tn->ops = ops;
++ tn->idrinfo->net = net;
+ mutex_init(&tn->idrinfo->lock);
+ idr_init(&tn->idrinfo->action_idr);
+ return err;
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 5b8624ae4a27..930d062940b7 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -419,8 +419,7 @@ struct nft_set {
+ unsigned char *udata;
+ /* runtime data below here */
+ const struct nft_set_ops *ops ____cacheline_aligned;
+- u16 flags:13,
+- bound:1,
++ u16 flags:14,
+ genmask:2;
+ u8 klen;
+ u8 dlen;
+@@ -1333,12 +1332,15 @@ struct nft_trans_rule {
+ struct nft_trans_set {
+ struct nft_set *set;
+ u32 set_id;
++ bool bound;
+ };
+
+ #define nft_trans_set(trans) \
+ (((struct nft_trans_set *)trans->data)->set)
+ #define nft_trans_set_id(trans) \
+ (((struct nft_trans_set *)trans->data)->set_id)
++#define nft_trans_set_bound(trans) \
++ (((struct nft_trans_set *)trans->data)->bound)
+
+ struct nft_trans_chain {
+ bool update;
+@@ -1369,12 +1371,15 @@ struct nft_trans_table {
+ struct nft_trans_elem {
+ struct nft_set *set;
+ struct nft_set_elem elem;
++ bool bound;
+ };
+
+ #define nft_trans_elem_set(trans) \
+ (((struct nft_trans_elem *)trans->data)->set)
+ #define nft_trans_elem(trans) \
+ (((struct nft_trans_elem *)trans->data)->elem)
++#define nft_trans_elem_set_bound(trans) \
++ (((struct nft_trans_elem *)trans->data)->bound)
+
+ struct nft_trans_obj {
+ struct nft_object *obj;
+diff --git a/include/net/psample.h b/include/net/psample.h
+index 37a4df2325b2..6b578ce69cd8 100644
+--- a/include/net/psample.h
++++ b/include/net/psample.h
+@@ -11,6 +11,7 @@ struct psample_group {
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
++ struct rcu_head rcu;
+ };
+
+ struct psample_group *psample_group_get(struct net *net, u32 group_num);
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 445337c107e0..2504c269e658 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+ */
+ static void do_optimize_kprobes(void)
+ {
++ lockdep_assert_held(&text_mutex);
+ /*
+ * The optimization/unoptimization refers online_cpus via
+ * stop_machine() and cpu-hotplug modifies online_cpus.
+@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
+ list_empty(&optimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_optimize_kprobes(&optimizing_list);
+- mutex_unlock(&text_mutex);
+ }
+
+ /*
+@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
+ {
+ struct optimized_kprobe *op, *tmp;
+
++ lockdep_assert_held(&text_mutex);
+ /* See comment in do_optimize_kprobes() */
+ lockdep_assert_cpus_held();
+
+@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
+ if (list_empty(&unoptimizing_list))
+ return;
+
+- mutex_lock(&text_mutex);
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+ list_for_each_entry_safe(op, tmp, &freeing_list, list) {
+@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
+ } else
+ list_del_init(&op->list);
+ }
+- mutex_unlock(&text_mutex);
+ }
+
+ /* Reclaim all kprobes on the free_list */
+@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ {
+ mutex_lock(&kprobe_mutex);
+ cpus_read_lock();
++ mutex_lock(&text_mutex);
+ /* Lock modules while optimizing kprobes */
+ mutex_lock(&module_mutex);
+
+@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ do_free_cleaned_kprobes();
+
+ mutex_unlock(&module_mutex);
++ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+ mutex_unlock(&kprobe_mutex);
+
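+
The kprobes hunk hoists text_mutex out of do_optimize_kprobes()/do_unoptimize_kprobes() into kprobe_optimizer(), so the lock is taken once, in a fixed order relative to cpus_read_lock() and module_mutex, and the helpers merely assert it is held. A userspace analogy with pthreads; the held flag stands in for lockdep_assert_held() and is illustrative only:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;
static int text_mutex_held;   /* poor man's lockdep_assert_held() */

static void do_optimize(void)
{
    assert(text_mutex_held);  /* caller must hold the lock */
    puts("optimizing");
}

static void do_unoptimize(void)
{
    assert(text_mutex_held);
    puts("unoptimizing");
}

static void optimizer(void)
{
    pthread_mutex_lock(&text_mutex);   /* one acquisition, fixed order */
    text_mutex_held = 1;

    do_unoptimize();
    do_optimize();

    text_mutex_held = 0;
    pthread_mutex_unlock(&text_mutex);
}

int main(void)
{
    optimizer();   /* build with -pthread */
    return 0;
}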
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4d5962232a55..42bc2986520d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3469,7 +3469,7 @@ void __noreturn do_task_dead(void)
+
+ static inline void sched_submit_work(struct task_struct *tsk)
+ {
+- if (!tsk->state || tsk_is_pi_blocked(tsk))
++ if (!tsk->state)
+ return;
+
+ /*
+@@ -3485,6 +3485,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ preempt_enable_no_resched();
+ }
+
++ if (tsk_is_pi_blocked(tsk))
++ return;
++
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index ec54e236e345..50fe9dfb088b 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -1653,7 +1653,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
+
+ while (bucket_tmp < hash->size) {
+ if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
+- *bucket, &idx_tmp))
++ bucket_tmp, &idx_tmp))
+ break;
+
+ bucket_tmp++;
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index dd8b1a460d64..cb36d01ea0dd 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
+ txq = netdev_get_tx_queue(dev, q_index);
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (netif_xmit_frozen_or_stopped(txq) ||
+- netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
++ !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
+ skb_queue_head(&npinfo->txq, skb);
+ HARD_TX_UNLOCK(dev, txq);
+ local_irq_restore(flags);
+@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ HARD_TX_UNLOCK(dev, txq);
+
+- if (status == NETDEV_TX_OK)
++ if (dev_xmit_complete(status))
+ break;
+
+ }
+@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+
+ }
+
+- if (status != NETDEV_TX_OK) {
++ if (!dev_xmit_complete(status)) {
+ skb_queue_tail(&npinfo->txq, skb);
+ schedule_delayed_work(&npinfo->tx_work,0);
+ }
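+
The netpoll fix switches the retry condition from status != NETDEV_TX_OK to !dev_xmit_complete(status): congestion-style return codes still mean the driver consumed the skb, so requeueing it would reuse a freed buffer. A simplified model of the predicate (enum values are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum tx_status { TX_OK, TX_CN, TX_BUSY };   /* illustrative codes */

/* mirrors the idea of dev_xmit_complete(): the buffer is gone unless
 * the driver explicitly asked us to retry */
static bool xmit_complete(enum tx_status s)
{
    return s != TX_BUSY;
}

int main(void)
{
    enum tx_status s = TX_CN;   /* congested, but the packet was taken */

    if (xmit_complete(s))
        puts("consumed: do not requeue");
    else
        puts("retry: requeue the packet");
    return 0;
}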
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 65a35e976d7b..4d0312b97cce 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -28,6 +28,7 @@
+ *
+ * RSV - VID[9]:
+ * To be used for further expansion of SWITCH_ID or for other purposes.
++ * Must be transmitted as zero and ignored on receive.
+ *
+ * SWITCH_ID - VID[8:6]:
+ * Index of switch within DSA tree. Must be between 0 and
+@@ -35,6 +36,7 @@
+ *
+ * RSV - VID[5:4]:
+ * To be used for further expansion of PORT or for other purposes.
++ * Must be transmitted as zero and ignored on receive.
+ *
+ * PORT - VID[3:0]:
+ * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
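+
With the reserved bits above now documented as transmit-as-zero, the tag layout can be exercised in isolation. A sketch of packing SWITCH_ID (VID[8:6]) and PORT (VID[3:0]) while keeping the RSV bits clear; the macro names are made up and any higher VID bits are left out:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* layout from the comment above */
#define VID_SWITCH_SHIFT  6
#define VID_SWITCH_MASK   (0x7u << VID_SWITCH_SHIFT)   /* VID[8:6] */
#define VID_PORT_MASK     0xfu                         /* VID[3:0] */
#define VID_RSV_MASK      ((1u << 9) | (0x3u << 4))    /* VID[9], VID[5:4] */

static uint16_t vid_pack(unsigned int sw, unsigned int port)
{
    /* reserved bits are transmitted as zero */
    return (uint16_t)((sw << VID_SWITCH_SHIFT) | port);
}

int main(void)
{
    uint16_t vid = vid_pack(5, 3);

    assert((vid & VID_RSV_MASK) == 0);   /* RSV ignored on receive anyway */
    printf("switch %u, port %u -> VID 0x%03x\n",
           (vid & VID_SWITCH_MASK) >> VID_SWITCH_SHIFT,
           vid & VID_PORT_MASK, vid);
    return 0;
}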
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b30f7f877181..b2f0d2988a8e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+ return mss_now;
+ }
+
++/* In some cases, both sendpage() and sendmsg() could have added
++ * an skb to the write queue, but failed adding payload on it.
++ * We need to remove it to consume less memory, but more
++ * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
++ * users.
++ */
++static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
++{
++ if (skb && !skb->len) {
++ tcp_unlink_write_queue(skb, sk);
++ if (tcp_write_queue_empty(sk))
++ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
++ sk_wmem_free_skb(sk, skb);
++ }
++}
++
+ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+ size_t size, int flags)
+ {
+@@ -1064,6 +1080,7 @@ out:
+ return copied;
+
+ do_error:
++ tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
+ if (copied)
+ goto out;
+ out_err:
+@@ -1388,18 +1405,11 @@ out_nopush:
+ sock_zerocopy_put(uarg);
+ return copied + copied_syn;
+
++do_error:
++ skb = tcp_write_queue_tail(sk);
+ do_fault:
+- if (!skb->len) {
+- tcp_unlink_write_queue(skb, sk);
+- /* It is the one place in all of TCP, except connection
+- * reset, where we can be unlinking the send_head.
+- */
+- if (tcp_write_queue_empty(sk))
+- tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+- sk_wmem_free_skb(sk, skb);
+- }
++ tcp_remove_empty_skb(sk, skb);
+
+-do_error:
+ if (copied + copied_syn)
+ goto out;
+ out_err:
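+
tcp_remove_empty_skb() above consolidates cleanup that the sendmsg path handled inline and the sendpage error path missed entirely: an skb queued without payload must be unlinked so edge-triggered epoll users still get their EPOLLOUT. The shape of the refactor, reduced to a toy queue with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct skb { size_t len; };

struct sock_queue {
    struct skb *tail;   /* single-slot stand-in for the write queue */
};

/* shared error-path helper, like tcp_remove_empty_skb(): drop a
 * queued buffer that never received payload */
static void remove_empty_skb(struct sock_queue *q)
{
    if (q->tail && q->tail->len == 0) {
        free(q->tail);
        q->tail = NULL;
        puts("unlinked empty skb");
    }
}

int main(void)
{
    struct sock_queue q = { .tail = calloc(1, sizeof(struct skb)) };

    /* both send paths now funnel their error handling through here */
    remove_empty_skb(&q);
    return 0;
}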
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 359d298348c7..37c2f1204c1a 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2051,7 +2051,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+ if (len <= skb->len)
+ break;
+
+- if (unlikely(TCP_SKB_CB(skb)->eor))
++ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
+ return false;
+
+ len -= skb->len;
+@@ -2168,6 +2168,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ * we need to propagate it to the new skb.
+ */
+ TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
++ tcp_skb_collapse_tstamp(nskb, skb);
+ tcp_unlink_write_queue(skb, sk);
+ sk_wmem_free_skb(sk, skb);
+ } else {
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 7f3f13c37916..eaa4c2cc2fbb 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
+ if (pmc) {
+ im->idev = pmc->idev;
+ if (im->mca_sfmode == MCAST_INCLUDE) {
+- im->mca_tomb = pmc->mca_tomb;
+- im->mca_sources = pmc->mca_sources;
++ swap(im->mca_tomb, pmc->mca_tomb);
++ swap(im->mca_sources, pmc->mca_sources);
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = idev->mc_qrv;
+ } else {
+ im->mca_crcount = idev->mc_qrv;
+ }
+ in6_dev_put(pmc->idev);
++ ip6_mc_clear_src(pmc);
+ kfree(pmc);
+ }
+ spin_unlock_bh(&im->mca_lock);
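+
The mld_del_delrec() fix replaces two plain pointer assignments with swap() and clears the old record's sources before the kfree(): the live record takes ownership of the lists, while the tomb record is left holding whatever still needs tearing down, so nothing leaks and nothing is freed twice. The pattern in plain C (GCC-style swap macro, as in the kernel):

#include <stdio.h>
#include <stdlib.h>

#define swap(a, b) \
    do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

struct src { struct src *next; };

static void clear_src_list(struct src **head)
{
    while (*head) {
        struct src *s = *head;
        *head = s->next;
        free(s);
    }
}

int main(void)
{
    struct src *old_list = calloc(1, sizeof(struct src)); /* tomb record */
    struct src *new_list = NULL;                          /* live record */

    /* transfer ownership; the stale pointer ends up on the old side */
    swap(new_list, old_list);

    clear_src_list(&old_list);   /* frees only what the old side holds */
    printf("live side owns %p\n", (void *)new_list);
    clear_src_list(&new_list);
    return 0;
}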
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 948b4ebbe3fb..49248fe5847a 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -112,15 +112,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+ #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
+ #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
+
+-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
++static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
++{
++ return (__s32)(timeout - (u32)jiffies);
++}
++
++static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
+ {
+ const struct nf_conntrack_l4proto *l4proto;
++ int l4num = nf_ct_protonum(ct);
+ unsigned int timeout;
+- int l4num;
+-
+- l4num = nf_ct_protonum(ct);
+- if (l4num == IPPROTO_TCP)
+- flow_offload_fixup_tcp(&ct->proto.tcp);
+
+ l4proto = nf_ct_l4proto_find(l4num);
+ if (!l4proto)
+@@ -133,7 +134,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+ else
+ return;
+
+- ct->timeout = nfct_time_stamp + timeout;
++ if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
++ ct->timeout = nfct_time_stamp + timeout;
++}
++
++static void flow_offload_fixup_ct_state(struct nf_conn *ct)
++{
++ if (nf_ct_protonum(ct) == IPPROTO_TCP)
++ flow_offload_fixup_tcp(&ct->proto.tcp);
++}
++
++static void flow_offload_fixup_ct(struct nf_conn *ct)
++{
++ flow_offload_fixup_ct_state(ct);
++ flow_offload_fixup_ct_timeout(ct);
+ }
+
+ void flow_offload_free(struct flow_offload *flow)
+@@ -209,6 +223,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_add);
+
++static inline bool nf_flow_has_expired(const struct flow_offload *flow)
++{
++ return nf_flow_timeout_delta(flow->timeout) <= 0;
++}
++
+ static void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+ {
+@@ -224,6 +243,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
+ e = container_of(flow, struct flow_offload_entry, flow);
+ clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+
++ if (nf_flow_has_expired(flow))
++ flow_offload_fixup_ct(e->ct);
++ else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
++ flow_offload_fixup_ct_timeout(e->ct);
++
+ flow_offload_free(flow);
+ }
+
+@@ -299,11 +323,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ return err;
+ }
+
+-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+-{
+- return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+-}
+-
+ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ {
+ struct nf_flowtable *flow_table = data;
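+
nf_flow_timeout_delta() above is the classic jiffies-wraparound-safe comparison: subtract in unsigned arithmetic, then interpret the difference as signed. It stays correct across the 32-bit wrap as long as deadlines sit within plus or minus 2^31 ticks of now. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

static int32_t timeout_delta(uint32_t timeout, uint32_t now)
{
    return (int32_t)(timeout - now);   /* signed view of unsigned diff */
}

int main(void)
{
    uint32_t now = 0xfffffff0u;       /* counter about to wrap */
    uint32_t deadline = now + 100;    /* numerically wraps past zero */

    printf("delta now        = %d (positive: still pending)\n",
           timeout_delta(deadline, now));
    printf("delta at now+200 = %d (expired: delta <= 0)\n",
           timeout_delta(deadline, now + 200));
    return 0;
}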
+diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
+index cdfc33517e85..d68c801dd614 100644
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+ return true;
+ }
+
++static int nf_flow_offload_dst_check(struct dst_entry *dst)
++{
++ if (unlikely(dst_xfrm(dst)))
++ return dst_check(dst, 0) ? 0 : -1;
++
++ return 0;
++}
++
++static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
++ const struct nf_hook_state *state,
++ struct dst_entry *dst)
++{
++ skb_orphan(skb);
++ skb_dst_set_noref(skb, dst);
++ skb->tstamp = 0;
++ dst_output(state->net, state->sk, skb);
++ return NF_STOLEN;
++}
++
+ unsigned int
+ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+@@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
+ return NF_ACCEPT;
+
++ if (nf_flow_offload_dst_check(&rt->dst)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+ return NF_DROP;
+
+@@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
+ iph = ip_hdr(skb);
+ ip_decrease_ttl(iph);
+
++ if (unlikely(dst_xfrm(&rt->dst))) {
++ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
++ IPCB(skb)->iif = skb->dev->ifindex;
++ IPCB(skb)->flags = IPSKB_FORWARDED;
++ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
++ }
++
+ skb->dev = outdev;
+ nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
+ skb_dst_set_noref(skb, &rt->dst);
+@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ sizeof(*ip6h)))
+ return NF_ACCEPT;
+
++ if (nf_flow_offload_dst_check(&rt->dst)) {
++ flow_offload_teardown(flow);
++ return NF_ACCEPT;
++ }
++
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
+
+@@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+ ip6h = ipv6_hdr(skb);
+ ip6h->hop_limit--;
+
++ if (unlikely(dst_xfrm(&rt->dst))) {
++ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
++ IP6CB(skb)->iif = skb->dev->ifindex;
++ IP6CB(skb)->flags = IP6SKB_FORWARDED;
++ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
++ }
++
+ skb->dev = outdev;
+ nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+ skb_dst_set_noref(skb, &rt->dst);
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index bcf17fb46d96..8e4cdae2c4f1 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -136,9 +136,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+ return;
+
+ list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+- if (trans->msg_type == NFT_MSG_NEWSET &&
+- nft_trans_set(trans) == set) {
+- set->bound = true;
++ switch (trans->msg_type) {
++ case NFT_MSG_NEWSET:
++ if (nft_trans_set(trans) == set)
++ nft_trans_set_bound(trans) = true;
++ break;
++ case NFT_MSG_NEWSETELEM:
++ if (nft_trans_elem_set(trans) == set)
++ nft_trans_elem_set_bound(trans) = true;
+ break;
+ }
+ }
+@@ -6849,7 +6854,7 @@ static int __nf_tables_abort(struct net *net)
+ break;
+ case NFT_MSG_NEWSET:
+ trans->ctx.table->use--;
+- if (nft_trans_set(trans)->bound) {
++ if (nft_trans_set_bound(trans)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+@@ -6861,7 +6866,7 @@ static int __nf_tables_abort(struct net *net)
+ nft_trans_destroy(trans);
+ break;
+ case NFT_MSG_NEWSETELEM:
+- if (nft_trans_elem_set(trans)->bound) {
++ if (nft_trans_elem_set_bound(trans)) {
+ nft_trans_destroy(trans);
+ break;
+ }
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index aa5f571d4361..060a4ed46d5e 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ {
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+ struct nf_flowtable *flowtable = &priv->flowtable->data;
++ struct tcphdr _tcph, *tcph = NULL;
+ enum ip_conntrack_info ctinfo;
+ struct nf_flow_route route;
+ struct flow_offload *flow;
+ enum ip_conntrack_dir dir;
+- bool is_tcp = false;
+ struct nf_conn *ct;
+ int ret;
+
+@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+
+ switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+ case IPPROTO_TCP:
+- is_tcp = true;
++ tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
++ sizeof(_tcph), &_tcph);
++ if (unlikely(!tcph || tcph->fin || tcph->rst))
++ goto out;
+ break;
+ case IPPROTO_UDP:
+ break;
+@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ if (!flow)
+ goto err_flow_alloc;
+
+- if (is_tcp) {
++ if (tcph) {
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ }
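+
The nft_flow_offload change stops trusting the protocol number alone and pulls the TCP header with skb_header_pointer() so FIN/RST packets are never offloaded. The underlying safety idea, copy a header out only if it lies entirely inside the buffer, looks roughly like this (a simplification, not the skb API):

#include <stdio.h>
#include <string.h>

struct tcp_flags { unsigned char fin:1, rst:1; };

/* simplified skb_header_pointer(): NULL if [off, off+len) is not
 * fully inside the buffer, else a copy placed in caller storage */
static void *header_pointer(const void *buf, size_t buflen,
                            size_t off, size_t len, void *copy)
{
    if (len > buflen || off > buflen - len)
        return NULL;
    memcpy(copy, (const char *)buf + off, len);
    return copy;
}

int main(void)
{
    unsigned char pkt[40] = { 0 };
    struct tcp_flags _th, *th;

    th = header_pointer(pkt, sizeof(pkt), 20, sizeof(_th), &_th);
    if (!th || th->fin || th->rst)
        puts("skip offload");
    else
        puts("safe to offload");
    return 0;
}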
+diff --git a/net/psample/psample.c b/net/psample/psample.c
+index 841f198ea1a8..66e4b61a350d 100644
+--- a/net/psample/psample.c
++++ b/net/psample/psample.c
+@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group)
+ {
+ psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
+ list_del(&group->list);
+- kfree(group);
++ kfree_rcu(group, rcu);
+ }
+
+ static struct psample_group *
+diff --git a/net/rds/recv.c b/net/rds/recv.c
+index 853de4876088..a42ba7fa06d5 100644
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
++ * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
+
+ minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
+ minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
++ minfo6.tos = inc->i_conn->c_tos;
+
+ if (flip) {
+ minfo6.laddr = *daddr;
+@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
+ minfo6.fport = inc->i_hdr.h_dport;
+ }
+
++ minfo6.flags = 0;
++
+ rds_info_copy(iter, &minfo6, sizeof(minfo6));
+ }
+ #endif
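+
The rds6_inc_info_copy() hunk fills in minfo6.tos and, more importantly, sets minfo6.flags = 0 before the struct is copied out: an on-stack struct handed to userspace with a field never written leaks kernel stack contents. The defensive habit in miniature:

#include <stdio.h>
#include <string.h>

struct msg_info {
    unsigned int seq;
    unsigned int flags;   /* was never written before the copy-out */
};

int main(void)
{
    struct msg_info info;

    memset(&info, 0, sizeof(info));   /* or set every field explicitly */
    info.seq = 42;
    info.flags = 0;

    /* now safe to hand the whole struct to an untrusted reader */
    printf("seq=%u flags=%u\n", info.seq, info.flags);
    return 0;
}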
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index fd1f7e799e23..04b7bd4ec751 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, bpf_net_id);
+
+- return tc_action_net_init(tn, &act_bpf_ops);
++ return tc_action_net_init(net, tn, &act_bpf_ops);
+ }
+
+ static void __net_exit bpf_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index 32ac04d77a45..2b43cacf82af 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, connmark_net_id);
+
+- return tc_action_net_init(tn, &act_connmark_ops);
++ return tc_action_net_init(net, tn, &act_connmark_ops);
+ }
+
+ static void __net_exit connmark_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
+index 9b9288267a54..d3cfad88dc3a 100644
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, csum_net_id);
+
+- return tc_action_net_init(tn, &act_csum_ops);
++ return tc_action_net_init(net, tn, &act_csum_ops);
+ }
+
+ static void __net_exit csum_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
+index 8f0140c6ca58..324f1d1f6d47 100644
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, gact_net_id);
+
+- return tc_action_net_init(tn, &act_gact_ops);
++ return tc_action_net_init(net, tn, &act_gact_ops);
+ }
+
+ static void __net_exit gact_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 92ee853d43e6..3a31e241c647 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, ife_net_id);
+
+- return tc_action_net_init(tn, &act_ife_ops);
++ return tc_action_net_init(net, tn, &act_ife_ops);
+ }
+
+ static void __net_exit ife_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
+index ce2c30a591d2..214a03d405cf 100644
+--- a/net/sched/act_ipt.c
++++ b/net/sched/act_ipt.c
+@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
+ return 0;
+ }
+
+-static void ipt_destroy_target(struct xt_entry_target *t)
++static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
+ {
+ struct xt_tgdtor_param par = {
+ .target = t->u.kernel.target,
+ .targinfo = t->data,
+ .family = NFPROTO_IPV4,
++ .net = net,
+ };
+ if (par.target->destroy != NULL)
+ par.target->destroy(&par);
+@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a)
+ struct tcf_ipt *ipt = to_ipt(a);
+
+ if (ipt->tcfi_t) {
+- ipt_destroy_target(ipt->tcfi_t);
++ ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
+ kfree(ipt->tcfi_t);
+ }
+ kfree(ipt->tcfi_tname);
+@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
+
+ spin_lock_bh(&ipt->tcf_lock);
+ if (ret != ACT_P_CREATED) {
+- ipt_destroy_target(ipt->tcfi_t);
++ ipt_destroy_target(ipt->tcfi_t, net);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
+ }
+@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, ipt_net_id);
+
+- return tc_action_net_init(tn, &act_ipt_ops);
++ return tc_action_net_init(net, tn, &act_ipt_ops);
+ }
+
+ static void __net_exit ipt_exit_net(struct list_head *net_list)
+@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, xt_net_id);
+
+- return tc_action_net_init(tn, &act_xt_ops);
++ return tc_action_net_init(net, tn, &act_xt_ops);
+ }
+
+ static void __net_exit xt_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index d10dca7a13e1..bd3178a95cb9 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -432,7 +432,7 @@ static __net_init int mirred_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, mirred_net_id);
+
+- return tc_action_net_init(tn, &act_mirred_ops);
++ return tc_action_net_init(net, tn, &act_mirred_ops);
+ }
+
+ static void __net_exit mirred_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
+index 7b858c11b1b5..ea4c5359e7df 100644
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, nat_net_id);
+
+- return tc_action_net_init(tn, &act_nat_ops);
++ return tc_action_net_init(net, tn, &act_nat_ops);
+ }
+
+ static void __net_exit nat_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 17360c6faeaa..cdfaa79382a2 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, pedit_net_id);
+
+- return tc_action_net_init(tn, &act_pedit_ops);
++ return tc_action_net_init(net, tn, &act_pedit_ops);
+ }
+
+ static void __net_exit pedit_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index 49cec3e64a4d..6315e0f8d26e 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, police_net_id);
+
+- return tc_action_net_init(tn, &act_police_ops);
++ return tc_action_net_init(net, tn, &act_police_ops);
+ }
+
+ static void __net_exit police_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 595308d60133..10229124a992 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+ s->rate = rate;
+ s->psample_group_num = psample_group_num;
+- RCU_INIT_POINTER(s->psample_group, psample_group);
++ rcu_swap_protected(s->psample_group, psample_group,
++ lockdep_is_held(&s->tcf_lock));
+
+ if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
+ s->truncate = true;
+ s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
+ }
+ spin_unlock_bh(&s->tcf_lock);
++
++ if (psample_group)
++ psample_group_put(psample_group);
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+
+@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+- return tc_action_net_init(tn, &act_sample_ops);
++ return tc_action_net_init(net, tn, &act_sample_ops);
+ }
+
+ static void __net_exit sample_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index 33aefa25b545..6120e56117ca 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, simp_net_id);
+
+- return tc_action_net_init(tn, &act_simp_ops);
++ return tc_action_net_init(net, tn, &act_simp_ops);
+ }
+
+ static void __net_exit simp_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 37dced00b63d..6a8d3337c577 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -336,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+
+- return tc_action_net_init(tn, &act_skbedit_ops);
++ return tc_action_net_init(net, tn, &act_skbedit_ops);
+ }
+
+ static void __net_exit skbedit_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index 7da3518e18ef..888437f97ba6 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+- return tc_action_net_init(tn, &act_skbmod_ops);
++ return tc_action_net_init(net, tn, &act_skbmod_ops);
+ }
+
+ static void __net_exit skbmod_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 6d0debdc9b97..2f83a79f76aa 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+- return tc_action_net_init(tn, &act_tunnel_key_ops);
++ return tc_action_net_init(net, tn, &act_tunnel_key_ops);
+ }
+
+ static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index a3c9eea1ee8a..287a30bf8930 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net)
+ {
+ struct tc_action_net *tn = net_generic(net, vlan_net_id);
+
+- return tc_action_net_init(tn, &act_vlan_ops);
++ return tc_action_net_init(net, tn, &act_vlan_ops);
+ }
+
+ static void __net_exit vlan_exit_net(struct list_head *net_list)
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index 732e109c3055..810645b5c086 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
+ s64 credits;
+ int len;
+
+- if (atomic64_read(&q->port_rate) == -1) {
+- WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
+- return NULL;
+- }
+-
+ if (q->credits < 0) {
+ credits = timediff_to_credits(now - q->last, q->idleslope);
+
+@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
+ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
+ {
+ struct ethtool_link_ksettings ecmd;
++ int speed = SPEED_10;
+ int port_rate = -1;
++ int err;
++
++ err = __ethtool_get_link_ksettings(dev, &ecmd);
++ if (err < 0)
++ goto skip;
++
++ if (ecmd.base.speed != SPEED_UNKNOWN)
++ speed = ecmd.base.speed;
+
+- if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+- ecmd.base.speed != SPEED_UNKNOWN)
+- port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
++skip:
++ port_rate = speed * 1000 * BYTES_PER_KBIT;
+
+ atomic64_set(&q->port_rate, port_rate);
+ netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 11c03cf4aa74..137db1cbde85 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
+
+ err = skb_array_produce(q, skb);
+
+- if (unlikely(err))
+- return qdisc_drop_cpu(skb, qdisc, to_free);
++ if (unlikely(err)) {
++ if (qdisc_is_percpu_stats(qdisc))
++ return qdisc_drop_cpu(skb, qdisc, to_free);
++ else
++ return qdisc_drop(skb, qdisc, to_free);
++ }
+
+ qdisc_update_stats_at_enqueue(qdisc, pkt_len);
+ return NET_XMIT_SUCCESS;
+@@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
+ kfree_skb(skb);
+ }
+
+- for_each_possible_cpu(i) {
+- struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
++ if (qdisc_is_percpu_stats(qdisc)) {
++ for_each_possible_cpu(i) {
++ struct gnet_stats_queue *q;
+
+- q->backlog = 0;
+- q->qlen = 0;
++ q = per_cpu_ptr(qdisc->cpu_qstats, i);
++ q->backlog = 0;
++ q->qlen = 0;
++ }
+ }
+ }
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 8be89aa52b6e..11c2873ec68b 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -205,11 +205,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+ u32 gate_mask;
+ int i;
+
+- if (atomic64_read(&q->picos_per_byte) == -1) {
+- WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
+- return NULL;
+- }
+-
+ rcu_read_lock();
+ entry = rcu_dereference(q->current_entry);
+ /* if there's no entry, it means that the schedule didn't
+@@ -665,12 +660,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+ struct taprio_sched *q)
+ {
+ struct ethtool_link_ksettings ecmd;
+- int picos_per_byte = -1;
++ int speed = SPEED_10;
++ int picos_per_byte;
++ int err;
+
+- if (!__ethtool_get_link_ksettings(dev, &ecmd) &&
+- ecmd.base.speed != SPEED_UNKNOWN)
+- picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
+- ecmd.base.speed * 1000 * 1000);
++ err = __ethtool_get_link_ksettings(dev, &ecmd);
++ if (err < 0)
++ goto skip;
++
++ if (ecmd.base.speed != SPEED_UNKNOWN)
++ speed = ecmd.base.speed;
++
++skip:
++ picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
++ speed * 1000 * 1000);
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+@@ -903,6 +906,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
+ */
+ q->clockid = -1;
+
++ spin_lock(&taprio_list_lock);
++ list_add(&q->taprio_list, &taprio_list);
++ spin_unlock(&taprio_list_lock);
++
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+@@ -920,10 +927,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
+ if (!opt)
+ return -EINVAL;
+
+- spin_lock(&taprio_list_lock);
+- list_add(&q->taprio_list, &taprio_list);
+- spin_unlock(&taprio_list_lock);
+-
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
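+
taprio (and cbs above it) now fall back to SPEED_10 instead of poisoning the rate with -1 when __ethtool_get_link_ksettings() fails, which is what lets the dequeue-path WARN_ONCE guards go away. The arithmetic checks out in userspace: at 1 Gb/s a byte takes 8 ns, i.e. 8000 ps:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL
#define SPEED_10     10            /* Mb/s fallback for unknown links */

static int64_t picos_per_byte(int64_t speed_mbps)
{
    /* x1000: ns -> ps; x8: bits per byte; speed is in Mb/s */
    return (NSEC_PER_SEC * 1000 * 8) / (speed_mbps * 1000 * 1000);
}

int main(void)
{
    printf("1 Gb/s   -> %lld ps/byte\n", (long long)picos_per_byte(1000));
    printf("fallback -> %lld ps/byte\n", (long long)picos_per_byte(SPEED_10));
    return 0;
}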
+diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
+index f7261fad45c1..647d8a4044fb 100644
+--- a/tools/bpf/bpftool/common.c
++++ b/tools/bpf/bpftool/common.c
+@@ -236,7 +236,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
+
+ fd = get_fd_by_id(id);
+ if (fd < 0) {
+- p_err("can't get prog by id (%u): %s", id, strerror(errno));
++ p_err("can't open object by id (%u): %s", id, strerror(errno));
+ return -1;
+ }
+
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 0ce50c319cfd..ef8a82f29f02 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
+ int sn_offset = 0;
+ int error = 0;
+ char *buffer;
+- struct hv_kvp_ipaddr_value *ip_buffer;
++ struct hv_kvp_ipaddr_value *ip_buffer = NULL;
+ char cidr_mask[5]; /* /xyz */
+ int weight;
+ int i;
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 77e14d995479..0ccf6aa533ae 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -178,7 +178,6 @@ struct bpf_program {
+ bpf_program_clear_priv_t clear_priv;
+
+ enum bpf_attach_type expected_attach_type;
+- int btf_fd;
+ void *func_info;
+ __u32 func_info_rec_size;
+ __u32 func_info_cnt;
+@@ -305,7 +304,6 @@ void bpf_program__unload(struct bpf_program *prog)
+ prog->instances.nr = -1;
+ zfree(&prog->instances.fds);
+
+- zclose(prog->btf_fd);
+ zfree(&prog->func_info);
+ zfree(&prog->line_info);
+ }
+@@ -382,7 +380,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
+ prog->instances.fds = NULL;
+ prog->instances.nr = -1;
+ prog->type = BPF_PROG_TYPE_UNSPEC;
+- prog->btf_fd = -1;
+
+ return 0;
+ errout:
+@@ -1888,9 +1885,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
+ prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
+ }
+
+- if (!insn_offset)
+- prog->btf_fd = btf__fd(obj->btf);
+-
+ return 0;
+ }
+
+@@ -2065,7 +2059,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
+ char *cp, errmsg[STRERR_BUFSIZE];
+ int log_buf_size = BPF_LOG_BUF_SIZE;
+ char *log_buf;
+- int ret;
++ int btf_fd, ret;
+
+ memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
+ load_attr.prog_type = prog->type;
+@@ -2077,7 +2071,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
+ load_attr.license = license;
+ load_attr.kern_version = kern_version;
+ load_attr.prog_ifindex = prog->prog_ifindex;
+- load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
++ /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
++ if (prog->obj->btf_ext)
++ btf_fd = bpf_object__btf_fd(prog->obj);
++ else
++ btf_fd = -1;
++ load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
+ load_attr.func_info = prog->func_info;
+ load_attr.func_info_rec_size = prog->func_info_rec_size;
+ load_attr.func_info_cnt = prog->func_info_cnt;
+diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
+index 4059014d93ea..4912d23844bc 100644
+--- a/tools/testing/selftests/kvm/include/evmcs.h
++++ b/tools/testing/selftests/kvm/include/evmcs.h
+@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
+ struct hv_enlightened_vmcs *current_evmcs;
+ struct hv_vp_assist_page *current_vp_assist;
+
++int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
++
+ static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+ {
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+index d2ad85fb01ac..5f1ba3da2dbd 100644
+--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
+@@ -1059,9 +1059,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
+ r);
+
+- r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+- TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+- r);
++ if (kvm_check_cap(KVM_CAP_XCRS)) {
++ r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
++ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
++ r);
++ }
+
+ r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
+@@ -1102,9 +1104,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
+ r);
+
+- r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+- TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+- r);
++ if (kvm_check_cap(KVM_CAP_XCRS)) {
++ r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
++ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
++ r);
++ }
+
+ r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
+ TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
+diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+index fe56d159d65f..52b6491ed706 100644
+--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
++++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+@@ -14,6 +14,26 @@
+
+ bool enable_evmcs;
+
++int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
++{
++ uint16_t evmcs_ver;
++
++ struct kvm_enable_cap enable_evmcs_cap = {
++ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
++ .args[0] = (unsigned long)&evmcs_ver
++ };
++
++ vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
++
++ /* KVM should return supported EVMCS version range */
++ TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
++ (evmcs_ver & 0xff) > 0,
++ "Incorrect EVMCS version range: %x:%x\n",
++ evmcs_ver & 0xff, evmcs_ver >> 8);
++
++ return evmcs_ver;
++}
++
+ /* Allocate memory regions for nested VMX tests.
+ *
+ * Input Args:
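+
vcpu_enable_evmcs() centralizes the capability enable plus the sanity check on the version range KVM reports, packed into 16 bits with the lowest supported version in the low byte and the highest in the high byte. Unpacking it, with a made-up value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t evmcs_ver = (2 << 8) | 1;   /* hypothetical: versions 1..2 */

    uint8_t lo = evmcs_ver & 0xff;   /* lowest supported version */
    uint8_t hi = evmcs_ver >> 8;     /* highest supported version */

    /* same invariant the TEST_ASSERT above enforces */
    assert(hi >= lo && lo > 0);
    printf("EVMCS versions %u..%u supported\n", lo, hi);
    return 0;
}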
+diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+index 241919ef1eac..9f250c39c9bb 100644
+--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
++++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
+ struct kvm_x86_state *state;
+ struct ucall uc;
+ int stage;
+- uint16_t evmcs_ver;
+- struct kvm_enable_cap enable_evmcs_cap = {
+- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+- .args[0] = (unsigned long)&evmcs_ver
+- };
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
+ exit(KSFT_SKIP);
+ }
+
+- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+-
+- /* KVM should return supported EVMCS version range */
+- TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+- (evmcs_ver & 0xff) > 0,
+- "Incorrect EVMCS version range: %x:%x\n",
+- evmcs_ver & 0xff, evmcs_ver >> 8);
++ vcpu_enable_evmcs(vm, VCPU_ID);
+
+ run = vcpu_state(vm, VCPU_ID);
+
+@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID, 0, 0);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+- vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
++ vcpu_enable_evmcs(vm, VCPU_ID);
+ vcpu_load_state(vm, VCPU_ID, state);
+ run = vcpu_state(vm, VCPU_ID);
+ free(state);
+diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+index f72b3043db0e..ee59831fbc98 100644
+--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
++++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+@@ -18,6 +18,7 @@
+ #include "test_util.h"
+ #include "kvm_util.h"
+ #include "processor.h"
++#include "vmx.h"
+
+ #define VCPU_ID 0
+
+@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
+ {
+ struct kvm_vm *vm;
+ int rv;
+- uint16_t evmcs_ver;
+ struct kvm_cpuid2 *hv_cpuid_entries;
+- struct kvm_enable_cap enable_evmcs_cap = {
+- .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+- .args[0] = (unsigned long)&evmcs_ver
+- };
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
+
+ free(hv_cpuid_entries);
+
+- rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+-
+- if (rv) {
++ if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+ fprintf(stderr,
+ "Enlightened VMCS is unsupported, skip related test\n");
+ goto vm_free;
+ }
+
++ vcpu_enable_evmcs(vm, VCPU_ID);
++
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
+ if (!hv_cpuid_entries)
+ return 1;
+diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+index 40050e44ec0a..f9334bd3cce9 100644
+--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
++++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
+ msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+- test_msr_platform_info_disabled(vm);
+ test_msr_platform_info_enabled(vm);
++ test_msr_platform_info_disabled(vm);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+
+ kvm_vm_free(vm);
+diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+index ed7218d166da..853e370e8a39 100644
+--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
++++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+@@ -25,24 +25,17 @@
+ #define VMCS12_REVISION 0x11e57ed0
+ #define VCPU_ID 5
+
++bool have_evmcs;
++
+ void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
+ {
+- volatile struct kvm_run *run;
+-
+ vcpu_nested_state_set(vm, VCPU_ID, state, false);
+- run = vcpu_state(vm, VCPU_ID);
+- vcpu_run(vm, VCPU_ID);
+- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+- run->exit_reason,
+- exit_reason_str(run->exit_reason));
+ }
+
+ void test_nested_state_expect_errno(struct kvm_vm *vm,
+ struct kvm_nested_state *state,
+ int expected_errno)
+ {
+- volatile struct kvm_run *run;
+ int rv;
+
+ rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
+@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
+ "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
+ strerror(expected_errno), expected_errno, rv, strerror(errno),
+ errno);
+- run = vcpu_state(vm, VCPU_ID);
+- vcpu_run(vm, VCPU_ID);
+- TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+- "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
+- run->exit_reason,
+- exit_reason_str(run->exit_reason));
+ }
+
+ void test_nested_state_expect_einval(struct kvm_vm *vm,
+@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
+ {
+ memset(state, 0, size);
+ state->flags = KVM_STATE_NESTED_GUEST_MODE |
+- KVM_STATE_NESTED_RUN_PENDING |
+- KVM_STATE_NESTED_EVMCS;
++ KVM_STATE_NESTED_RUN_PENDING;
++ if (have_evmcs)
++ state->flags |= KVM_STATE_NESTED_EVMCS;
+ state->format = 0;
+ state->size = size;
+ state->hdr.vmx.vmxon_pa = 0x1000;
+@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
+ /*
+ * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
+ * setting the nested state but flags other than eVMCS must be clear.
++ * The eVMCS flag can be set if the enlightened VMCS capability has
++ * been enabled.
+ */
+ set_default_vmx_state(state, state_sz);
+ state->hdr.vmx.vmxon_pa = -1ull;
+ state->hdr.vmx.vmcs12_pa = -1ull;
+ test_nested_state_expect_einval(vm, state);
+
+- state->flags = KVM_STATE_NESTED_EVMCS;
++ state->flags &= KVM_STATE_NESTED_EVMCS;
++ if (have_evmcs) {
++ test_nested_state_expect_einval(vm, state);
++ vcpu_enable_evmcs(vm, VCPU_ID);
++ }
+ test_nested_state(vm, state);
+
+ /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
+@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
+ struct kvm_nested_state state;
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
++ have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
++
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+ printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
+ exit(KSFT_SKIP);
+diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
+index a8a6a0c883f1..6af5c91337f2 100644
+--- a/virt/kvm/arm/mmio.c
++++ b/virt/kvm/arm/mmio.c
+@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ unsigned int len;
+ int mask;
+
++ /* Detect an already handled MMIO return */
++ if (unlikely(!vcpu->mmio_needed))
++ return 0;
++
++ vcpu->mmio_needed = 0;
++
+ if (!run->mmio.is_write) {
+ len = run->mmio.len;
+ if (len > sizeof(unsigned long))
+@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ run->mmio.is_write = is_write;
+ run->mmio.phys_addr = fault_ipa;
+ run->mmio.len = len;
++ vcpu->mmio_needed = 1;
+
+ if (!ret) {
+ /* We handled the access successfully in the kernel. */
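+
The mmio.c change makes MMIO completion idempotent: kvm_handle_mmio_return() now bails out unless an access is actually in flight, and io_mem_abort() is the only place that arms the flag. The consume-once shape, reduced to a toy:

#include <stdio.h>

static int mmio_needed;   /* set when an access is handed to userspace */

static int handle_mmio_return(void)
{
    if (!mmio_needed)     /* duplicate or spurious completion: ignore */
        return 0;
    mmio_needed = 0;      /* consume exactly once */
    puts("completing one MMIO access");
    return 1;
}

int main(void)
{
    mmio_needed = 1;      /* io_mem_abort() would set this */
    handle_mmio_return(); /* completes the access */
    handle_mmio_return(); /* no-op instead of corrupting state */
    return 0;
}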
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
+index bdbc297d06fb..e621b5d45b27 100644
+--- a/virt/kvm/arm/vgic/vgic-init.c
++++ b/virt/kvm/arm/vgic/vgic-init.c
+@@ -8,6 +8,7 @@
+ #include <linux/cpu.h>
+ #include <linux/kvm_host.h>
+ #include <kvm/arm_vgic.h>
++#include <asm/kvm_emulate.h>
+ #include <asm/kvm_mmu.h>
+ #include "vgic.h"
+
+@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+ irq->vcpu = NULL;
+ irq->target_vcpu = vcpu0;
+ kref_init(&irq->refcount);
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
++ switch (dist->vgic_model) {
++ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ irq->targets = 0;
+ irq->group = 0;
+- } else {
++ break;
++ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ irq->mpidr = 0;
+ irq->group = 1;
++ break;
++ default:
++ kfree(dist->spis);
++ return -EINVAL;
+ }
+ }
+ return 0;
+@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ irq->intid = i;
+ irq->vcpu = NULL;
+ irq->target_vcpu = vcpu;
+- irq->targets = 1U << vcpu->vcpu_id;
+ kref_init(&irq->refcount);
+ if (vgic_irq_is_sgi(i)) {
+ /* SGIs */
+@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+ /* PPIs */
+ irq->config = VGIC_CONFIG_LEVEL;
+ }
+-
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+- irq->group = 1;
+- else
+- irq->group = 0;
+ }
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
+
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+- if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
++ switch (dist->vgic_model) {
++ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ irq->group = 1;
+- else
++ irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
++ break;
++ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ irq->group = 0;
++ irq->targets = 1U << idx;
++ break;
++ default:
++ ret = -EINVAL;
++ goto out;
++ }
+ }
+ }
+
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-09-06 17:26 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-09-06 17:26 UTC (permalink / raw
To: gentoo-commits
commit: 50acba1d52c3a0eeae20f935118bf8e76cab062b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 6 17:26:15 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 6 17:26:15 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=50acba1d
Linux patch 5.2.12 and 5.2.13
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
1011_linux-5.2.12.patch | 5532 +++++++++++++++++++++++++++++++++++++++++++++++
1012_linux-5.2.13.patch | 92 +
2 files changed, 5624 insertions(+)
diff --git a/1011_linux-5.2.12.patch b/1011_linux-5.2.12.patch
new file mode 100644
index 0000000..0df54ef
--- /dev/null
+++ b/1011_linux-5.2.12.patch
@@ -0,0 +1,5532 @@
+diff --git a/Makefile b/Makefile
+index a3b26dcfc5c8..e26d52d93bb1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index ae63eedea1c1..68faf535f40a 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ };
+
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+- S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
++ /*
++ * We already refuse to boot CPUs that don't support our configured
++ * page size, so we can only detect mismatches for a page size other
++ * than the one we're currently using. Unfortunately, SoCs like this
++ * exist in the wild so, even though we don't like it, we'll have to go
++ * along with it and treat them as non-strict.
++ */
++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
++ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
++
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ /* Linux shouldn't care about secure memory */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 5bf05cc774e2..446d91d6cf70 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -696,8 +696,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ }
+ tce = be64_to_cpu(tce);
+
+- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
+- return H_PARAMETER;
++ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
++ ret = H_PARAMETER;
++ goto unlock_exit;
++ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index f50bbeedfc66..b4f20f13b860 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
+
+ ua = 0;
+- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+- return H_PARAMETER;
++ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
++ ret = H_PARAMETER;
++ goto unlock_exit;
++ }
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
+index 687dd19735a7..4d9bbe8438bf 100644
+--- a/arch/riscv/include/asm/tlbflush.h
++++ b/arch/riscv/include/asm/tlbflush.h
+@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
+ }
+
+ #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
+-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
++
+ #define flush_tlb_range(vma, start, end) \
+ remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
+-#define flush_tlb_mm(mm) \
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ flush_tlb_range(vma, addr, addr + PAGE_SIZE);
++}
++
++#define flush_tlb_mm(mm) \
+ remote_sfence_vma(mm_cpumask(mm), 0, -1)
+
+ #endif /* CONFIG_SMP */
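+
The old flush_tlb_page() expanded to flush_tlb_range(vma, addr, 0), so the size argument came out as 0 - addr, an underflowed unsigned value; the replacement flushes exactly [addr, addr + PAGE_SIZE). A sketch of the fixed computation, with remote_sfence_vma() stubbed out:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stub: the real helper issues an SBI remote fence over the range */
static void remote_sfence_vma(unsigned long start, unsigned long size)
{
    printf("sfence.vma over [%#lx, %#lx)\n", start, start + size);
}

static void flush_tlb_range(unsigned long start, unsigned long end)
{
    remote_sfence_vma(start, end - start);
}

/* one page means a range of exactly PAGE_SIZE, not end = 0 */
static void flush_tlb_page(unsigned long addr)
{
    flush_tlb_range(addr, addr + PAGE_SIZE);
}

int main(void)
{
    flush_tlb_page(0x10000);
    return 0;
}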
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 2f067b443326..97c3a1c9502e 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1152,6 +1152,10 @@ void clear_local_APIC(void)
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+ v = apic_read(APIC_LVT1);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
++ if (!x2apic_enabled()) {
++ v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
++ apic_write(APIC_LDR, v);
++ }
+ if (maxlvt >= 4) {
+ v = apic_read(APIC_LVTPC);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
+diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
+index afee386ff711..caedd8d60d36 100644
+--- a/arch/x86/kernel/apic/bigsmp_32.c
++++ b/arch/x86/kernel/apic/bigsmp_32.c
+@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
+ return early_per_cpu(x86_cpu_to_apicid, cpu);
+ }
+
+-static inline unsigned long calculate_ldr(int cpu)
+-{
+- unsigned long val, id;
+-
+- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+- id = per_cpu(x86_bios_cpu_apicid, cpu);
+- val |= SET_APIC_LOGICAL_ID(id);
+-
+- return val;
+-}
+-
+ /*
+- * Set up the logical destination ID.
+- *
+- * Intel recommends to set DFR, LDR and TPR before enabling
+- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+- * document number 292116). So here it goes...
++ * bigsmp enables physical destination mode
++ * and doesn't use LDR and DFR
+ */
+ static void bigsmp_init_apic_ldr(void)
+ {
+- unsigned long val;
+- int cpu = smp_processor_id();
+-
+- apic_write(APIC_DFR, APIC_DFR_FLAT);
+- val = calculate_ldr(cpu);
+- apic_write(APIC_LDR, val);
+ }
+
+ static void bigsmp_setup_apic_routing(void)
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index e9d0bc3a5e88..00fccf952d9b 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -644,11 +644,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
+ {
+ struct thread_struct *thread = &tsk->thread;
+ unsigned long val = 0;
+- int index = n;
+
+ if (n < HBP_NUM) {
++ int index = array_index_nospec(n, HBP_NUM);
+ struct perf_event *bp = thread->ptrace_bps[index];
+- index = array_index_nospec(index, HBP_NUM);
+
+ if (bp)
+ val = bp->hw.info.address;
+diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
+index 918b5092a85f..0b16309898c6 100644
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
+ void (*abort)(struct arch_uprobe *, struct pt_regs *);
+ };
+
+-static inline int sizeof_long(void)
++static inline int sizeof_long(struct pt_regs *regs)
+ {
+- return in_ia32_syscall() ? 4 : 8;
++ /*
++ * Check registers for mode as in_xxx_syscall() does not apply here.
++ */
++ return user_64bit_mode(regs) ? 8 : 4;
+ }
+
+ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+
+ static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
+ {
+- unsigned long new_sp = regs->sp - sizeof_long();
++ unsigned long new_sp = regs->sp - sizeof_long(regs);
+
+- if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
++ if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
+ return -EFAULT;
+
+ regs->sp = new_sp;
+@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
+ long correction = utask->vaddr - utask->xol_vaddr;
+ regs->ip += correction;
+ } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
+- regs->sp += sizeof_long(); /* Pop incorrect return address */
++ regs->sp += sizeof_long(regs); /* Pop incorrect return address */
+ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
+ return -ERESTART;
+ }
+@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ * "call" insn was executed out-of-line. Just restore ->sp and restart.
+ * We could also restore ->ip and try to call branch_emulate_op() again.
+ */
+- regs->sp += sizeof_long();
++ regs->sp += sizeof_long(regs);
+ return -ERESTART;
+ }
+
+@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+ unsigned long
+ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
+ {
+- int rasize = sizeof_long(), nleft;
++ int rasize = sizeof_long(regs), nleft;
+ unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
+
+ if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
+diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
+index a39e38f13029..742ecf5b6c00 100644
+--- a/arch/x86/kvm/hyperv.c
++++ b/arch/x86/kvm/hyperv.c
+@@ -1783,7 +1783,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
+ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+ {
+- uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
++ uint16_t evmcs_ver = 0;
+ struct kvm_cpuid_entry2 cpuid_entries[] = {
+ { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
+ { .function = HYPERV_CPUID_INTERFACE },
+@@ -1795,6 +1795,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ };
+ int i, nent = ARRAY_SIZE(cpuid_entries);
+
++ if (kvm_x86_ops->nested_get_evmcs_version)
++ evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
++
+ /* Skip NESTED_FEATURES if eVMCS is not supported */
+ if (!evmcs_ver)
+ --nent;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4dabc318adb8..8d22c79f5333 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -212,6 +212,9 @@ static void recalculate_apic_map(struct kvm *kvm)
+ if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
+ new->phys_map[xapic_id] = apic;
+
++ if (!kvm_apic_sw_enabled(apic))
++ continue;
++
+ ldr = kvm_lapic_get_reg(apic, APIC_LDR);
+
+ if (apic_x2apic_mode(apic)) {
+@@ -254,6 +257,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
+ static_key_slow_dec_deferred(&apic_sw_disabled);
+ else
+ static_key_slow_inc(&apic_sw_disabled.key);
++
++ recalculate_apic_map(apic->vcpu->kvm);
+ }
+ }
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 14384a1ec53f..2c7daa3b968d 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -7107,12 +7107,6 @@ failed:
+ return ret;
+ }
+
+-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+-{
+- /* Not supported */
+- return 0;
+-}
+-
+ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+ {
+@@ -7283,7 +7277,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .mem_enc_unreg_region = svm_unregister_enc_region,
+
+ .nested_enable_evmcs = nested_enable_evmcs,
+- .nested_get_evmcs_version = nested_get_evmcs_version,
++ .nested_get_evmcs_version = NULL,
+
+ .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
+ };
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 4b830c0adcf8..d5c12d5a5905 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7733,6 +7733,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .set_nested_state = NULL,
+ .get_vmcs12_pages = NULL,
+ .nested_enable_evmcs = NULL,
++ .nested_get_evmcs_version = NULL,
+ .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
+ };
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cbced8ff29d4..1f80fd560ede 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6547,12 +6547,13 @@ restart:
+ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+ toggle_interruptibility(vcpu, ctxt->interruptibility);
+ vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+- kvm_rip_write(vcpu, ctxt->eip);
+- if (r == EMULATE_DONE && ctxt->tf)
+- kvm_vcpu_do_singlestep(vcpu, &r);
+ if (!ctxt->have_exception ||
+- exception_type(ctxt->exception.vector) == EXCPT_TRAP)
++ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
++ kvm_rip_write(vcpu, ctxt->eip);
++ if (r == EMULATE_DONE && ctxt->tf)
++ kvm_vcpu_do_singlestep(vcpu, &r);
+ __kvm_set_rflags(vcpu, ctxt->eflags);
++ }
+
+ /*
+ * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 6a9a77a403c9..e14e95ea7338 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
+ */
+ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+ unsigned long pfn, unsigned long npg,
+- int warnlvl)
++ unsigned long lpsize, int warnlvl)
+ {
+ pgprotval_t forbidden, res;
+ unsigned long end;
+@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
+ check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
+ forbidden = res;
+
+- res = protect_kernel_text_ro(start, end);
+- check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
+- forbidden |= res;
++	/*
++	 * Special case to preserve a large page. If the change spans the
++	 * full large page mapping then there is no point in splitting it
++	 * up. Happens with ftrace and is going to be removed once ftrace
++	 * is switched to text_poke().
++	 */
++ if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
++ res = protect_kernel_text_ro(start, end);
++ check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
++ forbidden |= res;
++ }
+
+ /* Check the PFN directly */
+ res = protect_pci_bios(pfn, pfn + npg - 1);
+@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ * extra conditional required here.
+ */
+ chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
+- CPA_CONFLICT);
++ psize, CPA_CONFLICT);
+
+ if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
+ /*
+@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
+ * protection requirement in the large page.
+ */
+ new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
+- CPA_DETECT);
++ psize, CPA_DETECT);
+
+ /*
+ * If there is a conflict, split the large page.
+@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
+ if (!cpa->force_static_prot)
+ goto set;
+
+- prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
++ /* Hand in lpsize = 0 to enforce the protection mechanism */
++ prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
+
+ if (pgprot_val(prot) == pgprot_val(ref_prot))
+ goto set;
+@@ -1503,7 +1512,8 @@ repeat:
+ pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
+
+ cpa_inc_4k_install();
+- new_prot = static_protections(new_prot, address, pfn, 1,
++ /* Hand in lpsize = 0 to enforce the protection mechanism */
++ new_prot = static_protections(new_prot, address, pfn, 1, 0,
+ CPA_PROTECT);
+
+ new_prot = pgprot_clear_protnone_bits(new_prot);
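
The new lpsize argument lets static_protections() skip the kernel-text-RO check only when the request covers exactly one large page and starts on a large-page boundary; callers that must always enforce the protections pass lpsize = 0. A small sketch of the span-and-alignment test, with illustrative constants rather than live kernel values:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static int keeps_large_page(unsigned long start, unsigned long npg,
                                unsigned long lpsize)
    {
        /* mirrors: lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1)) */
        if (lpsize != npg * PAGE_SIZE)    /* not the whole mapping */
            return 0;
        if (start & (lpsize - 1))         /* not large-page aligned */
            return 0;
        return 1;
    }

    int main(void)
    {
        unsigned long lp = 2UL << 20;     /* 2 MiB large page */

        printf("%d\n", keeps_large_page(0x200000, lp / PAGE_SIZE, lp)); /* 1 */
        printf("%d\n", keeps_large_page(0x201000, lp / PAGE_SIZE, lp)); /* 0 */
        return 0;
    }
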
+diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
+index e06de63497cf..e6bd727da503 100644
+--- a/drivers/auxdisplay/panel.c
++++ b/drivers/auxdisplay/panel.c
+@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
+ return;
+
+ err_lcd_unreg:
++ if (scan_timer.function)
++ del_timer_sync(&scan_timer);
+ if (lcd.enabled)
+ charlcd_unregister(lcd.charlcd);
+ err_unreg_device:
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 3ac6a5d18071..b90dbcd99c03 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ }
+ }
+
++ err = -ENOMEM;
+ for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
+ err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
+- return err;
++ goto fail;
+ }
+
+ return 0;
+@@ -1007,8 +1008,7 @@ fail:
+ }
+ kfree(req);
+ }
+- return -ENOMEM;
+-
++ return err;
+ }
+
+ static int connect_ring(struct backend_info *be)
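
The xen-blkback hunks preset err to -ENOMEM ahead of the allocation loop and route the later xen_blkif_map() failure through the same fail label, so every exit frees the partially built state and reports the real cause instead of a hard-coded -ENOMEM. A hedged user-space sketch of that error-path shape; map_ring() and the error values are stand-ins:

    #include <stdlib.h>

    struct req { int dummy; };

    static int map_ring(void) { return -5; }  /* stand-in for a failing map */

    static int setup(struct req *reqs[], int n)
    {
        int i, err = -12;                 /* preset -ENOMEM for the loop */

        for (i = 0; i < n; i++) {
            reqs[i] = calloc(1, sizeof(*reqs[i]));
            if (!reqs[i])
                goto fail;                /* err is already -ENOMEM */
        }

        err = map_ring();
        if (err)
            goto fail;                    /* propagate the map error */
        return 0;

    fail:
        while (i--)                       /* free whatever was allocated */
            free(reqs[i]);
        return err;                       /* the real cause, not a fixed code */
    }

    int main(void)
    {
        struct req *reqs[4];
        return setup(reqs, 4) ? 0 : 1;    /* expect the map step's -5 */
    }
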
+diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
+index 19d7b6ff2f17..20c957185af2 100644
+--- a/drivers/bus/hisi_lpc.c
++++ b/drivers/bus/hisi_lpc.c
+@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
+ size_t pdata_size;
+ };
+
++static void hisi_lpc_acpi_remove(struct device *hostdev)
++{
++ struct acpi_device *adev = ACPI_COMPANION(hostdev);
++ struct acpi_device *child;
++
++ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
++
++ list_for_each_entry(child, &adev->children, node)
++ acpi_device_clear_enumerated(child);
++}
++
+ /*
+ * hisi_lpc_acpi_probe - probe children for ACPI FW
+ * @hostdev: LPC host device pointer
+@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
+ return 0;
+
+ fail:
+- device_for_each_child(hostdev, NULL,
+- hisi_lpc_acpi_remove_subdev);
++ hisi_lpc_acpi_remove(hostdev);
+ return ret;
+ }
+
+@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
+ {
+ return -ENODEV;
+ }
++
++static void hisi_lpc_acpi_remove(struct device *hostdev)
++{
++}
+ #endif // CONFIG_ACPI
+
+ /*
+@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
+ range->fwnode = dev->fwnode;
+ range->flags = LOGIC_PIO_INDIRECT;
+ range->size = PIO_INDIRECT_SIZE;
++ range->hostdata = lpcdev;
++ range->ops = &hisi_lpc_ops;
++ lpcdev->io_host = range;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ dev_err(dev, "register IO range failed (%d)!\n", ret);
+ return ret;
+ }
+- lpcdev->io_host = range;
+
+ /* register the LPC host PIO resources */
+ if (acpi_device)
+ ret = hisi_lpc_acpi_probe(dev);
+ else
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+- if (ret)
++ if (ret) {
++ logic_pio_unregister_range(range);
+ return ret;
++ }
+
+- lpcdev->io_host->hostdata = lpcdev;
+- lpcdev->io_host->ops = &hisi_lpc_ops;
++ dev_set_drvdata(dev, lpcdev);
+
+ io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
+ dev_info(dev, "registered range [%pa - %pa]\n",
+@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++static int hisi_lpc_remove(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
++ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
++ struct logic_pio_hwaddr *range = lpcdev->io_host;
++
++ if (acpi_device)
++ hisi_lpc_acpi_remove(dev);
++ else
++ of_platform_depopulate(dev);
++
++ logic_pio_unregister_range(range);
++
++ return 0;
++}
++
+ static const struct of_device_id hisi_lpc_of_match[] = {
+ { .compatible = "hisilicon,hip06-lpc", },
+ { .compatible = "hisilicon,hip07-lpc", },
+@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
+ .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
+ },
+ .probe = hisi_lpc_probe,
++ .remove = hisi_lpc_remove,
+ };
+ builtin_platform_driver(hisi_lpc_driver);
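
The hisi_lpc changes fill in hostdata, ops and the io_host back-pointer before logic_pio_register_range() publishes the range, and add a remove path that unwinds in reverse probe order. The ordering matters because a consumer may use the object the instant it is registered. A toy sketch of the publish-after-full-init rule; the registry below is not the logic_pio API:

    #include <stdio.h>

    struct range {
        void (*ops)(void);                /* consumers call through this */
        void *hostdata;
    };

    static struct range *published;

    static void register_range(struct range *r)
    {
        published = r;                    /* visible to consumers from here */
        if (published->ops)               /* a consumer may fire immediately */
            published->ops();
    }

    static void do_io(void) { puts("ops invoked after full init"); }

    int main(void)
    {
        static int host;
        struct range r = { 0 };

        /* fill in every field the consumer needs, then publish */
        r.hostdata = &host;
        r.ops = do_io;
        register_range(&r);
        return 0;
    }
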
+diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
+index f79eede71c62..edefa669153f 100644
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
+ unsigned long flags;
+ unsigned int i;
+
++	/* If there's no device, there's nothing to do */
++ if (!ccp)
++ return 0;
++
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 1;
+@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
+ unsigned long flags;
+ unsigned int i;
+
++	/* If there's no device, there's nothing to do */
++ if (!ccp)
++ return 0;
++
+ spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+ ccp->suspending = 0;
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 89d710899010..de8bfd9a76e9 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -142,7 +142,7 @@ enum d40_events {
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+-static u32 d40_backup_regs[] = {
++static __maybe_unused u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
+
+ #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
+
+-static u32 d40_backup_regs_chan[] = {
++static __maybe_unused u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index d6e919d3936a..1311de74bfdd 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
+
+ chan = &dmadev->chan[id];
+ if (!chan) {
+- dev_err(chan2dev(chan), "MDMA channel not initialized\n");
++ dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
+ goto exit;
+ }
+
+diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
+index ba2489d4ea24..ba27802efcd0 100644
+--- a/drivers/dma/ti/omap-dma.c
++++ b/drivers/dma/ti/omap-dma.c
+@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+ if (src_icg) {
+ d->ccr |= CCR_SRC_AMODE_DBLIDX;
+ d->ei = 1;
+- d->fi = src_icg;
++ d->fi = src_icg + 1;
+ } else if (xt->src_inc) {
+ d->ccr |= CCR_SRC_AMODE_POSTINC;
+ d->fi = 0;
+@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
+ if (dst_icg) {
+ d->ccr |= CCR_DST_AMODE_DBLIDX;
+ sg->ei = 1;
+- sg->fi = dst_icg;
++ sg->fi = dst_icg + 1;
+ } else if (xt->dst_inc) {
+ d->ccr |= CCR_DST_AMODE_POSTINC;
+ sg->fi = 0;
+diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
+index 343153d47e5b..004dc03ccf09 100644
+--- a/drivers/fsi/fsi-scom.c
++++ b/drivers/fsi/fsi-scom.c
+@@ -38,8 +38,7 @@
+ #define SCOM_STATUS_PIB_RESP_MASK 0x00007000
+ #define SCOM_STATUS_PIB_RESP_SHIFT 12
+
+-#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \
+- SCOM_STATUS_PROTECTION | \
++#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
+ SCOM_STATUS_PARITY | \
+ SCOM_STATUS_PIB_ABORT | \
+ SCOM_STATUS_PIB_RESP_MASK)
+@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
+ /* Return -EBUSY on PIB abort to force a retry */
+ if (status & SCOM_STATUS_PIB_ABORT)
+ return -EBUSY;
+- if (status & SCOM_STATUS_ERR_SUMMARY) {
+- fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
+- sizeof(uint32_t));
+- return -EIO;
+- }
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+index 9b384a94d2f3..3e35a8f2c5e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
++ { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 0332177c0302..2a3090c45e6b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -588,14 +588,14 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+- break;
+- if ((adev->gfx.rlc_fw_version != 106 &&
+- adev->gfx.rlc_fw_version < 531) ||
+- (adev->gfx.rlc_fw_version == 53815) ||
+- (adev->gfx.rlc_feature_version < 1) ||
+- !adev->gfx.rlc.is_rlc_v2_1)
++ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
++	    && ((adev->gfx.rlc_fw_version != 106 &&
++ adev->gfx.rlc_fw_version < 531) ||
++ (adev->gfx.rlc_fw_version == 53815) ||
++ (adev->gfx.rlc_feature_version < 1) ||
++ !adev->gfx.rlc.is_rlc_v2_1))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
++
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_CP |
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 2854399856ba..4aebe21e6ad9 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+
+
+ /* Enable extended register access */
+- ast_enable_mmio(dev);
+ ast_open_key(ast);
++ ast_enable_mmio(dev);
+
+ /* Find out whether P2A works or whether to use device-tree */
+ ast_detect_config_mode(dev, &scu_rev);
+@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
+ {
+ struct ast_private *ast = dev->dev_private;
+
++ /* enable standard VGA decode */
++ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
++
+ ast_release_firmware(dev);
+ kfree(ast->dp501_fw_addr);
+ ast_mode_fini(dev);
+diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
+index 97fed0627d1c..74da15a3341a 100644
+--- a/drivers/gpu/drm/ast/ast_mode.c
++++ b/drivers/gpu/drm/ast/ast_mode.c
+@@ -601,7 +601,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ return -EINVAL;
+ ast_open_key(ast);
+
+- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
++ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index f7d421359d56..c1d1ac51d1c2 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
+ {
+ struct ast_private *ast = dev->dev_private;
+
+- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
++ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
+ }
+
+
+diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
+index 3a8af9978ebd..791f164bdadc 100644
+--- a/drivers/gpu/drm/bridge/ti-tfp410.c
++++ b/drivers/gpu/drm/bridge/ti-tfp410.c
+@@ -66,7 +66,12 @@ static int tfp410_get_modes(struct drm_connector *connector)
+
+ drm_connector_update_edid_property(connector, edid);
+
+- return drm_add_edid_modes(connector, edid);
++ ret = drm_add_edid_modes(connector, edid);
++
++ kfree(edid);
++
++ return ret;
++
+ fallback:
+ /* No EDID, fallback on the XGA standard modes */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 1ad88e6d7c04..d485d49c473b 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -1569,6 +1569,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+
+ pci_set_master(pdev);
+
++ /*
++ * We don't have a max segment size, so set it to the max so sg's
++ * debugging layer doesn't complain
++ */
++ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
++
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (IS_GEN(dev_priv, 2)) {
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
+diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
+index 94d3992b599d..724627afdedc 100644
+--- a/drivers/gpu/drm/i915/i915_vgpu.c
++++ b/drivers/gpu/drm/i915/i915_vgpu.c
+@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
+ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
+ struct drm_mm_node *node)
+ {
++ if (!drm_mm_node_allocated(node))
++ return;
++
+ DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
+ node->start,
+ node->start + node->size,
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 8839eaea8371..d89120dcac67 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -535,7 +535,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
+
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+- drm_connector_attach_max_bpc_property(connector, 6, 12);
++
++ /*
++ * Reuse the prop from the SST connector because we're
++ * not allowed to create new props after device registration.
++ */
++ connector->max_bpc_property =
++ intel_dp->attached_connector->base.max_bpc_property;
++ if (connector->max_bpc_property)
++ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+ return connector;
+
+diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
+index 3f9921ba4a76..eb978e7238c2 100644
+--- a/drivers/gpu/drm/i915/intel_vdsc.c
++++ b/drivers/gpu/drm/i915/intel_vdsc.c
+@@ -539,7 +539,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+- if (encoder->type == INTEL_OUTPUT_EDP) {
++ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index 35ddbec1375a..671c90f34ede 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+ rmb(); /* for list_empty to work without lock */
+
+ if (list_empty(&entity->list) ||
+- spsc_queue_peek(&entity->job_queue) == NULL)
++ spsc_queue_count(&entity->job_queue) == 0)
+ return true;
+
+ return false;
+@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
+ /* Consumption of existing IBs wasn't completed. Forcefully
+ * remove them here.
+ */
+- if (spsc_queue_peek(&entity->job_queue)) {
++ if (spsc_queue_count(&entity->job_queue)) {
+ if (sched) {
+ /* Park the kernel for a moment to make sure it isn't processing
+			 * our entity.
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 34e2b3f9d540..4effce12607b 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -3769,8 +3769,6 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
+ { /* Logitech G502 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
+- { /* Logitech G700 Gaming Mouse over USB */
+- HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G700s Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
+ { /* Logitech G703 Gaming Mouse over USB */
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index c0378c3de9a4..91dfeba62485 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -164,6 +164,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
+ .driver_data = (kernel_ulong_t)0,
+ },
++ {
++ /* Lewisburg PCH */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
++ .driver_data = (kernel_ulong_t)0,
++ },
+ {
+ /* Gemini Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
+@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Tiger Lake PCH */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ { 0 },
+ };
+
+diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
+index e55b902560de..181e7ff1ec4f 100644
+--- a/drivers/hwtracing/stm/core.c
++++ b/drivers/hwtracing/stm/core.c
+@@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent,
+
+ err:
+ put_device(&src->dev);
+- kfree(src);
+
+ return err;
+ }
+diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
+index 35b302d983e0..959d4912ec0d 100644
+--- a/drivers/i2c/busses/i2c-emev2.c
++++ b/drivers/i2c/busses/i2c-emev2.c
+@@ -69,6 +69,7 @@ struct em_i2c_device {
+ struct completion msg_done;
+ struct clk *sclk;
+ struct i2c_client *slave;
++ int irq;
+ };
+
+ static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
+@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
+
+ writeb(0, priv->base + I2C_OFS_SVA0);
+
++ /*
++ * Wait for interrupt to finish. New slave irqs cannot happen because we
++ * cleared the slave address and, thus, only extension codes will be
++ * detected which do not use the slave ptr.
++ */
++ synchronize_irq(priv->irq);
+ priv->slave = NULL;
+
+ return 0;
+@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
+ {
+ struct em_i2c_device *priv;
+ struct resource *r;
+- int irq, ret;
++ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
+
+ em_i2c_reset(&priv->adap);
+
+- irq = platform_get_irq(pdev, 0);
+- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
++ priv->irq = platform_get_irq(pdev, 0);
++ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
+ "em_i2c", priv);
+ if (ret)
+ goto err_clk;
+@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_clk;
+
+- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
++ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
++ priv->irq);
+
+ return 0;
+
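
This hunk (and the matching one in i2c-rcar.c below) stores the IRQ number so unregistration can call synchronize_irq() after disabling the source and before clearing the slave pointer that the handler dereferences. A hedged user-space analogy in which a mutex plays the role of the IRQ core's in-flight tracking:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;
    static int *slave;                    /* cleared by the unregister path */

    static void irq_handler(void)
    {
        pthread_mutex_lock(&handler_lock);   /* "in flight" while held */
        if (slave)
            printf("slave event: %d\n", *slave);
        pthread_mutex_unlock(&handler_lock);
    }

    static void unreg_slave(void)
    {
        /* source already disabled (mirrors clearing the address register);
         * taking the lock waits out any handler still running */
        pthread_mutex_lock(&handler_lock);
        slave = NULL;                     /* safe: no handler mid-dereference */
        pthread_mutex_unlock(&handler_lock);
    }

    int main(void)
    {
        int dev = 42;

        slave = &dev;
        irq_handler();
        unreg_slave();
        irq_handler();                    /* sees NULL and does nothing */
        return 0;
    }
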
+diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
+index c46c4bddc7ca..cba325eb852f 100644
+--- a/drivers/i2c/busses/i2c-piix4.c
++++ b/drivers/i2c/busses/i2c-piix4.c
+@@ -91,7 +91,7 @@
+ #define SB800_PIIX4_PORT_IDX_MASK 0x06
+ #define SB800_PIIX4_PORT_IDX_SHIFT 1
+
+-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
++/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+ #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
+ #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+ #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
+@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
+ /* Find which register is used for port selection */
+ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
+ PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
+- switch (PIIX4_dev->device) {
+- case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
++ if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
++ (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
++ PIIX4_dev->revision >= 0x1F)) {
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+- break;
+- case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+- default:
++ } else {
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+- break;
+ }
+ } else {
+ if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index d39a4606f72d..531c01100b56 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
+ enum dma_data_direction dma_direction;
+
+ struct reset_control *rstc;
++ int irq;
+ };
+
+ #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
+@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
+
+ WARN_ON(!priv->slave);
+
++ /* disable irqs and ensure none is running before clearing ptr */
+ rcar_i2c_write(priv, ICSIER, 0);
+ rcar_i2c_write(priv, ICSCR, 0);
+
++ synchronize_irq(priv->irq);
+ priv->slave = NULL;
+
+ pm_runtime_put(rcar_i2c_priv_to_dev(priv));
+@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ struct i2c_adapter *adap;
+ struct device *dev = &pdev->dev;
+ struct i2c_timings i2c_t;
+- int irq, ret;
++ int ret;
+
+ /* Otherwise logic will break because some bytes must always use PIO */
+ BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
+@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ pm_runtime_put(dev);
+
+
+- irq = platform_get_irq(pdev, 0);
+- ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv);
++ priv->irq = platform_get_irq(pdev, 0);
++ ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
+ if (ret < 0) {
+- dev_err(dev, "cannot get irq %d\n", irq);
++ dev_err(dev, "cannot get irq %d\n", priv->irq);
+ goto out_pm_disable;
+ }
+
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index e4b13a32692a..5e5f7dd82c50 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -114,10 +114,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
+ * prevent any further fault handling on this MR.
+ */
+ ib_umem_notifier_start_account(umem_odp);
+- umem_odp->dying = 1;
+- /* Make sure that the fact the umem is dying is out before we release
+- * all pending page faults. */
+- smp_wmb();
+ complete_all(&umem_odp->notifier_completion);
+ umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
+ ib_umem_end(umem));
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index f6e5351ba4d5..fda3dfd6f87b 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -581,7 +581,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ u32 flags)
+ {
+ int npages = 0, current_seq, page_shift, ret, np;
+- bool implicit = false;
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
+ bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
+ bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
+@@ -596,7 +595,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ if (IS_ERR(odp))
+ return PTR_ERR(odp);
+ mr = odp->private;
+- implicit = true;
+ } else {
+ odp = odp_mr;
+ }
+@@ -684,19 +682,15 @@ next_mr:
+
+ out:
+ if (ret == -EAGAIN) {
+- if (implicit || !odp->dying) {
+- unsigned long timeout =
+- msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
+-
+- if (!wait_for_completion_timeout(
+- &odp->notifier_completion,
+- timeout)) {
+- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+- current_seq, odp->notifiers_seq, odp->notifiers_count);
+- }
+- } else {
+- /* The MR is being killed, kill the QP as well. */
+- ret = -EFAULT;
++ unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
++
++ if (!wait_for_completion_timeout(&odp->notifier_completion,
++ timeout)) {
++ mlx5_ib_warn(
++ dev,
++ "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
++ current_seq, odp->notifiers_seq,
++ odp->notifiers_count);
+ }
+ }
+
+diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
+index 379318266468..8c02d2283d64 100644
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -710,7 +710,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+ * - and wouldn't make the resulting output segment too long
+ */
+ if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
+- (cur_len + s_length <= max_len)) {
++ (max_len - cur_len >= s_length)) {
+ /* ...then concatenate it with the previous one */
+ cur_len += s_length;
+ } else {
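
The one-line dma-iommu change rewrites cur_len + s_length <= max_len as max_len - cur_len >= s_length. Since cur_len never exceeds max_len, the subtraction cannot wrap, while the addition can. A tiny demonstration with contrived values near UINT_MAX:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        unsigned int max_len = UINT_MAX - 4;
        unsigned int cur_len = UINT_MAX - 8;
        unsigned int s_len   = 16;

        /* unsafe: the sum wraps to 7 and compares as small */
        printf("wrapped: %d\n", cur_len + s_len <= max_len);  /* 1 (wrong) */

        /* safe: subtraction cannot wrap while cur_len <= max_len */
        printf("correct: %d\n", max_len - cur_len >= s_len);  /* 0 */
        return 0;
    }
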
+diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
+index 29e3f5da59c1..11ec048929e8 100644
+--- a/drivers/media/platform/omap/omap_vout_vrfb.c
++++ b/drivers/media/platform/omap/omap_vout_vrfb.c
+@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
+ */
+
+ pixsize = vout->bpp * vout->vrfb_bpp;
+- dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
+- (vout->pix.width * vout->bpp)) + 1;
++ dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
+
+ xt->src_start = vout->buf_phy_addr[vb->i];
+ xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
+diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
+index 02d116b01a1a..ac6b252a1ddc 100644
+--- a/drivers/misc/habanalabs/goya/goya.c
++++ b/drivers/misc/habanalabs/goya/goya.c
+@@ -2716,9 +2716,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
+ GOYA_ASYNC_EVENT_ID_PI_UPDATE);
+ }
+
+-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
++void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
+ {
+- /* Not needed in Goya */
++	/* The QMANs are on the SRAM so we need to copy to IO space */
++ memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
+ }
+
+ static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
+@@ -3310,9 +3311,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
+ int rc;
+
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
++ dev_dbg(hdev->dev, "source == 0x%llx\n",
++ le64_to_cpu(user_dma_pkt->src_addr));
++ dev_dbg(hdev->dev, "destination == 0x%llx\n",
++ le64_to_cpu(user_dma_pkt->dst_addr));
++ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
+
+ ctl = le32_to_cpu(user_dma_pkt->ctl);
+ user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+@@ -3341,9 +3344,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
+ struct packet_lin_dma *user_dma_pkt)
+ {
+ dev_dbg(hdev->dev, "DMA packet details:\n");
+- dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
+- dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
+- dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
++ dev_dbg(hdev->dev, "source == 0x%llx\n",
++ le64_to_cpu(user_dma_pkt->src_addr));
++ dev_dbg(hdev->dev, "destination == 0x%llx\n",
++ le64_to_cpu(user_dma_pkt->dst_addr));
++ dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
+
+ /*
+ * WA for HW-23.
+@@ -3383,7 +3388,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
+
+ dev_dbg(hdev->dev, "WREG32 packet details:\n");
+ dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
+- dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
++ dev_dbg(hdev->dev, "value == 0x%x\n",
++ le32_to_cpu(wreg_pkt->value));
+
+ if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
+ dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
+@@ -3425,12 +3431,13 @@ static int goya_validate_cb(struct hl_device *hdev,
+ while (cb_parsed_length < parser->user_cb_size) {
+ enum packet_id pkt_id;
+ u16 pkt_size;
+- void *user_pkt;
++ struct goya_packet *user_pkt;
+
+- user_pkt = (void *) (uintptr_t)
++ user_pkt = (struct goya_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+
+- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
++ pkt_id = (enum packet_id) (
++ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+@@ -3450,7 +3457,8 @@ static int goya_validate_cb(struct hl_device *hdev,
+ * need to validate here as well because patch_cb() is
+ * not called in MMU path while this function is called
+ */
+- rc = goya_validate_wreg32(hdev, parser, user_pkt);
++ rc = goya_validate_wreg32(hdev,
++ parser, (struct packet_wreg32 *) user_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+@@ -3478,10 +3486,10 @@ static int goya_validate_cb(struct hl_device *hdev,
+ case PACKET_LIN_DMA:
+ if (is_mmu)
+ rc = goya_validate_dma_pkt_mmu(hdev, parser,
+- user_pkt);
++ (struct packet_lin_dma *) user_pkt);
+ else
+ rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
+- user_pkt);
++ (struct packet_lin_dma *) user_pkt);
+ break;
+
+ case PACKET_MSG_LONG:
+@@ -3654,15 +3662,16 @@ static int goya_patch_cb(struct hl_device *hdev,
+ enum packet_id pkt_id;
+ u16 pkt_size;
+ u32 new_pkt_size = 0;
+- void *user_pkt, *kernel_pkt;
++ struct goya_packet *user_pkt, *kernel_pkt;
+
+- user_pkt = (void *) (uintptr_t)
++ user_pkt = (struct goya_packet *) (uintptr_t)
+ (parser->user_cb->kernel_address + cb_parsed_length);
+- kernel_pkt = (void *) (uintptr_t)
++ kernel_pkt = (struct goya_packet *) (uintptr_t)
+ (parser->patched_cb->kernel_address +
+ cb_patched_cur_length);
+
+- pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
++ pkt_id = (enum packet_id) (
++ (le64_to_cpu(user_pkt->header) &
+ PACKET_HEADER_PACKET_ID_MASK) >>
+ PACKET_HEADER_PACKET_ID_SHIFT);
+
+@@ -3677,15 +3686,18 @@ static int goya_patch_cb(struct hl_device *hdev,
+
+ switch (pkt_id) {
+ case PACKET_LIN_DMA:
+- rc = goya_patch_dma_packet(hdev, parser, user_pkt,
+- kernel_pkt, &new_pkt_size);
++ rc = goya_patch_dma_packet(hdev, parser,
++ (struct packet_lin_dma *) user_pkt,
++ (struct packet_lin_dma *) kernel_pkt,
++ &new_pkt_size);
+ cb_patched_cur_length += new_pkt_size;
+ break;
+
+ case PACKET_WREG_32:
+ memcpy(kernel_pkt, user_pkt, pkt_size);
+ cb_patched_cur_length += pkt_size;
+- rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
++ rc = goya_validate_wreg32(hdev, parser,
++ (struct packet_wreg32 *) kernel_pkt);
+ break;
+
+ case PACKET_WREG_BULK:
+@@ -4245,6 +4257,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
+ size_t total_pkt_size;
+ long result;
+ int rc;
++ int irq_num_entries, irq_arr_index;
++ __le32 *goya_irq_arr;
+
+ total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
+ irq_arr_size;
+@@ -4262,8 +4276,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
+ if (!pkt)
+ return -ENOMEM;
+
+- pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
+- memcpy(&pkt->irqs, irq_arr, irq_arr_size);
++ irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
++ pkt->length = cpu_to_le32(irq_num_entries);
++
++	/* We must perform any necessary endianness conversion on the irq
++	 * array being passed to the goya hardware
++ */
++ for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
++ irq_arr_index < irq_num_entries ; irq_arr_index++)
++ goya_irq_arr[irq_arr_index] =
++ cpu_to_le32(irq_arr[irq_arr_index]);
+
+ pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
+ ARMCP_PKT_CTL_OPCODE_SHIFT);
+@@ -4778,7 +4800,7 @@ static const struct hl_asic_funcs goya_funcs = {
+ .resume = goya_resume,
+ .cb_mmap = goya_cb_mmap,
+ .ring_doorbell = goya_ring_doorbell,
+- .flush_pq_write = goya_flush_pq_write,
++ .pqe_write = goya_pqe_write,
+ .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
+ .asic_dma_free_coherent = goya_dma_free_coherent,
+ .get_int_queue_base = goya_get_int_queue_base,
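
The unmask-IRQ fix above stops memcpy()ing a CPU-endian u32 array straight into the little-endian packet and converts each element instead, which is a no-op on x86 but required on big-endian hosts. A user-space analog; glibc's htole32() from <endian.h> stands in for the kernel's cpu_to_le32():

    #include <endian.h>    /* htole32(): glibc analog of cpu_to_le32() */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t irq_arr[] = { 0x11, 0x2222, 0x333333 };
        uint32_t wire[3];  /* stands in for the __le32 array in the packet */
        size_t i, n = sizeof(irq_arr) / sizeof(irq_arr[0]);

        /* element-wise conversion; a raw memcpy() is wrong on BE CPUs */
        for (i = 0; i < n; i++)
            wire[i] = htole32(irq_arr[i]);

        for (i = 0; i < n; i++)
            printf("0x%08x\n", wire[i]);
        return 0;
    }
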
+diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
+index c83cab0d641e..e2040fd331ca 100644
+--- a/drivers/misc/habanalabs/goya/goyaP.h
++++ b/drivers/misc/habanalabs/goya/goyaP.h
+@@ -170,7 +170,7 @@ int goya_late_init(struct hl_device *hdev);
+ void goya_late_fini(struct hl_device *hdev);
+
+ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
+-void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val);
++void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
+ void goya_update_eq_ci(struct hl_device *hdev, u32 val);
+ void goya_restore_phase_topology(struct hl_device *hdev);
+ int goya_context_switch(struct hl_device *hdev, u32 asid);
+diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
+index adef7d9d7488..d56ab65d5b2a 100644
+--- a/drivers/misc/habanalabs/habanalabs.h
++++ b/drivers/misc/habanalabs/habanalabs.h
+@@ -449,7 +449,11 @@ enum hl_pll_frequency {
+ * @resume: handles IP specific H/W or SW changes for resume.
+ * @cb_mmap: maps a CB.
+ * @ring_doorbell: increment PI on a given QMAN.
+- * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
++ * @pqe_write: Write the PQ entry to the PQ. This is an ASIC-specific
++ * function because the PQs are located in different memory areas
++ * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
++ * writing the PQE must match the destination memory area
++ * properties.
+ * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
+ * dma_alloc_coherent(). This is ASIC function because
+ * its implementation is not trivial when the driver
+@@ -518,7 +522,8 @@ struct hl_asic_funcs {
+ int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+ u64 kaddress, phys_addr_t paddress, u32 size);
+ void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
+- void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
++ void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
++ struct hl_bd *bd);
+ void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
+diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
+index 2894d8975933..bb7679474727 100644
+--- a/drivers/misc/habanalabs/hw_queue.c
++++ b/drivers/misc/habanalabs/hw_queue.c
+@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
+ struct hl_device *hdev = job->cs->ctx->hdev;
+ struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
+ struct hl_bd bd;
+- u64 *pi, *pbd = (u64 *) &bd;
++ __le64 *pi;
+
+ bd.ctl = 0;
+- bd.len = __cpu_to_le32(job->job_cb_size);
+- bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb);
++ bd.len = cpu_to_le32(job->job_cb_size);
++ bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
+
+- pi = (u64 *) (uintptr_t) (q->kernel_address +
++ pi = (__le64 *) (uintptr_t) (q->kernel_address +
+ ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
+
+- pi[0] = pbd[0];
+- pi[1] = pbd[1];
+-
+ q->pi++;
+ q->pi &= ((q->int_queue_len << 1) - 1);
+
+- /* Flush PQ entry write. Relevant only for specific ASICs */
+- hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
++ hdev->asic_funcs->pqe_write(hdev, pi, &bd);
+
+ hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
+ }
+diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
+index a14407b975e4..ef54bad20509 100644
+--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
++++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
+@@ -52,6 +52,19 @@ enum goya_dma_direction {
+ #define GOYA_PKT_CTL_MB_SHIFT 31
+ #define GOYA_PKT_CTL_MB_MASK 0x80000000
+
++/* All packets have, at least, an 8-byte header, which contains
++ * the packet type. The kernel driver uses the packet header for packet
++ * validation and to perform any required preparation before
++ * sending them off to the hardware.
++ */
++struct goya_packet {
++ __le64 header;
++ /* The rest of the packet data follows. Use the corresponding
++	 * packet_XXX struct to dereference the data, based on packet type
++ */
++ u8 contents[0];
++};
++
+ struct packet_nop {
+ __le32 reserved;
+ __le32 ctl;
+diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
+index ea9f72ff456c..199791b57caf 100644
+--- a/drivers/misc/habanalabs/irq.c
++++ b/drivers/misc/habanalabs/irq.c
+@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+ struct hl_cs_job *job;
+ bool shadow_index_valid;
+ u16 shadow_index;
+- u32 *cq_entry;
+- u32 *cq_base;
++ struct hl_cq_entry *cq_entry, *cq_base;
+
+ if (hdev->disabled) {
+ dev_dbg(hdev->dev,
+@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+ return IRQ_HANDLED;
+ }
+
+- cq_base = (u32 *) (uintptr_t) cq->kernel_address;
++ cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
+
+ while (1) {
+- bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK)
++ bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
++ CQ_ENTRY_READY_MASK)
+ >> CQ_ENTRY_READY_SHIFT);
+
+ if (!entry_ready)
+ break;
+
+- cq_entry = (u32 *) &cq_base[cq->ci];
++ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
+
+- /*
+- * Make sure we read CQ entry contents after we've
++ /* Make sure we read CQ entry contents after we've
+ * checked the ownership bit.
+ */
+ dma_rmb();
+
+- shadow_index_valid =
+- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
++ shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
++ CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
+
+- shadow_index = (u16)
+- ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK)
++ shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
++ CQ_ENTRY_SHADOW_INDEX_MASK)
+ >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
+
+ queue = &hdev->kernel_queues[cq->hw_queue_id];
+@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+ queue_work(hdev->cq_wq, &job->finish_work);
+ }
+
+- /*
+- * Update ci of the context's queue. There is no
++ /* Update ci of the context's queue. There is no
+ * need to protect it with spinlock because this update is
+ * done only inside IRQ and there is a different IRQ per
+ * queue
+@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
+ queue->ci = hl_queue_inc_ptr(queue->ci);
+
+ /* Clear CQ entry ready bit */
+- cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK;
++ cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
++ ~CQ_ENTRY_READY_MASK);
+
+ cq->ci = hl_cq_inc_ptr(cq->ci);
+
+diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
+index 693877e37fd8..924a438ba973 100644
+--- a/drivers/misc/habanalabs/memory.c
++++ b/drivers/misc/habanalabs/memory.c
+@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
+ dev_dbg(hdev->dev,
+ "page list 0x%p of asid %d is still alive\n",
+ phys_pg_list, ctx->asid);
++ atomic64_sub(phys_pg_list->total_size,
++ &hdev->dram_used_mem);
+ free_phys_pg_pack(hdev, phys_pg_list);
+ idr_remove(&vm->phys_pg_pack_handles, i);
+ }
+diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
+index 17f839dee976..018da2c3f92b 100644
+--- a/drivers/misc/lkdtm/bugs.c
++++ b/drivers/misc/lkdtm/bugs.c
+@@ -22,7 +22,7 @@ struct lkdtm_list {
+ * recurse past the end of THREAD_SIZE by default.
+ */
+ #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
++#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
+ #else
+ #define REC_STACK_SIZE (THREAD_SIZE / 8)
+ #endif
+@@ -91,7 +91,7 @@ void lkdtm_LOOP(void)
+
+ void lkdtm_EXHAUST_STACK(void)
+ {
+- pr_info("Calling function with %d frame size to depth %d ...\n",
++ pr_info("Calling function with %lu frame size to depth %d ...\n",
+ REC_STACK_SIZE, recur_count);
+ recursive_loop(recur_count);
+ pr_info("FAIL: survived without exhausting stack?!\n");
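
The lkdtm change gives REC_STACK_SIZE a definite unsigned long type via _AC(), so the %lu format in lkdtm_EXHAUST_STACK() matches in both branches of the #if. The C-side expansion of _AC() is plain token pasting, matching the definition in the kernel's include/uapi/linux/const.h; FRAME_WARN below is a stand-in for the Kconfig value:

    #include <stdio.h>

    /* paste a type suffix onto a bare literal so it has a definite type;
     * the two-level macro lets FRAME_WARN expand before the paste */
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)

    #define FRAME_WARN  1024              /* stand-in for CONFIG_FRAME_WARN */
    #define REC_STACK_SIZE (_AC(FRAME_WARN, UL) / 2)

    int main(void)
    {
        /* %lu now matches: the expression is unsigned long, not int */
        printf("frame size: %lu\n", REC_STACK_SIZE);
        return 0;
    }
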
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index 6c0173772162..77f7dff7098d 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -81,6 +81,8 @@
+
+ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+
++#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
++
+ #define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
+ #define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 57cb68f5cc64..541538eff8b1 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
++
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
+diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
+index bad89b6e0802..345addd9306d 100644
+--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
+@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+
+ entry = container_of(resource, struct dbell_entry, resource);
+ if (entry->run_delayed) {
+- schedule_work(&entry->work);
++ if (!schedule_work(&entry->work))
++ vmci_resource_put(resource);
+ } else {
+ entry->notify_cb(entry->client_data);
+ vmci_resource_put(resource);
+@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
+ atomic_read(&dbell->active) == 1) {
+ if (dbell->run_delayed) {
+ vmci_resource_get(&dbell->resource);
+- schedule_work(&dbell->work);
++ if (!schedule_work(&dbell->work))
++ vmci_resource_put(&dbell->resource);
+ } else {
+ dbell->notify_cb(dbell->client_data);
+ }
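
The vmci_doorbell hunks rely on schedule_work() returning false when the work item is already pending; in that case no additional worker will run, so the reference just taken on its behalf must be dropped immediately or it leaks. A toy model of that refcount rule; enqueue() mimics schedule_work()'s return convention:

    #include <stdbool.h>
    #include <stdio.h>

    static int refcount = 1;              /* base reference */
    static bool queued;

    static void get_ref(void) { refcount++; }
    static void put_ref(void) { refcount--; }

    /* mimics schedule_work(): returns false if already pending */
    static bool enqueue(void)
    {
        if (queued)
            return false;
        queued = true;
        return true;
    }

    static void fire(void)
    {
        get_ref();                        /* reference for the worker */
        if (!enqueue())
            put_ref();                    /* no new worker: release it now */
    }

    int main(void)
    {
        fire();                           /* queues; worker holds one ref */
        fire();                           /* already queued; ref dropped */
        printf("refcount: %d (base + queued worker)\n", refcount); /* 2 */
        return 0;
    }
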
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index d681e8aaca83..fe914ff5f5d6 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
+ goto err;
+ }
+
++ /*
++	 * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
++	 * these bits as invalid, especially bit 7.
++ */
++ ocr &= ~0x7FFF;
++
+ rocr = mmc_select_voltage(host, ocr);
+
+ /*
+diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
+index 163d1cf4367e..44139fceac24 100644
+--- a/drivers/mmc/host/sdhci-cadence.c
++++ b/drivers/mmc/host/sdhci-cadence.c
+@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
+ host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
+ host->mmc_host_ops.hs400_enhanced_strobe =
+ sdhci_cdns_hs400_enhanced_strobe;
++ sdhci_enable_v4_mode(host);
+
+ sdhci_get_of_property(pdev);
+
+diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
+index e377b9bc55a4..d4993582f0f6 100644
+--- a/drivers/mmc/host/sdhci-of-at91.c
++++ b/drivers/mmc/host/sdhci-of-at91.c
+@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
++ /* HS200 is broken at this moment */
++ host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
++
+ ret = sdhci_add_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 06f84a4d79e0..fc892a8d882f 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -174,10 +174,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+ u32 div, val, mask;
+
+- div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
++ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+- clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
+- sdhci_enable_clk(host, clk);
++ div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
++ div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
++ sdhci_enable_clk(host, div);
+
+ /* enable auto gate sdhc_enable_auto_gate */
+ val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
+@@ -284,6 +285,17 @@ static void sdhci_sprd_hw_reset(struct sdhci_host *host)
+ usleep_range(300, 500);
+ }
+
++static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
++{
++	/* The Spreadtrum controller's actual maximum timeout count is 1 << 31 */
++ return 1 << 31;
++}
++
++static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
++{
++ return 0;
++}
++
+ static struct sdhci_ops sdhci_sprd_ops = {
+ .read_l = sdhci_sprd_readl,
+ .write_l = sdhci_sprd_writel,
+@@ -295,6 +307,8 @@ static struct sdhci_ops sdhci_sprd_ops = {
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
+ .hw_reset = sdhci_sprd_hw_reset,
++ .get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
++ .get_ro = sdhci_sprd_get_ro,
+ };
+
+ static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+@@ -318,9 +332,12 @@ static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ }
+
+ static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
+- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
++ .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
++ SDHCI_QUIRK_MISSING_CAPS,
+ .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
+- SDHCI_QUIRK2_USE_32BIT_BLK_CNT,
++ SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
++ SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .ops = &sdhci_sprd_ops,
+ };
+
+@@ -386,6 +403,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
+
+ sdhci_enable_v4_mode(host);
+
++ /*
++ * Supply the existing CAPS, but clear the UHS-I modes. This
++ * will allow these modes to be specified only by device
++ * tree properties through mmc_of_parse().
++ */
++ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
++ SDHCI_SUPPORT_DDR50);
++
+ ret = sdhci_setup_host(host);
+ if (ret)
+ goto pm_runtime_disable;
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 781a3e106d9a..e6b0f21679c1 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
+ }
+ }
+
++static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
++{
++ /*
++ * Write-enable shall be assumed if GPIO is missing in a board's
++ * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
++ * Tegra.
++ */
++ return mmc_gpio_get_ro(host->mmc);
++}
++
+ static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
+ {
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
+ };
+
+ static const struct sdhci_ops tegra_sdhci_ops = {
++ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
+@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
+ };
+
+ static const struct sdhci_ops tegra114_sdhci_ops = {
++ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_w = tegra_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
+ };
+
+ static const struct sdhci_ops tegra210_sdhci_ops = {
++ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_w = tegra210_sdhci_writew,
+ .write_l = tegra_sdhci_writel,
+@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
+ };
+
+ static const struct sdhci_ops tegra186_sdhci_ops = {
++ .get_ro = tegra_sdhci_get_ro,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .set_clock = tegra_sdhci_set_clock,
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 4e3026f9abed..962dbb3acd77 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -2372,6 +2372,7 @@ static int cpsw_probe(struct platform_device *pdev)
+ if (!cpsw)
+ return -ENOMEM;
+
++ platform_set_drvdata(pdev, cpsw);
+ cpsw->dev = dev;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+@@ -2476,7 +2477,6 @@ static int cpsw_probe(struct platform_device *pdev)
+ goto clean_cpts;
+ }
+
+- platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+index a9c846c59289..55b713255b8e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+@@ -80,8 +80,11 @@
+ #define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
+ #define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
+ #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
++#define IWL_QU_C_HR_B_FW_PRE "iwlwifi-Qu-c0-hr-b0-"
+ #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
++#define IWL_QU_C_JF_B_FW_PRE "iwlwifi-Qu-c0-jf-b0-"
+ #define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
++#define IWL_QUZ_A_JF_B_FW_PRE "iwlwifi-QuZ-a0-jf-b0-"
+ #define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
+ #define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
+ #define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
+@@ -106,6 +109,10 @@
+ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+ #define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
++#define IWL_QUZ_A_JF_B_MODULE_FIRMWARE(api) \
++ IWL_QUZ_A_JF_B_FW_PRE __stringify(api) ".ucode"
++#define IWL_QU_C_HR_B_MODULE_FIRMWARE(api) \
++ IWL_QU_C_HR_B_FW_PRE __stringify(api) ".ucode"
+ #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
+ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+ #define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
+@@ -241,6 +248,42 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ };
+
++const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
++ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
++ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
++const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
++ .name = "Intel(R) Wi-Fi 6 AX101",
++ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
++const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
++ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
++ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
+ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+@@ -253,6 +296,42 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ };
+
++const struct iwl_cfg iwl_ax201_cfg_quz_hr = {
++ .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
++ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
++const struct iwl_cfg iwl_ax1650s_cfg_quz_hr = {
++ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
++ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
++const struct iwl_cfg iwl_ax1650i_cfg_quz_hr = {
++ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
++ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
+ const struct iwl_cfg iwl_ax200_cfg_cc = {
+ .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
+ .fw_name_pre = IWL_CC_A_FW_PRE,
+@@ -321,6 +400,30 @@ const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = {
+ IWL_DEVICE_22500,
+ };
+
++const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0 = {
++ .name = "Intel(R) Wireless-AC 9461",
++ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++};
++
++const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0 = {
++ .name = "Intel(R) Wireless-AC 9462",
++ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++};
++
++const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0 = {
++ .name = "Intel(R) Wireless-AC 9560",
++ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++};
++
++const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0 = {
++ .name = "Intel(R) Wireless-AC 9560 160MHz",
++ .fw_name_pre = IWL_QU_C_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++};
++
+ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
+ .name = "Intel(R) Wireless-AC 9560 160MHz",
+ .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE,
+@@ -333,6 +436,90 @@ const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = {
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ };
+
++const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc = {
++ .name = "Intel(R) Wireless-AC 9560 160MHz",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
++const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc = {
++ .name = "Intel(R) Wireless-AC 9560 160MHz",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
++const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc = {
++ .name = "Intel(R) Dual Band Wireless AC 9461",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
++const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc = {
++ .name = "Intel(R) Dual Band Wireless AC 9462",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
++const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc = {
++ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
++const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc = {
++ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
++ .fw_name_pre = IWL_QUZ_A_JF_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++ .integrated = true,
++ .soc_latency = 5000,
++};
++
+ const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
+ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+@@ -369,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+ };
+
++const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
++ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
++ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
++const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
++ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
++ .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
++ IWL_DEVICE_22500,
++ /*
++ * This device doesn't support receiving BlockAck with a large bitmap
++ * so we need to restrict the size of transmitted aggregation to the
++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
++ */
++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
++};
++
+ const struct iwl_cfg iwl22000_2ax_cfg_jf = {
+ .name = "Intel(R) Dual Band Wireless AX 22000",
+ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+@@ -424,12 +635,12 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
+ };
+
+ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
+- .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
++ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE,
+ IWL_DEVICE_AX210,
+ };
+
+-const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
++const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX211 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+@@ -443,8 +654,8 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
+ IWL_DEVICE_AX210,
+ };
+
+-const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
+- .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
++const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
++ .name = "Intel(R) Wi-Fi 7 AX411 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
+ IWL_DEVICE_AX210,
+ };
+@@ -455,8 +666,10 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
++MODULE_FIRMWARE(IWL_QUZ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+ MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index f3e69edf8907..6c04f8223aff 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -540,14 +540,20 @@ extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
+ extern const struct iwl_cfg iwl9270_2ac_cfg;
+ extern const struct iwl_cfg iwl9460_2ac_cfg;
+ extern const struct iwl_cfg iwl9560_2ac_cfg;
++extern const struct iwl_cfg iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+ extern const struct iwl_cfg iwl9560_2ac_160_cfg;
++extern const struct iwl_cfg iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+ extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
++extern const struct iwl_cfg iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+ extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
++extern const struct iwl_cfg iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+ extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc;
+ extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+ extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
++extern const struct iwl_cfg iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc;
++extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc;
+ extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
+ extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
+ extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
+@@ -559,17 +565,30 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+ extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+ extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+ extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
++extern const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0;
+ extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
+ extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+ extern const struct iwl_cfg iwl_ax200_cfg_cc;
++extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
++extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
++extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
++extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
++extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
++extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
+ extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
+ extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
++extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
++extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
+ extern const struct iwl_cfg killer1650x_2ax_cfg;
+ extern const struct iwl_cfg killer1650w_2ax_cfg;
+ extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+ extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
+ extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
+ extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0;
++extern const struct iwl_cfg iwl9461_2ac_cfg_qu_c0_jf_b0;
++extern const struct iwl_cfg iwl9462_2ac_cfg_qu_c0_jf_b0;
++extern const struct iwl_cfg iwl9560_2ac_cfg_qu_c0_jf_b0;
++extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_c0_jf_b0;
+ extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
+ extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
+ extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
+@@ -580,9 +599,9 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
+ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+ extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
+ extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
+-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
++extern const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0;
+ extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
+-extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
++extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
+ #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
+
+ #endif /* __IWL_CONFIG_H__ */
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+index 93da96a7247c..cb4c5514a556 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+@@ -328,6 +328,8 @@ enum {
+ #define CSR_HW_REV_TYPE_NONE (0x00001F0)
+ #define CSR_HW_REV_TYPE_QNJ (0x0000360)
+ #define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
++#define CSR_HW_REV_TYPE_QU_B0 (0x0000334)
++#define CSR_HW_REV_TYPE_QU_C0 (0x0000338)
+ #define CSR_HW_REV_TYPE_QUZ (0x0000354)
+ #define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
+ #define CSR_HW_REV_TYPE_SO (0x0000370)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index cd035061cdd5..54cb4950f32f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -513,62 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+
+ /* 9000 Series */
+- {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
++ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)},
+@@ -610,6 +604,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
++ {IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
+@@ -621,7 +616,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
+- {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+@@ -630,7 +624,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+- {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+@@ -708,7 +701,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+@@ -717,7 +709,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+@@ -764,7 +755,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+@@ -773,7 +763,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+@@ -833,7 +822,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+@@ -842,7 +830,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+@@ -890,63 +877,80 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)},
+
+ /* 22000 Series */
+- {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x2074, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x02F0, 0x4244, iwl_ax101_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1651, iwl_ax1650s_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x1652, iwl_ax1650i_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x2074, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax201_cfg_quz_hr)},
++ {IWL_PCI_DEVICE(0x06F0, 0x4244, iwl_ax101_cfg_quz_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+- {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+- {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+- {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
++ {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0310, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x2074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x2720, 0x4244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x2074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x34F0, 0x4244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0x43F0, 0x4244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax201_cfg_qu_hr)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
+ {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
+- {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr)},
++ {IWL_PCI_DEVICE(0xA0F0, 0x4244, iwl_ax101_cfg_qu_hr)},
+
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+@@ -958,13 +962,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
+
+- {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_so_hr_a0)},
+- {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
++ {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
++ {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
++ {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+
+ #endif /* CONFIG_IWLMVM */
+
+@@ -1028,6 +1039,31 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ }
+ iwl_trans->cfg = cfg;
+ }
++
++ /*
++ * This is a hack to switch from Qu B0 to Qu C0. We need to
++ * do this for all cfgs that use Qu B0. All this code is in
++ * urgent need for a refactor, but for now this is the easiest
++ * thing to do to support Qu C-step.
++ */
++ if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QU_C0) {
++ if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
++ iwl_trans->cfg = &iwl_ax101_cfg_qu_c0_hr_b0;
++ else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
++ iwl_trans->cfg = &iwl_ax201_cfg_qu_c0_hr_b0;
++ else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
++ iwl_trans->cfg = &iwl9461_2ac_cfg_qu_c0_jf_b0;
++ else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
++ iwl_trans->cfg = &iwl9462_2ac_cfg_qu_c0_jf_b0;
++ else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
++ iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
++ else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
++ iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
++ else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
++ iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
++ else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
++ iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
++ }
+ #endif
+
+ pci_set_drvdata(pdev, iwl_trans);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index 199eddea82a9..dc95a5abc4d6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -3569,10 +3569,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
+- trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
++ trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+- trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
++ trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
+ }
+ } else if (cfg == &iwl_ax101_cfg_qu_hr) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+@@ -3600,10 +3600,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ }
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+- ((trans->cfg != &iwl_ax200_cfg_cc &&
+- trans->cfg != &killer1650x_2ax_cfg &&
+- trans->cfg != &killer1650w_2ax_cfg) ||
+- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
++ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ u32 hw_status;
+
+ hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
+index 8ecbf81a906f..889b76deb703 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76.h
++++ b/drivers/net/wireless/mediatek/mt76/mt76.h
+@@ -30,6 +30,7 @@
+ #define MT_TX_RING_SIZE 256
+ #define MT_MCU_RING_SIZE 32
+ #define MT_RX_BUF_SIZE 2048
++#define MT_SKB_HEAD_LEN 128
+
+ struct mt76_dev;
+ struct mt76_wcid;
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+index 2dc67e68c6a2..109309b5d24a 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
+ .release_buffered_frames = mt76_release_buffered_frames,
+ };
+
+-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
++static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
+ {
+ int err;
+
+- mt76x0_chip_onoff(dev, true, true);
++ mt76x0_chip_onoff(dev, true, reset);
+
+ if (!mt76x02_wait_for_mac(&dev->mt76))
+ return -ETIMEDOUT;
+@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
+ if (err < 0)
+ goto out_err;
+
+- err = mt76x0u_init_hardware(dev);
++ err = mt76x0u_init_hardware(dev, true);
+ if (err < 0)
+ goto out_err;
+
+@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
+ if (ret < 0)
+ goto err;
+
+- ret = mt76x0u_init_hardware(dev);
++ ret = mt76x0u_init_hardware(dev, false);
+ if (ret)
+ goto err;
+
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index bbaa1365bbda..dd90427b2d67 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -429,6 +429,42 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+ return dma_len;
+ }
+
++static struct sk_buff *
++mt76u_build_rx_skb(void *data, int len, int buf_size)
++{
++ struct sk_buff *skb;
++
++ if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
++ struct page *page;
++
++ /* slow path, not enough space for data and
++ * skb_shared_info
++ */
++ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
++ if (!skb)
++ return NULL;
++
++ skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
++ data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
++ page = virt_to_head_page(data);
++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++ page, data - page_address(page),
++ len - MT_SKB_HEAD_LEN, buf_size);
++
++ return skb;
++ }
++
++ /* fast path */
++ skb = build_skb(data, buf_size);
++ if (!skb)
++ return NULL;
++
++ skb_reserve(skb, MT_DMA_HDR_LEN);
++ __skb_put(skb, len);
++
++ return skb;
++}
++
+ static int
+ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+ {
+@@ -446,19 +482,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+ return 0;
+
+ data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
+- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
+- dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
+- return 0;
+- }
+-
+- skb = build_skb(data, q->buf_size);
++ skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
+ if (!skb)
+ return 0;
+
+- skb_reserve(skb, MT_DMA_HDR_LEN);
+- __skb_put(skb, data_len);
+ len -= data_len;
+-
+ while (len > 0 && nsgs < urb->num_sgs) {
+ data_len = min_t(int, len, urb->sg[nsgs].length);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5deb4deb3820..601509b3251a 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1268,6 +1268,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+ */
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ mutex_lock(&ctrl->scan_lock);
++ mutex_lock(&ctrl->subsys->lock);
++ nvme_mpath_start_freeze(ctrl->subsys);
++ nvme_mpath_wait_freeze(ctrl->subsys);
+ nvme_start_freeze(ctrl);
+ nvme_wait_freeze(ctrl);
+ }
+@@ -1298,6 +1301,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
+ nvme_update_formats(ctrl);
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ nvme_unfreeze(ctrl);
++ nvme_mpath_unfreeze(ctrl->subsys);
++ mutex_unlock(&ctrl->subsys->lock);
+ mutex_unlock(&ctrl->scan_lock);
+ }
+ if (effects & NVME_CMD_EFFECTS_CCC)
+@@ -1668,6 +1673,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ if (ns->head->disk) {
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
++ revalidate_disk(ns->head->disk);
+ }
+ #endif
+ }
+@@ -2439,6 +2445,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ if (ret) {
+ dev_err(ctrl->device,
+ "failed to register subsystem device.\n");
++ put_device(&subsys->dev);
+ goto out_unlock;
+ }
+ ida_init(&subsys->ns_ida);
+@@ -2461,7 +2468,6 @@ out_put_subsystem:
+ nvme_put_subsystem(subsys);
+ out_unlock:
+ mutex_unlock(&nvme_subsystems_lock);
+- put_device(&subsys->dev);
+ return ret;
+ }
+
+@@ -3523,6 +3529,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(ns_list);
+
++ /*
++ * make sure to requeue I/O to all namespaces as these
++ * might result from the scan itself and must complete
++ * for the scan_work to make progress
++ */
++ nvme_mpath_clear_ctrl_paths(ctrl);
++
+ /* prevent racing with ns scanning */
+ flush_work(&ctrl->scan_work);
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index e942b3e84068..747c0d4f9ff5 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -12,6 +12,36 @@ module_param(multipath, bool, 0444);
+ MODULE_PARM_DESC(multipath,
+ "turn on native support for multiple controllers per subsystem");
+
++void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
++{
++ struct nvme_ns_head *h;
++
++ lockdep_assert_held(&subsys->lock);
++ list_for_each_entry(h, &subsys->nsheads, entry)
++ if (h->disk)
++ blk_mq_unfreeze_queue(h->disk->queue);
++}
++
++void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
++{
++ struct nvme_ns_head *h;
++
++ lockdep_assert_held(&subsys->lock);
++ list_for_each_entry(h, &subsys->nsheads, entry)
++ if (h->disk)
++ blk_mq_freeze_queue_wait(h->disk->queue);
++}
++
++void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
++{
++ struct nvme_ns_head *h;
++
++ lockdep_assert_held(&subsys->lock);
++ list_for_each_entry(h, &subsys->nsheads, entry)
++ if (h->disk)
++ blk_freeze_queue_start(h->disk->queue);
++}
++
+ /*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+@@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
+ [NVME_ANA_CHANGE] = "change",
+ };
+
+-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
++bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+ {
+ struct nvme_ns_head *head = ns->head;
++ bool changed = false;
+ int node;
+
+ if (!head)
+- return;
++ goto out;
+
+ for_each_node(node) {
+- if (ns == rcu_access_pointer(head->current_path[node]))
++ if (ns == rcu_access_pointer(head->current_path[node])) {
+ rcu_assign_pointer(head->current_path[node], NULL);
++ changed = true;
++ }
+ }
++out:
++ return changed;
++}
++
++void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
++{
++ struct nvme_ns *ns;
++
++ mutex_lock(&ctrl->scan_lock);
++ list_for_each_entry(ns, &ctrl->namespaces, list)
++ if (nvme_mpath_clear_current_path(ns))
++ kblockd_schedule_work(&ns->head->requeue_work);
++ mutex_unlock(&ctrl->scan_lock);
+ }
+
+ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
+@@ -218,6 +264,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
+ return ns;
+ }
+
++static bool nvme_available_path(struct nvme_ns_head *head)
++{
++ struct nvme_ns *ns;
++
++ list_for_each_entry_rcu(ns, &head->list, siblings) {
++ switch (ns->ctrl->state) {
++ case NVME_CTRL_LIVE:
++ case NVME_CTRL_RESETTING:
++ case NVME_CTRL_CONNECTING:
++ /* fallthru */
++ return true;
++ default:
++ break;
++ }
++ }
++ return false;
++}
++
+ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
+ struct bio *bio)
+ {
+@@ -244,14 +308,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
+ disk_devt(ns->head->disk),
+ bio->bi_iter.bi_sector);
+ ret = direct_make_request(bio);
+- } else if (!list_empty_careful(&head->list)) {
+- dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
++ } else if (nvme_available_path(head)) {
++ dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
+
+ spin_lock_irq(&head->requeue_lock);
+ bio_list_add(&head->requeue_list, bio);
+ spin_unlock_irq(&head->requeue_lock);
+ } else {
+- dev_warn_ratelimited(dev, "no path - failing I/O\n");
++ dev_warn_ratelimited(dev, "no available path - failing I/O\n");
+
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 7391cd0a7739..81215ca32671 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -477,6 +477,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+ return ctrl->ana_log_buf != NULL;
+ }
+
++void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
++void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
++void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags);
+ void nvme_failover_req(struct request *req);
+@@ -487,7 +490,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
+ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+ void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
+ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
+-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
++bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
++void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
+ struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+@@ -535,7 +539,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
+ static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ {
+ }
+-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
++static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
++{
++ return false;
++}
++static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+ {
+ }
+ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+@@ -555,6 +563,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
+ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
+ {
+ }
++static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
++{
++}
++static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
++{
++}
++static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
++{
++}
+ #endif /* CONFIG_NVME_MULTIPATH */
+
+ #ifdef CONFIG_NVM
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f9959eaaa185..09ffd21d1809 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2712,7 +2712,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
+ {
+ struct nvme_dev *dev = data;
+
+- nvme_reset_ctrl_sync(&dev->ctrl);
++ flush_work(&dev->ctrl.reset_work);
+ flush_work(&dev->ctrl.scan_work);
+ nvme_put_ctrl(&dev->ctrl);
+ }
+@@ -2778,6 +2778,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
++ nvme_reset_ctrl(&dev->ctrl);
+ nvme_get_ctrl(&dev->ctrl);
+ async_schedule(nvme_async_probe, dev);
+
+diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
+index 97f668a39ae1..7b074323bcdf 100644
+--- a/drivers/nvme/host/rdma.c
++++ b/drivers/nvme/host/rdma.c
+@@ -562,13 +562,17 @@ out_destroy_cm_id:
+ return ret;
+ }
+
++static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
++{
++ rdma_disconnect(queue->cm_id);
++ ib_drain_qp(queue->qp);
++}
++
+ static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+ {
+ if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+ return;
+-
+- rdma_disconnect(queue->cm_id);
+- ib_drain_qp(queue->qp);
++ __nvme_rdma_stop_queue(queue);
+ }
+
+ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
+@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
+ else
+ ret = nvmf_connect_admin_queue(&ctrl->ctrl);
+
+- if (!ret)
++ if (!ret) {
+ set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
+- else
++ } else {
++ __nvme_rdma_stop_queue(queue);
+ dev_info(ctrl->ctrl.device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
++ }
+ return ret;
+ }
+
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 08dd5af357f7..3854363118cc 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -673,6 +673,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
+
+ found:
+ list_del(&p->entry);
++ nvmet_port_del_ctrls(port, subsys);
+ nvmet_port_disc_changed(port, subsys);
+
+ if (list_empty(&port->subsystems))
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 7734a6acff85..396cbc7ea353 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -43,6 +43,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+ u16 status;
+
+ switch (errno) {
++ case 0:
++ status = NVME_SC_SUCCESS;
++ break;
+ case -ENOSPC:
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+@@ -277,6 +280,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
+ }
+ EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
+
++void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
++{
++ struct nvmet_ctrl *ctrl;
++
++ mutex_lock(&subsys->lock);
++ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
++ if (ctrl->port == port)
++ ctrl->ops->delete_ctrl(ctrl);
++ }
++ mutex_unlock(&subsys->lock);
++}
++
+ int nvmet_enable_port(struct nvmet_port *port)
+ {
+ const struct nvmet_fabrics_ops *ops;
+diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
+index 9e211ad6bdd3..da9cd07461fb 100644
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
+ mutex_lock(&nvme_loop_ports_mutex);
+ list_del_init(&port->entry);
+ mutex_unlock(&nvme_loop_ports_mutex);
++
++ /*
++ * Ensure any ctrls that are in the process of being
++ * deleted are in fact deleted before we return
++ * and free the port. This is to prevent active
++ * ctrls from using a port after it's freed.
++ */
++ flush_workqueue(nvme_delete_wq);
+ }
+
+ static const struct nvmet_fabrics_ops nvme_loop_ops = {
+diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
+index c25d88fc9dec..b6b0d483e0c5 100644
+--- a/drivers/nvme/target/nvmet.h
++++ b/drivers/nvme/target/nvmet.h
+@@ -415,6 +415,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
+ int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
+ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+
++void nvmet_port_del_ctrls(struct nvmet_port *port,
++ struct nvmet_subsys *subsys);
++
+ int nvmet_enable_port(struct nvmet_port *port);
+ void nvmet_disable_port(struct nvmet_port *port);
+
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index 682789bb8ab3..57ed2e2024bf 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -80,8 +80,8 @@
+
+ #define CDNS_MCP_INTSET 0x4C
+
+-#define CDNS_SDW_SLAVE_STAT 0x50
+-#define CDNS_MCP_SLAVE_STAT_MASK BIT(1, 0)
++#define CDNS_MCP_SLAVE_STAT 0x50
++#define CDNS_MCP_SLAVE_STAT_MASK GENMASK(1, 0)
+
+ #define CDNS_MCP_SLAVE_INTSTAT0 0x54
+ #define CDNS_MCP_SLAVE_INTSTAT1 0x58
+@@ -95,8 +95,8 @@
+ #define CDNS_MCP_SLAVE_INTMASK0 0x5C
+ #define CDNS_MCP_SLAVE_INTMASK1 0x60
+
+-#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(30, 0)
+-#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(16, 0)
++#define CDNS_MCP_SLAVE_INTMASK0_MASK GENMASK(31, 0)
++#define CDNS_MCP_SLAVE_INTMASK1_MASK GENMASK(15, 0)
+
+ #define CDNS_MCP_PORT_INTSTAT 0x64
+ #define CDNS_MCP_PDI_STAT 0x6C
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 6a5ee8e6da10..67ad40b0a05b 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
+ struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
+ unsigned long flags;
+
+- spin_lock_irqsave(&ci->lock, flags);
+- ci->gadget.speed = USB_SPEED_UNKNOWN;
+- ci->remote_wakeup = 0;
+- ci->suspended = 0;
+- spin_unlock_irqrestore(&ci->lock, flags);
+-
+ /* flush all endpoints */
+ gadget_for_each_ep(ep, gadget) {
+ usb_ep_fifo_flush(ep);
+@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
+ ci->status = NULL;
+ }
+
++ spin_lock_irqsave(&ci->lock, flags);
++ ci->gadget.speed = USB_SPEED_UNKNOWN;
++ ci->remote_wakeup = 0;
++ ci->suspended = 0;
++ spin_unlock_irqrestore(&ci->lock, flags);
++
+ return 0;
+ }
+
+@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
+ return -EBUSY;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+
+ /* only internal SW should disable ctrl endpts */
+
+@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return 0;
++ }
+ retval = _ep_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return retval;
+@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+ return -EINVAL;
+
+ spin_lock_irqsave(hwep->lock, flags);
+-
+- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
++ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
++ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+ list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+ dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
+ }
+
+ spin_lock_irqsave(hwep->lock, flags);
++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(hwep->lock, flags);
++ return;
++ }
+
+ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
+
+@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
+ int ret = 0;
+
+ spin_lock_irqsave(&ci->lock, flags);
++ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
++ spin_unlock_irqrestore(&ci->lock, flags);
++ return 0;
++ }
+ if (!ci->remote_wakeup) {
+ ret = -EOPNOTSUPP;
+ goto out;
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index 9e9caff905d5..4929c5883068 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
+ {
+ struct wdm_device *desc = file->private_data;
+
+- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
++ wait_event(desc->wait,
++ /*
++ * needs both flags. We cannot do with one
++ * because resetting it would cause a race
++ * with write() yet we need to signal
++ * a disconnect
++ */
++ !test_bit(WDM_IN_USE, &desc->flags) ||
++ test_bit(WDM_DISCONNECTING, &desc->flags));
+
+ /* cannot dereference desc->intf if WDM_DISCONNECTING */
+- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
++ return -ENODEV;
++ if (desc->werr < 0)
+ dev_err(&desc->intf->dev, "Error in flush path: %d\n",
+ desc->werr);
+
+@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
+ spin_lock_irqsave(&desc->iuspin, flags);
+ set_bit(WDM_DISCONNECTING, &desc->flags);
+ set_bit(WDM_READ, &desc->flags);
+- /* to terminate pending flushes */
+- clear_bit(WDM_IN_USE, &desc->flags);
+ spin_unlock_irqrestore(&desc->iuspin, flags);
+ wake_up_all(&desc->wait);
+ mutex_lock(&desc->rlock);
+diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
+index 4942122b2346..36858ddd8d9b 100644
+--- a/drivers/usb/class/usbtmc.c
++++ b/drivers/usb/class/usbtmc.c
+@@ -2362,8 +2362,11 @@ static int usbtmc_probe(struct usb_interface *intf,
+ goto err_put;
+ }
+
++ retcode = -EINVAL;
+ data->bulk_in = bulk_in->bEndpointAddress;
+ data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
++ if (!data->wMaxPacketSize)
++ goto err_put;
+ dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
+
+ data->bulk_out = bulk_out->bEndpointAddress;
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 03432467b05f..7537681355f6 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ /* EHCI, OHCI */
+ hcd->rsrc_start = pci_resource_start(dev, 0);
+ hcd->rsrc_len = pci_resource_len(dev, 0);
+- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+- driver->description)) {
++ if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
++ hcd->rsrc_len, driver->description)) {
+ dev_dbg(&dev->dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto put_hcd;
+ }
+- hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
++ hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
++ hcd->rsrc_len);
+ if (hcd->regs == NULL) {
+ dev_dbg(&dev->dev, "error mapping memory\n");
+ retval = -EFAULT;
+- goto release_mem_region;
++ goto put_hcd;
+ }
+
+ } else {
+@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+
+ hcd->rsrc_start = pci_resource_start(dev, region);
+ hcd->rsrc_len = pci_resource_len(dev, region);
+- if (request_region(hcd->rsrc_start, hcd->rsrc_len,
+- driver->description))
++ if (devm_request_region(&dev->dev, hcd->rsrc_start,
++ hcd->rsrc_len, driver->description))
+ break;
+ }
+ if (region == PCI_ROM_RESOURCE) {
+@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ }
+
+ if (retval != 0)
+- goto unmap_registers;
++ goto put_hcd;
+ device_wakeup_enable(hcd->self.controller);
+
+ if (pci_dev_run_wake(dev))
+ pm_runtime_put_noidle(&dev->dev);
+ return retval;
+
+-unmap_registers:
+- if (driver->flags & HCD_MEMORY) {
+- iounmap(hcd->regs);
+-release_mem_region:
+- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+- } else
+- release_region(hcd->rsrc_start, hcd->rsrc_len);
+ put_hcd:
+ usb_put_hcd(hcd);
+ disable_pci:
+@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
+ dev_set_drvdata(&dev->dev, NULL);
+ up_read(&companions_rwsem);
+ }
+-
+- if (hcd->driver->flags & HCD_MEMORY) {
+- iounmap(hcd->regs);
+- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+- } else {
+- release_region(hcd->rsrc_start, hcd->rsrc_len);
+- }
+-
+ usb_put_hcd(hcd);
+ pci_disable_device(dev);
+ }
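
The hcd-pci conversion works because devm_* resources are tied to the PCI device's lifetime: the region and the mapping are released automatically on probe failure or unbind, which is what lets the unmap_registers/release_mem_region labels and the matching cleanup in usb_hcd_pci_remove() disappear. The managed pattern, sketched for a hypothetical driver (devm_ioremap_nocache() is the 5.2-era API):

    /* Sketch: managed MMIO claim; no explicit unwind path needed. */
    if (!devm_request_mem_region(&pdev->dev, start, len, "my-hcd"))
            return -EBUSY;          /* auto-released on failure/unbind */
    regs = devm_ioremap_nocache(&pdev->dev, start, len);
    if (!regs)
            return -EFAULT;         /* region is still auto-released */
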
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index b8a15840b4ff..dfcabadeed01 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
+ * disconnect callbacks?
+ */
+ spin_lock_irqsave(&cdev->lock, flags);
++ cdev->suspended = 0;
+ if (cdev->config)
+ reset_config(cdev);
+ if (cdev->driver->disconnect)
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 043f97ad8f22..f2bc8d037067 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -261,7 +261,7 @@ struct fsg_common;
+ struct fsg_common {
+ struct usb_gadget *gadget;
+ struct usb_composite_dev *cdev;
+- struct fsg_dev *fsg, *new_fsg;
++ struct fsg_dev *fsg;
+ wait_queue_head_t io_wait;
+ wait_queue_head_t fsg_wait;
+
+@@ -290,6 +290,7 @@ struct fsg_common {
+ unsigned int bulk_out_maxpacket;
+ enum fsg_state state; /* For exception handling */
+ unsigned int exception_req_tag;
++ void *exception_arg;
+
+ enum data_direction data_dir;
+ u32 data_size;
+@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+
+ /* These routines may be called in process context or in_irq */
+
+-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
++static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
++ void *arg)
+ {
+ unsigned long flags;
+
+@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+ if (common->state <= new_state) {
+ common->exception_req_tag = common->ep0_req_tag;
+ common->state = new_state;
++ common->exception_arg = arg;
+ if (common->thread_task)
+ send_sig_info(SIGUSR1, SEND_SIG_PRIV,
+ common->thread_task);
+@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+ spin_unlock_irqrestore(&common->lock, flags);
+ }
+
++static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
++{
++ __raise_exception(common, new_state, NULL);
++}
+
+ /*-------------------------------------------------------------------------*/
+
+@@ -2285,16 +2292,16 @@ reset:
+ static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+ struct fsg_dev *fsg = fsg_from_func(f);
+- fsg->common->new_fsg = fsg;
+- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
++
++ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
+ return USB_GADGET_DELAYED_STATUS;
+ }
+
+ static void fsg_disable(struct usb_function *f)
+ {
+ struct fsg_dev *fsg = fsg_from_func(f);
+- fsg->common->new_fsg = NULL;
+- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
++
++ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
+ }
+
+
+@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
+ enum fsg_state old_state;
+ struct fsg_lun *curlun;
+ unsigned int exception_req_tag;
++ struct fsg_dev *new_fsg;
+
+ /*
+ * Clear the existing signals. Anything but SIGUSR1 is converted
+@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
+ common->next_buffhd_to_fill = &common->buffhds[0];
+ common->next_buffhd_to_drain = &common->buffhds[0];
+ exception_req_tag = common->exception_req_tag;
++ new_fsg = common->exception_arg;
+ old_state = common->state;
+ common->state = FSG_STATE_NORMAL;
+
+@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
+ break;
+
+ case FSG_STATE_CONFIG_CHANGE:
+- do_set_interface(common, common->new_fsg);
+- if (common->new_fsg)
++ do_set_interface(common, new_fsg);
++ if (new_fsg)
+ usb_composite_setup_continue(common->cdev);
+ break;
+
+@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+
+ DBG(fsg, "unbind\n");
+ if (fsg->common->fsg == fsg) {
+- fsg->common->new_fsg = NULL;
+- raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
++ __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
+ /* FIXME: make interruptible or killable somehow? */
+ wait_event(common->fsg_wait, common->fsg != fsg);
+ }
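
The mass-storage rework exists because new_fsg was a free-standing field that could change between raising FSG_STATE_CONFIG_CHANGE and the worker acting on it. Carrying the argument together with the state under common->lock, and snapshotting both before the state is reset, keeps the pair consistent. The shape of it, with hypothetical names:

    /* Sketch: publish (state, arg) atomically; consume them together. */
    spin_lock_irqsave(&c->lock, flags);
    c->state = new_state;
    c->arg   = arg;                 /* belongs to this state change */
    spin_unlock_irqrestore(&c->lock, flags);

    /* worker side */
    spin_lock_irqsave(&c->lock, flags);
    arg      = c->arg;              /* snapshot before resetting state */
    c->state = STATE_NORMAL;
    spin_unlock_irqrestore(&c->lock, flags);
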
+diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
+index 0da68df259c8..7bf621d40c5a 100644
+--- a/drivers/usb/host/fotg210-hcd.c
++++ b/drivers/usb/host/fotg210-hcd.c
+@@ -1628,6 +1628,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ /* see what we found out */
+ temp = check_reset_complete(fotg210, wIndex, status_reg,
+ fotg210_readl(fotg210, status_reg));
++
++ /* restart schedule */
++ fotg210->command |= CMD_RUN;
++ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 210181fd98d2..af11887f5f9e 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -418,8 +418,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
+ * other cases where the next software may expect clean state from the
+ * "firmware". this is bus-neutral, unlike shutdown() methods.
+ */
+-static void
+-ohci_shutdown (struct usb_hcd *hcd)
++static void _ohci_shutdown(struct usb_hcd *hcd)
+ {
+ struct ohci_hcd *ohci;
+
+@@ -435,6 +434,16 @@ ohci_shutdown (struct usb_hcd *hcd)
+ ohci->rh_state = OHCI_RH_HALTED;
+ }
+
++static void ohci_shutdown(struct usb_hcd *hcd)
++{
++ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
++ unsigned long flags;
++
++ spin_lock_irqsave(&ohci->lock, flags);
++ _ohci_shutdown(hcd);
++ spin_unlock_irqrestore(&ohci->lock, flags);
++}
++
+ /*-------------------------------------------------------------------------*
+ * HC functions
+ *-------------------------------------------------------------------------*/
+@@ -752,7 +761,7 @@ static void io_watchdog_func(struct timer_list *t)
+ died:
+ usb_hc_died(ohci_to_hcd(ohci));
+ ohci_dump(ohci);
+- ohci_shutdown(ohci_to_hcd(ohci));
++ _ohci_shutdown(ohci_to_hcd(ohci));
+ goto done;
+ } else {
+ /* No write back because the done queue was empty */
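
The ohci-hcd split is the usual locked-wrapper pattern: _ohci_shutdown() assumes the caller holds ohci->lock (io_watchdog_func() already does), while ohci_shutdown() takes the lock for external callers such as the driver core's .shutdown. Generically:

    /* Sketch: the underscore helper requires the lock; the public
     * entry point acquires it. */
    static void _do_shutdown(struct ctrl *c)    /* caller holds c->lock */
    {
            /* ... halt the hardware ... */
    }

    static void do_shutdown(struct ctrl *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            _do_shutdown(c);
            spin_unlock_irqrestore(&c->lock, flags);
    }
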
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index 8616c52849c6..2b0ccd150209 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
+ return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
+- of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
++ of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
+ }
+
+ static int xhci_rcar_is_gen3(struct device *dev)
+diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
+index cc794e25a0b6..1d9ce9cbc831 100644
+--- a/drivers/usb/storage/realtek_cr.c
++++ b/drivers/usb/storage/realtek_cr.c
+@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
+
+ static int auto_delink_en = 1;
+ module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
+-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
++MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
+
+ #ifdef CONFIG_REALTEK_AUTOPM
+ static int ss_en = 1;
+@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
+ goto INIT_FAIL;
+ }
+
+- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
+- CHECK_FW_VER(chip, 0x5901))
+- SET_AUTO_DELINK(chip);
+- if (STATUS_LEN(chip) == 16) {
+- if (SUPPORT_AUTO_DELINK(chip))
++ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
++ CHECK_PID(chip, 0x0159)) {
++ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
++ CHECK_FW_VER(chip, 0x5901))
+ SET_AUTO_DELINK(chip);
++ if (STATUS_LEN(chip) == 16) {
++ if (SUPPORT_AUTO_DELINK(chip))
++ SET_AUTO_DELINK(chip);
++ }
+ }
+ #ifdef CONFIG_REALTEK_AUTOPM
+ if (ss_en)
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index ea0d27a94afe..1cd9b6305b06 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
+ US_FL_IGNORE_RESIDUE ),
+
+ /* Reported by Michael Büsch <m@bues.ch> */
+-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
++UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index 15abe1d9958f..bcfdb55fd198 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -1446,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
+ else if ((pdo_min_voltage(pdo[i]) ==
+ pdo_min_voltage(pdo[i - 1])) &&
+ (pdo_max_voltage(pdo[i]) ==
+- pdo_min_voltage(pdo[i - 1])))
++ pdo_max_voltage(pdo[i - 1])))
+ return PDO_ERR_DUPE_PDO;
+ break;
+ /*
+diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
+index 560c1c54c177..f4937a91e516 100644
+--- a/drivers/watchdog/bcm2835_wdt.c
++++ b/drivers/watchdog/bcm2835_wdt.c
+@@ -240,6 +240,7 @@ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
++MODULE_ALIAS("platform:bcm2835-wdt");
+ MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+ MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
+ MODULE_LICENSE("GPL");
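
The single added line is an autoload fix: when board code or devicetree creates a platform device named "bcm2835-wdt", the kernel asks kmod for a module aliased "platform:bcm2835-wdt"; without the alias the watchdog only probes when the driver is built in or loaded by hand. For a hypothetical driver the shape is:

    /* Sketch: match modalias "platform:foo-wdt" for autoloading. */
    MODULE_ALIAS("platform:foo-wdt");
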
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 3451be03667f..00033a481ba0 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -502,18 +502,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
+ struct afs_call *call = container_of(work, struct afs_call, work);
+ struct afs_uuid *r = call->request;
+
+- struct {
+- __be32 match;
+- } reply;
+-
+ _enter("");
+
+ if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
+- reply.match = htonl(0);
++ afs_send_empty_reply(call);
+ else
+- reply.match = htonl(1);
++ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
++ 1, 1, "K-1");
+
+- afs_send_simple_reply(call, &reply, sizeof(reply));
+ afs_put_call(call);
+ _leave("");
+ }
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index da9563d62b32..9620f19308f5 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -441,7 +441,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+ * iterate through the data blob that lists the contents of an AFS directory
+ */
+ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+- struct key *key)
++ struct key *key, afs_dataversion_t *_dir_version)
+ {
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
+ struct afs_xdr_dir_page *dbuf;
+@@ -461,6 +461,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
+ req = afs_read_dir(dvnode, key);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
++ *_dir_version = req->data_version;
+
+ /* round the file position up to the next entry boundary */
+ ctx->pos += sizeof(union afs_xdr_dirent) - 1;
+@@ -515,7 +516,10 @@ out:
+ */
+ static int afs_readdir(struct file *file, struct dir_context *ctx)
+ {
+- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
++ afs_dataversion_t dir_version;
++
++ return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
++ &dir_version);
+ }
+
+ /*
+@@ -556,7 +560,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
+ * - just returns the FID the dentry name maps to if found
+ */
+ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
+- struct afs_fid *fid, struct key *key)
++ struct afs_fid *fid, struct key *key,
++ afs_dataversion_t *_dir_version)
+ {
+ struct afs_super_info *as = dir->i_sb->s_fs_info;
+ struct afs_lookup_one_cookie cookie = {
+@@ -569,7 +574,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
+ _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+
+ /* search the directory */
+- ret = afs_dir_iterate(dir, &cookie.ctx, key);
++ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
+ if (ret < 0) {
+ _leave(" = %d [iter]", ret);
+ return ret;
+@@ -643,6 +648,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ struct afs_server *server;
+ struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
+ struct inode *inode = NULL, *ti;
++ afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
+ int ret, i;
+
+ _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
+@@ -670,12 +676,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
+ cookie->fids[i].vid = as->volume->vid;
+
+ /* search the directory */
+- ret = afs_dir_iterate(dir, &cookie->ctx, key);
++ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
+ if (ret < 0) {
+ inode = ERR_PTR(ret);
+ goto out;
+ }
+
++ dentry->d_fsdata = (void *)(unsigned long)data_version;
++
+ inode = ERR_PTR(-ENOENT);
+ if (!cookie->found)
+ goto out;
+@@ -969,7 +977,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ struct dentry *parent;
+ struct inode *inode;
+ struct key *key;
+- long dir_version, de_version;
++ afs_dataversion_t dir_version;
++ long de_version;
+ int ret;
+
+ if (flags & LOOKUP_RCU)
+@@ -1015,20 +1024,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ * on a 32-bit system, we only have 32 bits in the dentry to store the
+ * version.
+ */
+- dir_version = (long)dir->status.data_version;
++ dir_version = dir->status.data_version;
+ de_version = (long)dentry->d_fsdata;
+- if (de_version == dir_version)
+- goto out_valid;
++ if (de_version == (long)dir_version)
++ goto out_valid_noupdate;
+
+- dir_version = (long)dir->invalid_before;
+- if (de_version - dir_version >= 0)
++ dir_version = dir->invalid_before;
++ if (de_version - (long)dir_version >= 0)
+ goto out_valid;
+
+ _debug("dir modified");
+ afs_stat_v(dir, n_reval);
+
+ /* search the directory for this vnode */
+- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
++ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
+ switch (ret) {
+ case 0:
+ /* the filename maps to something */
+@@ -1081,7 +1090,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ }
+
+ out_valid:
+- dentry->d_fsdata = (void *)dir_version;
++ dentry->d_fsdata = (void *)(unsigned long)dir_version;
++out_valid_noupdate:
+ dput(parent);
+ key_put(key);
+ _leave(" = 1 [valid]");
+@@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
+ iget_data->cb_s_break = fc->cbi->server->cb_s_break;
+ }
+
++/*
++ * Note that a dentry got changed. We need to set d_fsdata to the data version
++ * number derived from the result of the operation. It doesn't matter if
++ * d_fsdata goes backwards as we'll just revalidate.
++ */
++static void afs_update_dentry_version(struct afs_fs_cursor *fc,
++ struct dentry *dentry,
++ struct afs_status_cb *scb)
++{
++ if (fc->ac.error == 0)
++ dentry->d_fsdata =
++ (void *)(unsigned long)scb->status.data_version;
++}
++
+ /*
+ * create a directory on an AFS filesystem
+ */
+@@ -1228,6 +1252,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
++ afs_update_dentry_version(&fc, dentry, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
+ ret = afs_end_vnode_operation(&fc);
+ if (ret < 0)
+@@ -1320,6 +1345,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
+
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, scb);
++ afs_update_dentry_version(&fc, dentry, scb);
+ ret = afs_end_vnode_operation(&fc);
+ if (ret == 0) {
+ afs_dir_remove_subdir(dentry);
+@@ -1461,6 +1487,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
+ &data_version, &scb[0]);
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
+ &data_version_2, &scb[1]);
++ afs_update_dentry_version(&fc, dentry, &scb[0]);
+ ret = afs_end_vnode_operation(&fc);
+ if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
+ ret = afs_dir_remove_link(dvnode, dentry, key);
+@@ -1529,6 +1556,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
++ afs_update_dentry_version(&fc, dentry, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
+ ret = afs_end_vnode_operation(&fc);
+ if (ret < 0)
+@@ -1610,6 +1638,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
+ afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
+ NULL, &scb[1]);
+ ihold(&vnode->vfs_inode);
++ afs_update_dentry_version(&fc, dentry, &scb[0]);
+ d_instantiate(dentry, &vnode->vfs_inode);
+
+ mutex_unlock(&vnode->io_lock);
+@@ -1689,6 +1718,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
+ afs_check_for_remote_deletion(&fc, dvnode);
+ afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
+ &data_version, &scb[0]);
++ afs_update_dentry_version(&fc, dentry, &scb[0]);
+ afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
+ ret = afs_end_vnode_operation(&fc);
+ if (ret < 0)
+@@ -1794,6 +1824,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ }
+ }
+
++ /* This bit is potentially nasty as there's a race with
++ * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
++ * to reflect its new parent's new data_version after the op, but
++ * d_revalidate may see old_dentry between the op having taken place
++ * and the version being updated.
++ *
++ * So drop the old_dentry for now to make other threads go through
++ * lookup instead - which we hold a lock against.
++ */
++ d_drop(old_dentry);
++
+ ret = -ERESTARTSYS;
+ if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
+ afs_dataversion_t orig_data_version;
+@@ -1805,9 +1846,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ if (orig_dvnode != new_dvnode) {
+ if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
+ afs_end_vnode_operation(&fc);
+- goto error_rehash;
++ goto error_rehash_old;
+ }
+- new_data_version = new_dvnode->status.data_version;
++ new_data_version = new_dvnode->status.data_version + 1;
+ } else {
+ new_data_version = orig_data_version;
+ new_scb = &scb[0];
+@@ -1830,7 +1871,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ }
+ ret = afs_end_vnode_operation(&fc);
+ if (ret < 0)
+- goto error_rehash;
++ goto error_rehash_old;
+ }
+
+ if (ret == 0) {
+@@ -1856,10 +1897,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ drop_nlink(new_inode);
+ spin_unlock(&new_inode->i_lock);
+ }
++
++ /* Now we can update d_fsdata on the dentries to reflect their
++ * new parent's data_version.
++ *
++ * Note that if we ever implement RENAME_EXCHANGE, we'll have
++ * to update both dentries with opposing dir versions.
++ */
++ if (new_dvnode != orig_dvnode) {
++ afs_update_dentry_version(&fc, old_dentry, &scb[1]);
++ afs_update_dentry_version(&fc, new_dentry, &scb[1]);
++ } else {
++ afs_update_dentry_version(&fc, old_dentry, &scb[0]);
++ afs_update_dentry_version(&fc, new_dentry, &scb[0]);
++ }
+ d_move(old_dentry, new_dentry);
+ goto error_tmp;
+ }
+
++error_rehash_old:
++ d_rehash(new_dentry);
+ error_rehash:
+ if (rehash)
+ d_rehash(rehash);
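
All of the fs/afs/dir.c hunks serve one cookie: d_fsdata caches the directory data version a dentry was last validated against, so afs_d_revalidate() can return early when the directory hasn't changed. Each directory-modifying op now stamps the dentry from the server's reply, and rename d_drops old_dentry up front because the new stamp only exists after the reply arrives. The cookie itself is just a version squeezed into a pointer:

    /* Sketch: pointer-sized version cookie (truncates on 32-bit,
     * which merely forces an extra revalidation). */
    dentry->d_fsdata = (void *)(unsigned long)dir_version;

    /* revalidate fast path */
    if ((long)dentry->d_fsdata == (long)dir->status.data_version)
            return 1;               /* still valid, no lookup needed */
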
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 8fd7d3b9a1b1..87beabc7114e 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
+ int i;
+
+ if (refcount_dec_and_test(&req->usage)) {
+- for (i = 0; i < req->nr_pages; i++)
+- if (req->pages[i])
+- put_page(req->pages[i]);
+- if (req->pages != req->array)
+- kfree(req->pages);
++ if (req->pages) {
++ for (i = 0; i < req->nr_pages; i++)
++ if (req->pages[i])
++ put_page(req->pages[i]);
++ if (req->pages != req->array)
++ kfree(req->pages);
++ }
+ kfree(req);
+ }
+ }
+diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
+index d7e0fd3c00df..cfb0ac4bd039 100644
+--- a/fs/afs/vlclient.c
++++ b/fs/afs/vlclient.c
+@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
+ struct afs_uuid__xdr *xdr;
+ struct afs_uuid *uuid;
+ int j;
++ int n = entry->nr_servers;
+
+ tmp = ntohl(uvldb->serverFlags[i]);
+ if (tmp & AFS_VLSF_DONTUSE ||
+ (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
+ continue;
+ if (tmp & AFS_VLSF_RWVOL) {
+- entry->fs_mask[i] |= AFS_VOL_VTM_RW;
++ entry->fs_mask[n] |= AFS_VOL_VTM_RW;
+ if (vlflags & AFS_VLF_BACKEXISTS)
+- entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
++ entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
+ }
+ if (tmp & AFS_VLSF_ROVOL)
+- entry->fs_mask[i] |= AFS_VOL_VTM_RO;
+- if (!entry->fs_mask[i])
++ entry->fs_mask[n] |= AFS_VOL_VTM_RO;
++ if (!entry->fs_mask[n])
+ continue;
+
+ xdr = &uvldb->serverNumber[i];
+- uuid = (struct afs_uuid *)&entry->fs_server[i];
++ uuid = (struct afs_uuid *)&entry->fs_server[n];
+ uuid->time_low = xdr->time_low;
+ uuid->time_mid = htons(ntohl(xdr->time_mid));
+ uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 5faf057f6f37..b8f472087902 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -11226,6 +11226,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
+ struct btrfs_device *device;
+ struct list_head *devices;
+ u64 group_trimmed;
++ u64 range_end = U64_MAX;
+ u64 start;
+ u64 end;
+ u64 trimmed = 0;
+@@ -11235,16 +11236,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
+ int dev_ret = 0;
+ int ret = 0;
+
++ /*
++ * Check range overflow if range->len is set.
++ * The default range->len is U64_MAX.
++ */
++ if (range->len != U64_MAX &&
++ check_add_overflow(range->start, range->len, &range_end))
++ return -EINVAL;
++
+ cache = btrfs_lookup_first_block_group(fs_info, range->start);
+ for (; cache; cache = next_block_group(cache)) {
+- if (cache->key.objectid >= (range->start + range->len)) {
++ if (cache->key.objectid >= range_end) {
+ btrfs_put_block_group(cache);
+ break;
+ }
+
+ start = max(range->start, cache->key.objectid);
+- end = min(range->start + range->len,
+- cache->key.objectid + cache->key.offset);
++ end = min(range_end, cache->key.objectid + cache->key.offset);
+
+ if (end - start >= range->minlen) {
+ if (!block_group_cache_done(cache)) {
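
The btrfs change guards range->start + range->len, which a crafted FITRIM argument could wrap; check_add_overflow() from linux/overflow.h stores the sum into its third argument and returns true if it wrapped. In isolation:

    /* Sketch: reject a [start, start + len) range that wraps u64. */
    u64 range_end;

    if (check_add_overflow(start, len, &range_end))
            return -EINVAL;         /* start + len overflowed u64 */
    /* range_end now holds the exclusive end, safe to compare against */
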
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 0cb442406168..222d7115db71 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
+ unsigned long bytes = 0;
+ struct nfs_direct_req *dreq = hdr->dreq;
+
+- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+- goto out_put;
+-
+ spin_lock(&dreq->lock);
+- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
++ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ dreq->error = hdr->error;
+- else
++
++ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
++ spin_unlock(&dreq->lock);
++ goto out_put;
++ }
++
++ if (hdr->good_bytes != 0)
+ nfs_direct_good_bytes(dreq, hdr);
+
++ if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
++ dreq->error = 0;
++
+ spin_unlock(&dreq->lock);
+
+ while (!list_empty(&hdr->pages)) {
+@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ bool request_commit = false;
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+- goto out_put;
+-
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+
+ spin_lock(&dreq->lock);
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ dreq->error = hdr->error;
+- if (dreq->error == 0) {
++
++ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
++ spin_unlock(&dreq->lock);
++ goto out_put;
++ }
++
++ if (hdr->good_bytes != 0) {
+ nfs_direct_good_bytes(dreq, hdr);
+ if (nfs_write_need_commit(hdr)) {
+ if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index bcff3bf5ae09..c67cdbb36ce7 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1128,8 +1128,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
+ break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ break;
+- case -EAGAIN:
+- return -NFS4ERR_RESET_TO_PNFS;
+ /* Invalidate Layout errors */
+ case -NFS4ERR_PNFS_NO_LAYOUT:
+ case -ESTALE: /* mapped NFS4ERR_STALE */
+@@ -1190,7 +1188,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ case -EBADHANDLE:
+ case -ELOOP:
+ case -ENOSPC:
+- case -EAGAIN:
+ break;
+ case -EJUKEBOX:
+ nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
+@@ -1425,16 +1422,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
+ ff_layout_read_prepare_common(task, hdr);
+ }
+
+-static void
+-ff_layout_io_prepare_transmit(struct rpc_task *task,
+- void *data)
+-{
+- struct nfs_pgio_header *hdr = data;
+-
+- if (!pnfs_is_valid_lseg(hdr->lseg))
+- rpc_exit(task, -EAGAIN);
+-}
+-
+ static void ff_layout_read_call_done(struct rpc_task *task, void *data)
+ {
+ struct nfs_pgio_header *hdr = data;
+@@ -1720,7 +1707,6 @@ static void ff_layout_commit_release(void *data)
+
+ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v3,
+- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = ff_layout_read_release,
+@@ -1728,7 +1714,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
+
+ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_read_prepare_v4,
+- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
+ .rpc_call_done = ff_layout_read_call_done,
+ .rpc_count_stats = ff_layout_read_count_stats,
+ .rpc_release = ff_layout_read_release,
+@@ -1736,7 +1721,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
+
+ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v3,
+- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = ff_layout_write_release,
+@@ -1744,7 +1728,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
+
+ static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
+ .rpc_call_prepare = ff_layout_write_prepare_v4,
+- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
+ .rpc_call_done = ff_layout_write_call_done,
+ .rpc_count_stats = ff_layout_write_count_stats,
+ .rpc_release = ff_layout_write_release,
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 6ef5278326b6..8b6211753228 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1253,20 +1253,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
+ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+ {
+- LIST_HEAD(failed);
++ LIST_HEAD(pages);
+
+ desc->pg_io_completion = hdr->io_completion;
+ desc->pg_dreq = hdr->dreq;
+- while (!list_empty(&hdr->pages)) {
+- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
++ list_splice_init(&hdr->pages, &pages);
++ while (!list_empty(&pages)) {
++ struct nfs_page *req = nfs_list_entry(pages.next);
+
+ if (!nfs_pageio_add_request(desc, req))
+- nfs_list_move_request(req, &failed);
++ break;
+ }
+ nfs_pageio_complete(desc);
+- if (!list_empty(&failed)) {
+- list_move(&failed, &hdr->pages);
+- return desc->pg_error < 0 ? desc->pg_error : -EIO;
++ if (!list_empty(&pages)) {
++ int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
++ hdr->completion_ops->error_cleanup(&pages, err);
++ nfs_set_pgio_error(hdr, err, hdr->io_start);
++ return err;
+ }
+ return 0;
+ }
+diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
+index cbd9d8495690..88e1e6304a71 100644
+--- a/include/linux/logic_pio.h
++++ b/include/linux/logic_pio.h
+@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
+ unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+ resource_size_t hw_addr, resource_size_t size);
+ int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
++void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
+ resource_size_t logic_pio_to_hwaddr(unsigned long pio);
+ unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
+
+diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
+index d0e451868f02..c72dfd518782 100644
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
+
+ struct rpc_call_ops {
+ void (*rpc_call_prepare)(struct rpc_task *, void *);
+- void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
+ void (*rpc_call_done)(struct rpc_task *, void *);
+ void (*rpc_count_stats)(struct rpc_task *, void *);
+ void (*rpc_release)(void *);
+diff --git a/include/net/addrconf.h b/include/net/addrconf.h
+index becdad576859..3f62b347b04a 100644
+--- a/include/net/addrconf.h
++++ b/include/net/addrconf.h
+@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
+ {
+ if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
+- return -EINVAL;
++ return 0;
+
+ return pskb_may_pull(skb, len);
+ }
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 42d17f730780..d2146277071f 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1686,20 +1686,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
+ if (err)
+ goto free_used_maps;
+
+- err = bpf_prog_new_fd(prog);
+- if (err < 0) {
+- /* failed to allocate fd.
+- * bpf_prog_put() is needed because the above
+- * bpf_prog_alloc_id() has published the prog
+- * to the userspace and the userspace may
+- * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
+- */
+- bpf_prog_put(prog);
+- return err;
+- }
+-
++ /* Upon success of bpf_prog_alloc_id(), the BPF prog is
++ * effectively publicly exposed. However, retrieving via
++ * bpf_prog_get_fd_by_id() will take another reference,
++ * therefore it cannot go away underneath us.
++ *
++ * Only for the time /after/ successful bpf_prog_new_fd()
++ * and before returning to userspace, we might just hold
++ * one reference and any parallel close on that fd could
++ * rip everything out. Hence, the notifications below must
++ * happen before bpf_prog_new_fd().
++ *
++ * Also, any failure handling from this point onwards must
++ * be using bpf_prog_put() given the program is exposed.
++ */
+ bpf_prog_kallsyms_add(prog);
+ perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
++
++ err = bpf_prog_new_fd(prog);
++ if (err < 0)
++ bpf_prog_put(prog);
+ return err;
+
+ free_used_maps:
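
The bpf_prog_load() reorder enforces a publish-last rule: after bpf_prog_new_fd() succeeds, a parallel close() can drop the last reference, so all remaining side effects must run first, and later failures may only use bpf_prog_put(). As a generic pattern with hypothetical helper names:

    /* Sketch: finish bookkeeping, expose the fd last (names hypothetical). */
    register_side_effects(obj);     /* must precede fd exposure */
    fd = anon_fd_install(obj);      /* obj is user-reachable from here */
    if (fd < 0)
            obj_put(obj);           /* drop the reference; never free directly */
    return fd;
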
+diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
+index 2c2772e9702a..9912be7a970d 100644
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -55,9 +55,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
+ {
+ u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
+
+- if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+- max_dma = dev->bus_dma_mask;
+-
+ return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
+ }
+
+diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
+index 0b1f77957240..385ebcfc31a6 100644
+--- a/kernel/locking/rwsem-xadd.c
++++ b/kernel/locking/rwsem-xadd.c
+@@ -454,6 +454,8 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+ * been set in the count.
+ */
+ if (atomic_long_read(&sem->count) >= 0) {
++ /* Provide lock ACQUIRE */
++ smp_acquire__after_ctrl_dep();
+ raw_spin_unlock_irq(&sem->wait_lock);
+ rwsem_set_reader_owned(sem);
+ lockevent_inc(rwsem_rlock_fast);
+@@ -483,8 +485,10 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
+ /* wait to be given the lock */
+ while (true) {
+ set_current_state(state);
+- if (!waiter.task)
++ if (!smp_load_acquire(&waiter.task)) {
++ /* Orders against rwsem_mark_wake()'s smp_store_release() */
+ break;
++ }
+ if (signal_pending_state(state, current)) {
+ raw_spin_lock_irq(&sem->wait_lock);
+ if (waiter.task)
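
Both rwsem hunks add the reader's missing ACQUIRE: the fast path gains smp_acquire__after_ctrl_dep(), and the sleep loop reads waiter.task with smp_load_acquire() so it pairs with the smp_store_release() in rwsem_mark_wake() that publishes the handoff. The pairing on its own:

    /* Sketch: release/acquire pair for a lock handoff. */

    /* waker: everything it wrote before this store ... */
    smp_store_release(&w->task, NULL);

    /* waiter: ... is visible once the acquire load sees NULL. */
    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (!smp_load_acquire(&w->task))
                    break;          /* handoff observed, reads ordered */
            schedule();
    }
    __set_current_state(TASK_RUNNING);
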
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 208220d526e8..2373311b4a43 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
+ hnd = &iter->probe_entry->hlist;
+
+ hash = iter->probe->ops.func_hash->filter_hash;
++
++ /*
++ * A probe being registered may temporarily have an empty hash
++ * and it's at the end of the func_probes list.
++ */
++ if (!hash || hash == EMPTY_HASH)
++ return NULL;
++
+ size = 1 << hash->size_bits;
+
+ retry:
+@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
+
+ mutex_unlock(&ftrace_lock);
+
++ /*
++ * Note, there's a small window here in which the func_hash->filter_hash
++ * may be NULL or empty. Need to be careful when reading the loop.
++ */
+ mutex_lock(&probe->ops.func_hash->regex_lock);
+
+ orig_hash = &probe->ops.func_hash->filter_hash;
+ old_hash = *orig_hash;
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
+
++ if (!hash) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
+ ret = ftrace_match_records(hash, glob, strlen(glob));
+
+ /* Nothing found? */
+diff --git a/lib/logic_pio.c b/lib/logic_pio.c
+index feea48fd1a0d..905027574e5d 100644
+--- a/lib/logic_pio.c
++++ b/lib/logic_pio.c
+@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+ struct logic_pio_hwaddr *range;
+ resource_size_t start;
+ resource_size_t end;
+- resource_size_t mmio_sz = 0;
++ resource_size_t mmio_end = 0;
+ resource_size_t iio_sz = MMIO_UPPER_LIMIT;
+ int ret = 0;
+
+@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+ end = new_range->hw_start + new_range->size;
+
+ mutex_lock(&io_range_mutex);
+- list_for_each_entry_rcu(range, &io_range_list, list) {
++ list_for_each_entry(range, &io_range_list, list) {
+ if (range->fwnode == new_range->fwnode) {
+ /* range already there */
+ goto end_register;
+@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+ /* for MMIO ranges we need to check for overlap */
+ if (start >= range->hw_start + range->size ||
+ end < range->hw_start) {
+- mmio_sz += range->size;
++ mmio_end = range->io_start + range->size;
+ } else {
+ ret = -EFAULT;
+ goto end_register;
+@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
+
+ /* range not registered yet, check for available space */
+ if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
+- if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
++ if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+- if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
++ if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
+ ret = -E2BIG;
+ goto end_register;
+ }
+ new_range->size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+- new_range->io_start = mmio_sz;
++ new_range->io_start = mmio_end;
+ } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
+ if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
+ ret = -E2BIG;
+@@ -98,6 +98,20 @@ end_register:
+ return ret;
+ }
+
++/**
++ * logic_pio_unregister_range - unregister a logical PIO range for a host
++ * @range: pointer to the IO range which has been already registered.
++ *
++ * Unregister a previously-registered IO range node.
++ */
++void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
++{
++ mutex_lock(&io_range_mutex);
++ list_del_rcu(&range->list);
++ mutex_unlock(&io_range_mutex);
++ synchronize_rcu();
++}
++
+ /**
+ * find_io_range_by_fwnode - find logical PIO range for given FW node
+ * @fwnode: FW node handle associated with logical PIO range
+@@ -108,26 +122,38 @@ end_register:
+ */
+ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
+ {
+- struct logic_pio_hwaddr *range;
++ struct logic_pio_hwaddr *range, *found_range = NULL;
+
++ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+- if (range->fwnode == fwnode)
+- return range;
++ if (range->fwnode == fwnode) {
++ found_range = range;
++ break;
++ }
+ }
+- return NULL;
++ rcu_read_unlock();
++
++ return found_range;
+ }
+
+ /* Return a registered range given an input PIO token */
+ static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
+ {
+- struct logic_pio_hwaddr *range;
++ struct logic_pio_hwaddr *range, *found_range = NULL;
+
++ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+- if (in_range(pio, range->io_start, range->size))
+- return range;
++ if (in_range(pio, range->io_start, range->size)) {
++ found_range = range;
++ break;
++ }
+ }
+- pr_err("PIO entry token %lx invalid\n", pio);
+- return NULL;
++ rcu_read_unlock();
++
++ if (!found_range)
++ pr_err("PIO entry token 0x%lx invalid\n", pio);
++
++ return found_range;
+ }
+
+ /**
+@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
+ {
+ struct logic_pio_hwaddr *range;
+
++ rcu_read_lock();
+ list_for_each_entry_rcu(range, &io_range_list, list) {
+ if (range->flags != LOGIC_PIO_CPU_MMIO)
+ continue;
+- if (in_range(addr, range->hw_start, range->size))
+- return addr - range->hw_start + range->io_start;
++ if (in_range(addr, range->hw_start, range->size)) {
++ unsigned long cpuaddr;
++
++ cpuaddr = addr - range->hw_start + range->io_start;
++
++ rcu_read_unlock();
++ return cpuaddr;
++ }
+ }
+- pr_err("addr %llx not registered in io_range_list\n",
+- (unsigned long long) addr);
++ rcu_read_unlock();
++
++ pr_err("addr %pa not registered in io_range_list\n", &addr);
++
+ return ~0UL;
+ }
+
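
The logic_pio conversion fixes two lockless-list mistakes at once: list_for_each_entry_rcu() is only legal inside an RCU read-side section (the register path, which holds io_range_mutex, drops to the plain iterator instead), and removal must pair list_del_rcu() with synchronize_rcu() before the node may be freed. The reader/updater pairing, with match() as a placeholder:

    /* Sketch: RCU-protected list lookup ... */
    struct logic_pio_hwaddr *range, *found = NULL;

    rcu_read_lock();
    list_for_each_entry_rcu(range, &io_range_list, list) {
            if (match(range)) {     /* match() is a placeholder */
                    found = range;
                    break;
            }
    }
    rcu_read_unlock();

    /* ... and removal */
    mutex_lock(&io_range_mutex);
    list_del_rcu(&range->list);
    mutex_unlock(&io_range_mutex);
    synchronize_rcu();              /* wait out readers before freeing */
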
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index bb783c27ba21..30ebecf67527 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -748,15 +748,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ /* Update memcg */
+ __mod_memcg_state(memcg, idx, val);
+
++ /* Update lruvec */
++ __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
++
+ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ struct mem_cgroup_per_node *pi;
+
+- /*
+- * Batch local counters to keep them in sync with
+- * the hierarchical ones.
+- */
+- __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
+ for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
+ atomic_long_add(x, &pi->lruvec_stat[idx]);
+ x = 0;
+@@ -3161,7 +3159,7 @@ static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+
+ for_each_online_cpu(cpu)
+ for (i = 0; i < MEMCG_NR_STAT; i++)
+- stat[i] += raw_cpu_read(memcg->vmstats_percpu->stat[i]);
++ stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
+
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ for (i = 0; i < MEMCG_NR_STAT; i++)
+@@ -3176,8 +3174,8 @@ static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg)
+
+ for_each_online_cpu(cpu)
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+- stat[i] += raw_cpu_read(
+- pn->lruvec_stat_cpu->count[i]);
++ stat[i] += per_cpu(
++ pn->lruvec_stat_cpu->count[i], cpu);
+
+ for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+@@ -3196,8 +3194,8 @@ static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
+
+ for_each_online_cpu(cpu)
+ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
+- events[i] += raw_cpu_read(
+- memcg->vmstats_percpu->events[i]);
++ events[i] += per_cpu(memcg->vmstats_percpu->events[i],
++ cpu);
+
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
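
The memcontrol flush fix is a per-cpu indexing bug: raw_cpu_read() always reads the slot of the CPU the code is running on, so inside for_each_online_cpu() it summed the local slot once per iteration; per_cpu(var, cpu) selects the iterated CPU's instance. In miniature:

    /* Sketch: summing a per-cpu counter across all online CPUs. */
    long total = 0;
    int cpu;

    for_each_online_cpu(cpu) {
            /* per_cpu() indexes CPU 'cpu'; raw_cpu_read() would keep
             * re-reading the local CPU's slot instead. */
            total += per_cpu(counter, cpu);
    }
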
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 515b00801af2..7d62ef2daf83 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -2432,7 +2432,9 @@ struct zs_pool *zs_create_pool(const char *name)
+ if (!pool->name)
+ goto err;
+
++#ifdef CONFIG_COMPACTION
+ init_waitqueue_head(&pool->migration_wait);
++#endif
+
+ if (create_cache(pool))
+ goto err;
+diff --git a/net/core/stream.c b/net/core/stream.c
+index e94bb02a5629..4f1d4aa5fb38 100644
+--- a/net/core/stream.c
++++ b/net/core/stream.c
+@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+ int err = 0;
+ long vm_wait = 0;
+ long current_timeo = *timeo_p;
+- bool noblock = (*timeo_p ? false : true);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ if (sk_stream_memory_free(sk))
+@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
+
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto do_error;
+- if (!*timeo_p) {
+- if (noblock)
+- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+- goto do_nonblock;
+- }
++ if (!*timeo_p)
++ goto do_eagain;
+ if (signal_pending(current))
+ goto do_interrupted;
+ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+@@ -169,7 +165,13 @@ out:
+ do_error:
+ err = -EPIPE;
+ goto out;
+-do_nonblock:
++do_eagain:
++ /* Make sure that whenever EAGAIN is returned, an EPOLLOUT event can
++ * be generated later.
++ * When TCP receives ACK packets that make room, tcp_check_space()
++ * only calls tcp_new_space() if SOCK_NOSPACE is set.
++ */
++ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ goto out;
+ do_interrupted:
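
The sk_stream_wait_memory() fix makes arming the wakeup unconditional: noblock was computed once on entry, so a timeout that expired inside the loop could return -EAGAIN without setting SOCK_NOSPACE, after which tcp_check_space() would never call tcp_new_space() and the sender would miss its EPOLLOUT. The invariant, reduced to its core:

    /* Sketch: whenever -EAGAIN goes out, the wakeup must be armed. */
    if (!timeo) {
            set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
            return -EAGAIN;         /* EPOLLOUT follows when space frees */
    }
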
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 15c72065df79..08c02dbb3d69 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -229,7 +229,6 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+ skb->dev = master->dev;
+ hsr_forward_skb(skb, master);
+-
+ return NETDEV_TX_OK;
+ }
+
+@@ -344,8 +343,9 @@ static void hsr_announce(struct timer_list *t)
+ rcu_read_unlock();
+ }
+
+-/* According to comments in the declaration of struct net_device, this function
+- * is "Called from unregister, can be used to call free_netdev". Ok then...
++/* This has to be called after all the readers are gone.
++ * Otherwise we would have to check the return value of
++ * hsr_port_get_hsr().
+ */
+ static void hsr_dev_destroy(struct net_device *hsr_dev)
+ {
+@@ -356,15 +356,14 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
+
+ hsr_debugfs_term(hsr);
+
+- rtnl_lock();
+ hsr_for_each_port(hsr, port)
+ hsr_del_port(port);
+- rtnl_unlock();
+
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+
+- synchronize_rcu();
++ hsr_del_self_node(&hsr->self_node_db);
++ hsr_del_nodes(&hsr->node_db);
+ }
+
+ static const struct net_device_ops hsr_device_ops = {
+@@ -373,6 +372,7 @@ static const struct net_device_ops hsr_device_ops = {
+ .ndo_stop = hsr_dev_close,
+ .ndo_start_xmit = hsr_dev_xmit,
+ .ndo_fix_features = hsr_fix_features,
++ .ndo_uninit = hsr_dev_destroy,
+ };
+
+ static struct device_type hsr_type = {
+@@ -391,7 +391,6 @@ void hsr_dev_setup(struct net_device *dev)
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ dev->needs_free_netdev = true;
+- dev->priv_destructor = hsr_dev_destroy;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+ NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+@@ -495,7 +494,7 @@ fail:
+ hsr_for_each_port(hsr, port)
+ hsr_del_port(port);
+ err_add_port:
+- hsr_del_node(&hsr->self_node_db);
++ hsr_del_self_node(&hsr->self_node_db);
+
+ return res;
+ }
+diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
+index 2d7a19750436..292be446007b 100644
+--- a/net/hsr/hsr_framereg.c
++++ b/net/hsr/hsr_framereg.c
+@@ -104,7 +104,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
+ return 0;
+ }
+
+-void hsr_del_node(struct list_head *self_node_db)
++void hsr_del_self_node(struct list_head *self_node_db)
+ {
+ struct hsr_node *node;
+
+@@ -117,6 +117,15 @@ void hsr_del_node(struct list_head *self_node_db)
+ }
+ }
+
++void hsr_del_nodes(struct list_head *node_db)
++{
++ struct hsr_node *node;
++ struct hsr_node *tmp;
++
++ list_for_each_entry_safe(node, tmp, node_db, mac_list)
++ kfree(node);
++}
++
+ /* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+ * originating from the newly added node.
+diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
+index a3bdcdab469d..89a3ce38151d 100644
+--- a/net/hsr/hsr_framereg.h
++++ b/net/hsr/hsr_framereg.h
+@@ -12,7 +12,8 @@
+
+ struct hsr_node;
+
+-void hsr_del_node(struct list_head *self_node_db);
++void hsr_del_self_node(struct list_head *self_node_db);
++void hsr_del_nodes(struct list_head *node_db);
+ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+ u16 seq_out);
+ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index 7c857c72aad1..92b3d2d1139e 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+
+ if (!rt)
+ goto out;
+- net = dev_net(rt->dst.dev);
++
++ if (rt->dst.dev)
++ net = dev_net(rt->dst.dev);
++ else if (skb_in->dev)
++ net = dev_net(skb_in->dev);
++ else
++ goto out;
+
+ /*
+ * Find the original header. It is expected to be valid, of course.
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 85107bf812f2..b5b0834ec5ee 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1474,7 +1474,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
+
+ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+ {
+- __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
++ __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
+ }
+ EXPORT_SYMBOL(ip_mc_inc_group);
+
+@@ -2196,7 +2196,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
+ iml->sflist = NULL;
+ iml->sfmode = mode;
+ rcu_assign_pointer(inet->mc_list, iml);
+- __ip_mc_inc_group(in_dev, addr, mode);
++ ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
+ err = 0;
+ done:
+ return err;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 081bb517e40d..2454fce6fbfa 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
+ int err = 0;
+
+ if (addr_type == IPV6_ADDR_ANY ||
+- addr_type & IPV6_ADDR_MULTICAST ||
++ (addr_type & IPV6_ADDR_MULTICAST &&
++ !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
+ (!(idev->dev->flags & IFF_LOOPBACK) &&
+ !netif_is_l3_master(idev->dev) &&
+ addr_type & IPV6_ADDR_LOOPBACK))
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index b8288125e05d..1c55d3b7bc15 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -1543,6 +1543,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
+ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
++ sdata->vif.type == NL80211_IFTYPE_STATION &&
++ !sdata->u.mgd.associated)
++ return -EINVAL;
++
+ sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
+ if (!sta)
+ return -ENOMEM;
+@@ -1550,10 +1555,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
+ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ sta->sta.tdls = true;
+
+- if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+- !sdata->u.mgd.associated)
+- return -EINVAL;
+-
+ err = sta_apply_parameters(local, sta, params);
+ if (err) {
+ sta_info_free(local, sta);
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3c1ab870fefe..768d14c9a716 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
+ skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
+ sdata->control_port_over_nl80211)) {
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+- bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
++ bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
+
+ cfg80211_rx_control_port(dev, skb, noencrypt);
+ dev_kfree_skb(skb);
+ } else {
++ memset(skb->cb, 0, sizeof(skb->cb));
++
+ /* deliver to local stack */
+ if (rx->napi)
+ napi_gro_receive(rx->napi, skb);
+@@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
+
+ if (skb) {
+ skb->protocol = eth_type_trans(skb, dev);
+- memset(skb->cb, 0, sizeof(skb->cb));
+-
+ ieee80211_deliver_skb_to_local_stack(skb, rx);
+ }
+
+diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
+index d25e91d7bdc1..44b675016393 100644
+--- a/net/mpls/mpls_iptunnel.c
++++ b/net/mpls/mpls_iptunnel.c
+@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
+ mpls_stats_inc_outucastpkts(out_dev, skb);
+
+ if (rt) {
+- if (rt->rt_gw_family == AF_INET)
+- err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+- skb);
+- else if (rt->rt_gw_family == AF_INET6)
++ if (rt->rt_gw_family == AF_INET6)
+ err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
+ skb);
++ else
++ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
++ skb);
+ } else if (rt6) {
+ if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
+ /* 6PE (RFC 4798) */
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index 848c6eb55064..4d7896135e73 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
+ struct md_mark mark;
+ struct md_labels labels;
+ char timeout[CTNL_TIMEOUT_NAME_MAX];
++ struct nf_ct_timeout *nf_ct_timeout;
+ #if IS_ENABLED(CONFIG_NF_NAT)
+ struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
+ #endif
+@@ -697,6 +698,14 @@ static bool skb_nfct_cached(struct net *net,
+ if (help && rcu_access_pointer(help->helper) != info->helper)
+ return false;
+ }
++ if (info->nf_ct_timeout) {
++ struct nf_conn_timeout *timeout_ext;
++
++ timeout_ext = nf_ct_timeout_find(ct);
++ if (!timeout_ext || info->nf_ct_timeout !=
++ rcu_dereference(timeout_ext->timeout))
++ return false;
++ }
+ /* Force conntrack entry direction to the current packet? */
+ if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
+ /* Delete the conntrack entry if confirmed, else just release
+@@ -1657,6 +1666,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+ ct_info.timeout))
+ pr_info_ratelimited("Failed to associated timeout "
+ "policy `%s'\n", ct_info.timeout);
++ else
++ ct_info.nf_ct_timeout = rcu_dereference(
++ nf_ct_timeout_find(ct_info.ct)->timeout);
++
+ }
+
+ if (helper) {
+diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
+index f0de323d15d6..6c8f09c1ce51 100644
+--- a/net/smc/smc_tx.c
++++ b/net/smc/smc_tx.c
+@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct smc_connection *conn = &smc->conn;
+ struct sock *sk = &smc->sk;
+- bool noblock;
+ long timeo;
+ int rc = 0;
+
+ /* similar to sk_stream_wait_memory */
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+- noblock = timeo ? false : true;
+ add_wait_queue(sk_sleep(sk), &wait);
+ while (1) {
+ sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
+ break;
+ }
+ if (!timeo) {
+- if (noblock)
+- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
++ /* ensure EPOLLOUT is subsequently generated */
++ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ rc = -EAGAIN;
+ break;
+ }
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 9e1743b364ec..a680d28c231e 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1893,6 +1893,7 @@ call_bind(struct rpc_task *task)
+ static void
+ call_bind_status(struct rpc_task *task)
+ {
++ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
+ int status = -EIO;
+
+ if (rpc_task_transmitted(task)) {
+@@ -1900,14 +1901,15 @@ call_bind_status(struct rpc_task *task)
+ return;
+ }
+
+- if (task->tk_status >= 0) {
+- dprint_status(task);
++ dprint_status(task);
++ trace_rpc_bind_status(task);
++ if (task->tk_status >= 0)
++ goto out_next;
++ if (xprt_bound(xprt)) {
+ task->tk_status = 0;
+- task->tk_action = call_connect;
+- return;
++ goto out_next;
+ }
+
+- trace_rpc_bind_status(task);
+ switch (task->tk_status) {
+ case -ENOMEM:
+ dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
+@@ -1966,7 +1968,9 @@ call_bind_status(struct rpc_task *task)
+
+ rpc_call_rpcerror(task, status);
+ return;
+-
++out_next:
++ task->tk_action = call_connect;
++ return;
+ retry_timeout:
+ task->tk_status = 0;
+ task->tk_action = call_bind;
+@@ -2013,6 +2017,7 @@ call_connect(struct rpc_task *task)
+ static void
+ call_connect_status(struct rpc_task *task)
+ {
++ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
+ struct rpc_clnt *clnt = task->tk_client;
+ int status = task->tk_status;
+
+@@ -2022,8 +2027,17 @@ call_connect_status(struct rpc_task *task)
+ }
+
+ dprint_status(task);
+-
+ trace_rpc_connect_status(task);
++
++ if (task->tk_status == 0) {
++ clnt->cl_stats->netreconn++;
++ goto out_next;
++ }
++ if (xprt_connected(xprt)) {
++ task->tk_status = 0;
++ goto out_next;
++ }
++
+ task->tk_status = 0;
+ switch (status) {
+ case -ECONNREFUSED:
+@@ -2054,13 +2068,12 @@ call_connect_status(struct rpc_task *task)
+ case -EAGAIN:
+ case -ETIMEDOUT:
+ goto out_retry;
+- case 0:
+- clnt->cl_stats->netreconn++;
+- task->tk_action = call_transmit;
+- return;
+ }
+ rpc_call_rpcerror(task, status);
+ return;
++out_next:
++ task->tk_action = call_transmit;
++ return;
+ out_retry:
+ /* Check for timeouts before looping back to call_bind */
+ task->tk_action = call_bind;
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 5ddd34ad64b9..f7a995bd2a6c 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1380,13 +1380,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
+ status = -EBADMSG;
+ goto out_dequeue;
+ }
+- if (task->tk_ops->rpc_call_prepare_transmit) {
+- task->tk_ops->rpc_call_prepare_transmit(task,
+- task->tk_calldata);
+- status = task->tk_status;
+- if (status < 0)
+- goto out_dequeue;
+- }
+ if (RPC_SIGNALLED(task)) {
+ status = -ERESTARTSYS;
+ goto out_dequeue;
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 4831ad745f91..327479ce69f5 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
+
+ /* When last_request->processed becomes true this will be rescheduled */
+ if (lr && !lr->processed) {
+- reg_process_hint(lr);
++ pr_debug("Pending regulatory request, waiting for it to be processed...\n");
+ return;
+ }
+
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index d0e35b7b9e35..e74837824cea 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+
+ switch (params->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
++ /* Extended Key ID can only be used with CCMP/GCMP ciphers */
++ if ((pairwise && key_idx) ||
++ params->mode != NL80211_KEY_RX_TX)
++ return -EINVAL;
++ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+- /* IEEE802.11-2016 allows only 0 and - when using Extended Key
+- * ID - 1 as index for pairwise keys.
++ /* IEEE802.11-2016 allows only 0 and - when supporting
++ * Extended Key ID - 1 as index for pairwise keys.
+ * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
+ * the driver supports Extended Key ID.
+ * @NL80211_KEY_SET_TX can't be set when installing and
+ * validating a key.
+ */
+- if (params->mode == NL80211_KEY_NO_TX) {
+- if (!wiphy_ext_feature_isset(&rdev->wiphy,
+- NL80211_EXT_FEATURE_EXT_KEY_ID))
+- return -EINVAL;
+- else if (!pairwise || key_idx < 0 || key_idx > 1)
++ if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
++ params->mode == NL80211_KEY_SET_TX)
++ return -EINVAL;
++ if (wiphy_ext_feature_isset(&rdev->wiphy,
++ NL80211_EXT_FEATURE_EXT_KEY_ID)) {
++ if (pairwise && (key_idx < 0 || key_idx > 1))
+ return -EINVAL;
+- } else if ((pairwise && key_idx) ||
+- params->mode == NL80211_KEY_SET_TX) {
++ } else if (pairwise && key_idx) {
+ return -EINVAL;
+ }
+ break;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 82be7780bbe8..d5342687fdca 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3272,7 +3272,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
+ struct flowi4 *fl4 = &fl->u.ip4;
+ int oif = 0;
+
+- if (skb_dst(skb))
++ if (skb_dst(skb) && skb_dst(skb)->dev)
+ oif = skb_dst(skb)->dev->ifindex;
+
+ memset(fl4, 0, sizeof(struct flowi4));
+@@ -3390,7 +3390,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
+
+ nexthdr = nh[nhoff];
+
+- if (skb_dst(skb))
++ if (skb_dst(skb) && skb_dst(skb)->dev)
+ oif = skb_dst(skb)->dev->ifindex;
+
+ memset(fl6, 0, sizeof(struct flowi6));
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 7737b2670064..6d9592f0ae1d 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
+ if (cptr->type == USER_CLIENT) {
+ info->input_pool = cptr->data.user.fifo_pool_size;
+ info->input_free = info->input_pool;
+- if (cptr->data.user.fifo)
+- info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
++ info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
+ } else {
+ info->input_pool = 0;
+ info->input_free = 0;
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index ea69261f269a..eaaa8b5830bb 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
+
+ return 0;
+ }
++
++/* get the number of unused cells safely */
++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
++{
++ unsigned long flags;
++ int cells;
++
++ if (!f)
++ return 0;
++
++ snd_use_lock_use(&f->use_lock);
++ spin_lock_irqsave(&f->lock, flags);
++ cells = snd_seq_unused_cells(f->pool);
++ spin_unlock_irqrestore(&f->lock, flags);
++ snd_use_lock_free(&f->use_lock);
++ return cells;
++}
+diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
+index edc68743943d..b56a7b897c9c 100644
+--- a/sound/core/seq/seq_fifo.h
++++ b/sound/core/seq/seq_fifo.h
+@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
+ /* resize pool in fifo */
+ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
+
++/* get the number of unused cells safely */
++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
+
+ #endif
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index c3096796ee05..c41865e1222c 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
+ SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
++ SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
+ SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
+ SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 14298ef45b21..968d3caab6ac 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
+
+ /* update LED status via GPIO */
+ static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
+- bool enabled)
++ bool led_on)
+ {
+ struct conexant_spec *spec = codec->spec;
+ unsigned int oldval = spec->gpio_led;
+
+ if (spec->mute_led_polarity)
+- enabled = !enabled;
++ led_on = !led_on;
+
+- if (enabled)
+- spec->gpio_led &= ~mask;
+- else
++ if (led_on)
+ spec->gpio_led |= mask;
++ else
++ spec->gpio_led &= ~mask;
++ codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
++ mask, led_on, spec->gpio_led);
+ if (spec->gpio_led != oldval)
+ snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
+ spec->gpio_led);
+@@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
+ {
+ struct hda_codec *codec = private_data;
+ struct conexant_spec *spec = codec->spec;
+-
+- cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled);
++ /* muted -> LED on */
++ cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
+ }
+
+ /* turn on/off mic-mute LED via GPIO per capture hook */
+@@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
+ { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
+ {}
+ };
+- codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index dd0f43a1c5e1..6aeba0d66ec5 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1605,11 +1605,8 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
+ }
+ }
+
+- if (dai_link->dai_fmt) {
+- ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+- if (ret)
+- return ret;
+- }
++ if (dai_link->dai_fmt)
++ snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+
+ ret = soc_post_component_init(rtd, dai_link->name);
+ if (ret)
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index 2c03e0f6bf72..f70211e6b174 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
+ line6pcm->volume_monitor = 255;
+ line6pcm->line6 = line6;
+
++ spin_lock_init(&line6pcm->out.lock);
++ spin_lock_init(&line6pcm->in.lock);
++ line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
++
++ line6->line6pcm = line6pcm;
++
++ pcm->private_data = line6pcm;
++ pcm->private_free = line6_cleanup_pcm;
++
+ line6pcm->max_packet_size_in =
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0);
+@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
+ return -EINVAL;
+ }
+
+- spin_lock_init(&line6pcm->out.lock);
+- spin_lock_init(&line6pcm->in.lock);
+- line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
+-
+- line6->line6pcm = line6pcm;
+-
+- pcm->private_data = line6pcm;
+- pcm->private_free = line6_cleanup_pcm;
+-
+ err = line6_create_audio_out_urbs(line6pcm);
+ if (err < 0)
+ return err;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index b5927c3d5bc0..eceab19766db 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+ struct uac_mixer_unit_descriptor *desc)
+ {
+ int mu_channels;
+- void *c;
+
+ if (desc->bLength < sizeof(*desc))
+ return -EINVAL;
+@@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+ break;
+ }
+
+- if (!mu_channels)
+- return 0;
+-
+- c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
+- if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
+- return 0; /* no bmControls -> skip */
+-
+ return mu_channels;
+ }
+
+@@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
+ * Mixer Unit
+ */
+
++/* check whether the given in/out overflows bmMixerControls matrix */
++static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
++ int protocol, int num_ins, int num_outs)
++{
++ u8 *hdr = (u8 *)desc;
++ u8 *c = uac_mixer_unit_bmControls(desc, protocol);
++ size_t rest; /* remaining bytes after bmMixerControls */
++
++ switch (protocol) {
++ case UAC_VERSION_1:
++ default:
++ rest = 1; /* iMixer */
++ break;
++ case UAC_VERSION_2:
++ rest = 2; /* bmControls + iMixer */
++ break;
++ case UAC_VERSION_3:
++ rest = 6; /* bmControls + wMixerDescrStr */
++ break;
++ }
++
++ /* overflow? */
++ return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
++}
++
+ /*
+ * build a mixer unit control
+ *
+@@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
+ if (err < 0)
+ return err;
+ num_ins += iterm.channels;
++ if (mixer_bitmap_overflow(desc, state->mixer->protocol,
++ num_ins, num_outs))
++ break;
+ for (; ich < num_ins; ich++) {
+ int och, ich_has_controls = 0;
+
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 199fa157a411..27dcb3743690 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ {
+ struct usb_mixer_interface *mixer;
+ struct usb_mixer_elem_info *cval;
+- int unitid = 12; /* SamleRate ExtensionUnit ID */
++ int unitid = 12; /* SampleRate ExtensionUnit ID */
+
+ list_for_each_entry(mixer, &chip->mixer_list, list) {
+- cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
+- if (cval) {
++ if (mixer->id_elems[unitid]) {
++ cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
+ snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
+ cval->control << 8,
+ samplerate_id);
+ snd_usb_mixer_notify_id(mixer, unitid);
++ break;
+ }
+- break;
+ }
+ }
+
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 75b96929f76c..e4bbf79de956 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ ep = 0x81;
+ ifnum = 2;
+ goto add_sync_ep_from_ifnum;
++ case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
+ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
+ ep = 0x81;
+ ifnum = 1;
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index d7e06fe0270e..0ce50c319cfd 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
+ daemonize = 0;
+ break;
+ case 'h':
++ print_usage(argv);
++ exit(0);
+ default:
+ print_usage(argv);
+ exit(EXIT_FAILURE);
+diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
+index efe1e34dd91b..8f813f5233d4 100644
+--- a/tools/hv/hv_vss_daemon.c
++++ b/tools/hv/hv_vss_daemon.c
+@@ -218,6 +218,8 @@ int main(int argc, char *argv[])
+ daemonize = 0;
+ break;
+ case 'h':
++ print_usage(argv);
++ exit(0);
+ default:
+ print_usage(argv);
+ exit(EXIT_FAILURE);
+diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
+index 55e7374bade0..099f2c44dbed 100644
+--- a/tools/hv/lsvmbus
++++ b/tools/hv/lsvmbus
+@@ -4,10 +4,10 @@
+ import os
+ from optparse import OptionParser
+
++help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
+ parser = OptionParser()
+-parser.add_option("-v", "--verbose", dest="verbose",
+- help="print verbose messages. Try -vv, -vvv for \
+- more verbose messages", action="count")
++parser.add_option(
++ "-v", "--verbose", dest="verbose", help=help_msg, action="count")
+
+ (options, args) = parser.parse_args()
+
+@@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path):
+ exit(-1)
+
+ vmbus_dev_dict = {
+- '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
+- '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
+- '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
+- '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
+- '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
+- '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
+- '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
+- '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
+- '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
+- '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
+- '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
+- '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
+- '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
+- '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
+- '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
+- '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through',
+- '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
+- '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
+- '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
++ '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
++ '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
++ '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
++ '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
++ '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
++ '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
++ '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
++ '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
++ '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
++ '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
++ '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
++ '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
++ '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
++ '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
++ '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
++ '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
++ '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
++ '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
++ '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
+ }
+
++
+ def get_vmbus_dev_attr(dev_name, attr):
+ try:
+ f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
+@@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr):
+
+ return lines
+
++
+ class VMBus_Dev:
+ pass
+
+@@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path):
+
+ chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
+ chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
+- chn_vp_mapping = sorted(chn_vp_mapping,
+- key = lambda c : int(c.split(':')[0]))
++ chn_vp_mapping = sorted(
++ chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
+
+- chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
+- (c.split(':')[0], c.split(':')[1])
+- for c in chn_vp_mapping]
++ chn_vp_mapping = [
++ '\tRel_ID=%s, target_cpu=%s' %
++ (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
++ ]
+ d = VMBus_Dev()
+ d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
+ d.vmbus_id = vmbus_id
+@@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path):
+ vmbus_dev_list.append(d)
+
+
+-vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
++vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
+
+ format0 = '%2s: %s'
+ format1 = '%2s: Class_ID = %s - %s\n%s'
+@@ -95,9 +98,15 @@ for d in vmbus_dev_list:
+ if verbose == 0:
+ print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
+ elif verbose == 1:
+- print (('VMBUS ID ' + format1) % \
+- (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping))
++ print(
++ ('VMBUS ID ' + format1) %
++ (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
++ )
+ else:
+- print (('VMBUS ID ' + format2) % \
+- (d.vmbus_id, d.class_id, d.dev_desc, \
+- d.device_id, d.sysfs_path, d.chn_vp_mapping))
++ print(
++ ('VMBUS ID ' + format2) %
++ (
++ d.vmbus_id, d.class_id, d.dev_desc,
++ d.device_id, d.sysfs_path, d.chn_vp_mapping
++ )
++ )
+diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
+index 75fc4fb9901c..1cd28ebf8443 100644
+--- a/tools/power/x86/turbostat/turbostat.c
++++ b/tools/power/x86/turbostat/turbostat.c
+@@ -4002,7 +4002,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
+ rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
+ rapl_power_units = ldexp(1.0, -(msr & 0xf));
+
+- tdp = get_tdp_amd(model);
++ tdp = get_tdp_amd(family);
+
+ rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
+ if (!quiet)
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index b9e88ccc289b..adced69d026e 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -61,7 +61,8 @@ TEST_PROGS := test_kmod.sh \
+ TEST_PROGS_EXTENDED := with_addr.sh \
+ with_tunnels.sh \
+ tcp_client.py \
+- tcp_server.py
++ tcp_server.py \
++ test_xdp_vlan.sh
+
+ # Compile but not part of 'make run_tests'
+ TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
+diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
+index 3ba7278fb533..b249220025bc 100644
+--- a/virt/kvm/arm/vgic/vgic-mmio.c
++++ b/virt/kvm/arm/vgic/vgic-mmio.c
+@@ -195,6 +195,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ vgic_irq_set_phys_active(irq, true);
+ }
+
++static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
++{
++ return (vgic_irq_is_sgi(irq->intid) &&
++ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
++}
++
+ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+@@ -207,6 +213,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
++ /* GICD_ISPENDR0 SGI bits are WI */
++ if (is_vgic_v2_sgi(vcpu, irq)) {
++ vgic_put_irq(vcpu->kvm, irq);
++ continue;
++ }
++
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->hw)
+ vgic_hw_irq_spending(vcpu, irq, is_uaccess);
+@@ -254,6 +266,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
++ /* GICD_ICPENDR0 SGI bits are WI */
++ if (is_vgic_v2_sgi(vcpu, irq)) {
++ vgic_put_irq(vcpu->kvm, irq);
++ continue;
++ }
++
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+
+ if (irq->hw)
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 96aab77d0471..b00aa304c260 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+ if (vgic_irq_is_sgi(irq->intid)) {
+ u32 src = ffs(irq->source);
+
+- BUG_ON(!src);
++ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
++ irq->intid))
++ return;
++
+ val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source &= ~(1 << (src - 1));
+ if (irq->source) {
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 0c653a1e5215..a4ad431c92a9 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+ model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ u32 src = ffs(irq->source);
+
+- BUG_ON(!src);
++ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
++ irq->intid))
++ return;
++
+ val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source &= ~(1 << (src - 1));
+ if (irq->source) {
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 13d4b38a94ec..e7bde65ba67c 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
+ bool penda, pendb;
+ int ret;
+
++ /*
++ * list_sort may call this function with the same element when
++ * the list is fairly long.
++ */
++ if (unlikely(irqa == irqb))
++ return 0;
++
+ raw_spin_lock(&irqa->irq_lock);
+ raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+
diff --git a/1012_linux-5.2.13.patch b/1012_linux-5.2.13.patch
new file mode 100644
index 0000000..c8f98ac
--- /dev/null
+++ b/1012_linux-5.2.13.patch
@@ -0,0 +1,92 @@
+diff --git a/Makefile b/Makefile
+index e26d52d93bb1..288284de8858 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index a47c7add4e0e..a4345052abd2 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1807,30 +1807,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
+ leave_breadcrumbs);
+ }
+
+-static bool elantech_use_host_notify(struct psmouse *psmouse,
+- struct elantech_device_info *info)
+-{
+- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+- return true;
+-
+- switch (info->bus) {
+- case ETP_BUS_PS2_ONLY:
+- /* expected case */
+- break;
+- case ETP_BUS_SMB_HST_NTFY_ONLY:
+- case ETP_BUS_PS2_SMB_HST_NTFY:
+- /* SMbus implementation is stable since 2018 */
+- if (dmi_get_bios_year() >= 2018)
+- return true;
+- default:
+- psmouse_dbg(psmouse,
+- "Ignoring SMBus bus provider %d\n", info->bus);
+- break;
+- }
+-
+- return false;
+-}
+-
+ /**
+ * elantech_setup_smbus - called once the PS/2 devices are enumerated
+ * and decides to instantiate a SMBus InterTouch device.
+@@ -1850,7 +1826,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ * i2c_blacklist_pnp_ids.
+ * Old ICs are up to the user to decide.
+ */
+- if (!elantech_use_host_notify(psmouse, info) ||
++ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
+ return -ENXIO;
+ }
+@@ -1870,6 +1846,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ return 0;
+ }
+
++static bool elantech_use_host_notify(struct psmouse *psmouse,
++ struct elantech_device_info *info)
++{
++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
++ return true;
++
++ switch (info->bus) {
++ case ETP_BUS_PS2_ONLY:
++ /* expected case */
++ break;
++ case ETP_BUS_SMB_ALERT_ONLY:
++ /* fall-through */
++ case ETP_BUS_PS2_SMB_ALERT:
++ psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
++ break;
++ case ETP_BUS_SMB_HST_NTFY_ONLY:
++ /* fall-through */
++ case ETP_BUS_PS2_SMB_HST_NTFY:
++ return true;
++ default:
++ psmouse_dbg(psmouse,
++ "Ignoring SMBus bus provider %d.\n",
++ info->bus);
++ }
++
++ return false;
++}
++
+ int elantech_init_smbus(struct psmouse *psmouse)
+ {
+ struct elantech_device_info info;
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-28 18:18 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-08-28 18:18 UTC (permalink / raw
To: gentoo-commits
commit: d88d44f37b55b17ba04b20f4b973af7a9299f35c
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 28 18:18:17 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 28 18:18:17 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d88d44f3
add gcc cpu opt patch to support gcc >= 9.1
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
5012_enable-cpu-optimizations-for-gcc91.patch | 632 ++++++++++++++++++++++++++
1 file changed, 632 insertions(+)
diff --git a/5012_enable-cpu-optimizations-for-gcc91.patch b/5012_enable-cpu-optimizations-for-gcc91.patch
new file mode 100644
index 0000000..dffd36d
--- /dev/null
+++ b/5012_enable-cpu-optimizations-for-gcc91.patch
@@ -0,0 +1,632 @@
+WARNING
+This patch works with gcc versions 9.1+ and with kernel versions 4.13+ and should
+NOT be applied when compiling with older versions of gcc due to key name changes
+of the march flags introduced with the version 4.9 release of gcc.[1]
+
+Use the older version of this patch hosted on the same github for older
+versions of gcc.
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features --->
+ Processor family --->
+
+The expanded microarchitectures include:
+* AMD Improved K8-family
+* AMD K10-family
+* AMD Family 10h (Barcelona)
+* AMD Family 14h (Bobcat)
+* AMD Family 16h (Jaguar)
+* AMD Family 15h (Bulldozer)
+* AMD Family 15h (Piledriver)
+* AMD Family 15h (Steamroller)
+* AMD Family 15h (Excavator)
+* AMD Family 17h (Zen)
+* AMD Family 17h (Zen 2)
+* Intel Silvermont low-power processors
+* Intel Goldmont low-power processors (Apollo Lake and Denverton)
+* Intel Goldmont Plus low-power processors (Gemini Lake)
+* Intel 1st Gen Core i3/i5/i7 (Nehalem)
+* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+* Intel 4th Gen Core i3/i5/i7 (Haswell)
+* Intel 5th Gen Core i3/i5/i7 (Broadwell)
+* Intel 6th Gen Core i3/i5/i7 (Skylake)
+* Intel 6th Gen Core i7/i9 (Skylake X)
+* Intel 8th Gen Core i3/i5/i7 (Cannon Lake)
+* Intel 10th Gen Core i7/i9 (Ice Lake)
+* Intel Xeon (Cascade Lake)
+
+It also offers to compile passing the 'native' option which "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[3]
+
+MINOR NOTES
+This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
+changes. Note that upstream is using the deprecated 'match=atom' flags when I
+believe it should use the newer 'march=bonnell' flag for atom processors.[2]
+
+It is not recommended to compile on Atom CPUs with the 'native' option.[4] The
+recommendation is to use the 'atom' option instead.
+
+BENEFITS
+Small but real speed increases are measurable using a make-based benchmark
+comparing a generic kernel to one built with one of the respective microarchs.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=4.13
+gcc version >=9.1
+
+ACKNOWLEDGMENTS
+This patch builds on the seminal work by Jeroen.[5]
+
+REFERENCES
+1. https://gcc.gnu.org/gcc-4.9/changes.html
+2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
+3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
+4. https://github.com/graysky2/kernel_gcc_patch/issues/15
+5. http://www.linuxforge.net/docs/linux/linux-gcc.php
+
+--- a/arch/x86/include/asm/module.h 2019-08-16 04:11:12.000000000 -0400
++++ b/arch/x86/include/asm/module.h 2019-08-22 15:56:23.988050322 -0400
+@@ -25,6 +25,36 @@ struct mod_arch_specific {
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MGOLDMONT
++#define MODULE_PROC_FAMILY "GOLDMONT "
++#elif defined CONFIG_MGOLDMONTPLUS
++#define MODULE_PROC_FAMILY "GOLDMONTPLUS "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
++#elif defined CONFIG_MSKYLAKEX
++#define MODULE_PROC_FAMILY "SKYLAKEX "
++#elif defined CONFIG_MCANNONLAKE
++#define MODULE_PROC_FAMILY "CANNONLAKE "
++#elif defined CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -43,6 +73,28 @@ struct mod_arch_specific {
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu 2019-08-16 04:11:12.000000000 -0400
++++ b/arch/x86/Kconfig.cpu 2019-08-22 15:59:31.596946943 -0400
+@@ -116,6 +116,7 @@ config MPENTIUMM
+ config MPENTIUM4
+ bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
+ depends on X86_32
++ select X86_P6_NOP
+ ---help---
+ Select this for Intel Pentium 4 chips. This includes the
+ Pentium 4, Pentium D, P4-based Celeron and Xeon, and
+@@ -148,9 +149,8 @@ config MPENTIUM4
+ -Paxville
+ -Dempsey
+
+-
+ config MK6
+- bool "K6/K6-II/K6-III"
++ bool "AMD K6/K6-II/K6-III"
+ depends on X86_32
+ ---help---
+ Select this for an AMD K6-family processor. Enables use of
+@@ -158,7 +158,7 @@ config MK6
+ flags to GCC.
+
+ config MK7
+- bool "Athlon/Duron/K7"
++ bool "AMD Athlon/Duron/K7"
+ depends on X86_32
+ ---help---
+ Select this for an AMD Athlon K7-family processor. Enables use of
+@@ -166,12 +166,90 @@ config MK7
+ flags to GCC.
+
+ config MK8
+- bool "Opteron/Athlon64/Hammer/K8"
++ bool "AMD Opteron/Athlon64/Hammer/K8"
+ ---help---
+ Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ Enables use of some extended instructions, and passes appropriate
+ optimization flags to GCC.
+
++config MK8SSE3
++ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++ ---help---
++ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
++ Enables use of some extended instructions, and passes appropriate
++ optimization flags to GCC.
++
++config MK10
++ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++ ---help---
++ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++ Enables use of some extended instructions, and passes appropriate
++ optimization flags to GCC.
++
++config MBARCELONA
++ bool "AMD Barcelona"
++ ---help---
++ Select this for AMD Family 10h Barcelona processors.
++
++ Enables -march=barcelona
++
++config MBOBCAT
++ bool "AMD Bobcat"
++ ---help---
++ Select this for AMD Family 14h Bobcat processors.
++
++ Enables -march=btver1
++
++config MJAGUAR
++ bool "AMD Jaguar"
++ ---help---
++ Select this for AMD Family 16h Jaguar processors.
++
++ Enables -march=btver2
++
++config MBULLDOZER
++ bool "AMD Bulldozer"
++ ---help---
++ Select this for AMD Family 15h Bulldozer processors.
++
++ Enables -march=bdver1
++
++config MPILEDRIVER
++ bool "AMD Piledriver"
++ ---help---
++ Select this for AMD Family 15h Piledriver processors.
++
++ Enables -march=bdver2
++
++config MSTEAMROLLER
++ bool "AMD Steamroller"
++ ---help---
++ Select this for AMD Family 15h Steamroller processors.
++
++ Enables -march=bdver3
++
++config MEXCAVATOR
++ bool "AMD Excavator"
++ ---help---
++ Select this for AMD Family 15h Excavator processors.
++
++ Enables -march=bdver4
++
++config MZEN
++ bool "AMD Zen"
++ ---help---
++ Select this for AMD Family 17h Zen processors.
++
++ Enables -march=znver1
++
++config MZEN2
++ bool "AMD Zen 2"
++ ---help---
++ Select this for AMD Family 17h Zen 2 processors.
++
++ Enables -march=znver2
++
+ config MCRUSOE
+ bool "Crusoe"
+ depends on X86_32
+@@ -253,6 +331,7 @@ config MVIAC7
+
+ config MPSC
+ bool "Intel P4 / older Netburst based Xeon"
++ select X86_P6_NOP
+ depends on X86_64
+ ---help---
+ Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
+@@ -262,8 +341,19 @@ config MPSC
+ using the cpu family field
+ in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+
++config MATOM
++ bool "Intel Atom"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for the Intel Atom platform. Intel Atom CPUs have an
++ in-order pipelining architecture and thus can benefit from
++ accordingly optimized code. Use a recent GCC with specific Atom
++ support in order to fully benefit from selecting this option.
++
+ config MCORE2
+- bool "Core 2/newer Xeon"
++ bool "Intel Core 2"
++ select X86_P6_NOP
+ ---help---
+
+ Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -271,14 +361,133 @@ config MCORE2
+ family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ (not a typo)
+
+-config MATOM
+- bool "Intel Atom"
++ Enables -march=core2
++
++config MNEHALEM
++ bool "Intel Nehalem"
++ select X86_P6_NOP
+ ---help---
+
+- Select this for the Intel Atom platform. Intel Atom CPUs have an
+- in-order pipelining architecture and thus can benefit from
+- accordingly optimized code. Use a recent GCC with specific Atom
+- support in order to fully benefit from selecting this option.
++ Select this for 1st Gen Core processors in the Nehalem family.
++
++ Enables -march=nehalem
++
++config MWESTMERE
++ bool "Intel Westmere"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for the Intel Westmere formerly Nehalem-C family.
++
++ Enables -march=westmere
++
++config MSILVERMONT
++ bool "Intel Silvermont"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for the Intel Silvermont platform.
++
++ Enables -march=silvermont
++
++config MGOLDMONT
++ bool "Intel Goldmont"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for the Intel Goldmont platform including Apollo Lake and Denverton.
++
++ Enables -march=goldmont
++
++config MGOLDMONTPLUS
++ bool "Intel Goldmont Plus"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for the Intel Goldmont Plus platform including Gemini Lake.
++
++ Enables -march=goldmont-plus
++
++config MSANDYBRIDGE
++ bool "Intel Sandy Bridge"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++ Enables -march=sandybridge
++
++config MIVYBRIDGE
++ bool "Intel Ivy Bridge"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++ Enables -march=ivybridge
++
++config MHASWELL
++ bool "Intel Haswell"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 4th Gen Core processors in the Haswell family.
++
++ Enables -march=haswell
++
++config MBROADWELL
++ bool "Intel Broadwell"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 5th Gen Core processors in the Broadwell family.
++
++ Enables -march=broadwell
++
++config MSKYLAKE
++ bool "Intel Skylake"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 6th Gen Core processors in the Skylake family.
++
++ Enables -march=skylake
++
++config MSKYLAKEX
++ bool "Intel Skylake X"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 6th Gen Core processors in the Skylake X family.
++
++ Enables -march=skylake-avx512
++
++config MCANNONLAKE
++ bool "Intel Cannon Lake"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 8th Gen Core processors
++
++ Enables -march=cannonlake
++
++config MICELAKE
++ bool "Intel Ice Lake"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for 10th Gen Core processors in the Ice Lake family.
++
++ Enables -march=icelake-client
++
++config MCASCADELAKE
++ bool "Intel Cascade Lake"
++ select X86_P6_NOP
++ ---help---
++
++ Select this for Xeon processors in the Cascade Lake family.
++
++ Enables -march=cascadelake
+
+ config GENERIC_CPU
+ bool "Generic-x86-64"
+@@ -287,6 +496,19 @@ config GENERIC_CPU
+ Generic x86-64 CPU.
+ Run equally well on all x86-64 CPUs.
+
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++ GCC 4.2 and above support -march=native, which automatically detects
++ the optimum settings to use based on your processor. -march=native
++ also detects and applies additional settings beyond -march specific
++ to your CPU (e.g. -msse4). Unless you have a specific reason not to
++ (e.g. distcc cross-compiling), you should probably be using
++ -march=native rather than anything listed below.
++
++ Enables -march=native
++
+ endchoice
+
+ config X86_GENERIC
+@@ -311,7 +533,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ int
+ default "7" if MPENTIUM4 || MPSC
+- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ default "4" if MELAN || M486 || MGEODEGX1
+ default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+
+@@ -329,35 +551,36 @@ config X86_ALIGNMENT_16
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE
+
+ config X86_USE_PPRO_CHECKSUM
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MATOM || MNATIVE
+
+ config X86_USE_3DNOW
+ def_bool y
+ depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
+
+-#
+-# P6_NOPs are a relatively minor optimization that require a family >=
+-# 6 processor, except that it is broken on certain VIA chips.
+-# Furthermore, AMD chips prefer a totally different sequence of NOPs
+-# (which work on all CPUs). In addition, it looks like Virtual PC
+-# does not understand them.
+-#
+-# As a result, disallow these if we're not compiling for X86_64 (these
+-# NOPs do work on all x86-64 capable chips); the list of processors in
+-# the right-hand clause are the cores that benefit from this optimization.
+-#
+ config X86_P6_NOP
+- def_bool y
+- depends on X86_64
+- depends on (MCORE2 || MPENTIUM4 || MPSC)
++ default n
++ bool "Support for P6_NOPs on Intel chips"
++ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE)
++ ---help---
++ P6_NOPs are a relatively minor optimization that require a family >=
++ 6 processor, except that it is broken on certain VIA chips.
++ Furthermore, AMD chips prefer a totally different sequence of NOPs
++ (which work on all CPUs). In addition, it looks like Virtual PC
++ does not understand them.
++
++ As a result, disallow these if we're not compiling for X86_64 (these
++ NOPs do work on all x86-64 capable chips); the list of processors in
++ the right-hand clause are the cores that benefit from this optimization.
++
++ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
+
+ config X86_TSC
+ def_bool y
+- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MNATIVE || MATOM) || X86_64
+
+ config X86_CMPXCHG64
+ def_bool y
+@@ -367,7 +590,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+--- a/arch/x86/Makefile 2019-08-16 04:11:12.000000000 -0400
++++ b/arch/x86/Makefile 2019-08-22 16:01:22.559789904 -0400
+@@ -118,13 +118,53 @@ else
+ KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+
+ # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
++ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
++ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
++ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
++ cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2)
+ cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+
+ cflags-$(CONFIG_MCORE2) += \
+- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++ cflags-$(CONFIG_MNEHALEM) += \
++ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++ cflags-$(CONFIG_MWESTMERE) += \
++ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++ cflags-$(CONFIG_MSILVERMONT) += \
++ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++ cflags-$(CONFIG_MGOLDMONT) += \
++ $(call cc-option,-march=goldmont,$(call cc-option,-mtune=goldmont))
++ cflags-$(CONFIG_MGOLDMONTPLUS) += \
++ $(call cc-option,-march=goldmont-plus,$(call cc-option,-mtune=goldmont-plus))
++ cflags-$(CONFIG_MSANDYBRIDGE) += \
++ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++ cflags-$(CONFIG_MIVYBRIDGE) += \
++ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++ cflags-$(CONFIG_MHASWELL) += \
++ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++ cflags-$(CONFIG_MBROADWELL) += \
++ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++ cflags-$(CONFIG_MSKYLAKE) += \
++ $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
++ cflags-$(CONFIG_MSKYLAKEX) += \
++ $(call cc-option,-march=skylake-avx512,$(call cc-option,-mtune=skylake-avx512))
++ cflags-$(CONFIG_MCANNONLAKE) += \
++ $(call cc-option,-march=cannonlake,$(call cc-option,-mtune=cannonlake))
++ cflags-$(CONFIG_MICELAKE) += \
++ $(call cc-option,-march=icelake-client,$(call cc-option,-mtune=icelake-client))
++ cflags-$(CONFIG_MCASCADELAKE) += \
++ $(call cc-option,-march=cascadelake,$(call cc-option,-mtune=cascadelake))
++ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+ KBUILD_CFLAGS += $(cflags-y)
+
+--- a/arch/x86/Makefile_32.cpu 2019-08-16 04:11:12.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu 2019-08-22 16:02:14.687701216 -0400
+@@ -23,7 +23,19 @@ cflags-$(CONFIG_MK6) += -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7) += -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon)
++cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
++cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon)
++cflags-$(CONFIG_MZEN2) += $(call cc-option,-march=znver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
+ cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,8 +44,22 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
+ cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7) += -march=i686
+ cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MGOLDMONT) += -march=i686 $(call tune,goldmont)
++cflags-$(CONFIG_MGOLDMONTPLUS) += -march=i686 $(call tune,goldmont-plus)
++cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MSKYLAKE) += -march=i686 $(call tune,skylake)
++cflags-$(CONFIG_MSKYLAKEX) += -march=i686 $(call tune,skylake-avx512)
++cflags-$(CONFIG_MCANNONLAKE) += -march=i686 $(call tune,cannonlake)
++cflags-$(CONFIG_MICELAKE) += -march=i686 $(call tune,icelake-client)
++cflags-$(CONFIG_MCASCADELAKE) += -march=i686 $(call tune,cascadelake)
++cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN) += -march=i486
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-25 17:38 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-08-25 17:38 UTC (permalink / raw
To: gentoo-commits
commit: 4a453651128d111aace968223e7ed8dc5a8132b4
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 25 17:38:15 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 25 17:38:15 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4a453651
Linux patch 5.2.10
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1009_linux-5.2.10.patch | 5449 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 5453 insertions(+)
diff --git a/0000_README b/0000_README
index 04259bc..2056b84 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-5.2.9.patch
From: https://www.kernel.org
Desc: Linux 5.2.9
+Patch: 1009_linux-5.2.10.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.10
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-5.2.10.patch b/1009_linux-5.2.10.patch
new file mode 100644
index 0000000..883c14a
--- /dev/null
+++ b/1009_linux-5.2.10.patch
@@ -0,0 +1,5449 @@
+diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
+index cb85af559dff..178f4104f5cf 100644
+--- a/Documentation/networking/tls-offload.rst
++++ b/Documentation/networking/tls-offload.rst
+@@ -445,24 +445,6 @@ These flags will be acted upon accordingly by the core ``ktls`` code.
+ TLS device feature flags only control adding of new TLS connection
+ offloads, old connections will remain active after flags are cleared.
+
+-Known bugs
+-==========
+-
+-skb_orphan() leaks clear text
+------------------------------
+-
+-Currently drivers depend on the :c:member:`sk` member of
+-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
+-encryption. Any operation which removes or does not preserve the socket
+-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
+-will cause the driver to miss the packets and lead to clear text leaks.
+-
+-Redirects leak clear text
+--------------------------
+-
+-In the RX direction, if segment has already been decrypted by the device
+-and it gets redirected or mirrored - clear text will be transmitted out.
+-
+ .. _pre_tls_data:
+
+ Transmission of pre-TLS data
+diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
+index 7cdf7282e022..65b6c1109cc8 100644
+--- a/Documentation/vm/hmm.rst
++++ b/Documentation/vm/hmm.rst
+@@ -231,7 +231,7 @@ respect in order to keep things properly synchronized. The usage pattern is::
+ ret = hmm_range_snapshot(&range);
+ if (ret) {
+ up_read(&mm->mmap_sem);
+- if (ret == -EAGAIN) {
++ if (ret == -EBUSY) {
+ /*
+ * No need to check hmm_range_wait_until_valid() return value
+ * on retry we will get proper error with hmm_range_snapshot()
+diff --git a/Makefile b/Makefile
+index cfc667fe9959..35fee16d5006 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
+index 79155a8cfe7c..89e4c8b79349 100644
+--- a/arch/arm64/include/asm/arch_gicv3.h
++++ b/arch/arm64/include/asm/arch_gicv3.h
+@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
+ BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
+ GIC_PRIO_PSR_I_SET));
+ BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
++ /*
++ * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
++ * and non-secure PMR accesses are not subject to the shifts that
++ * are applied to IRQ priorities
++ */
++ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+ }
+
+diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
+index ae7e605085d7..9c0e0178ea29 100644
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -13,6 +13,8 @@
+ #define DAIF_PROCCTX 0
+ #define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+ #define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
++#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
++
+
+ /* mask/save/unmask/restore all exceptions, including interrupts. */
+ static inline void local_daif_mask(void)
+diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
+index c9e9a6978e73..d3cb42fd51ec 100644
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
+ #define alloc_screen_info(x...) &screen_info
+-#define free_screen_info(x...)
++
++static inline void free_screen_info(efi_system_table_t *sys_table_arg,
++ struct screen_info *si)
++{
++}
+
+ /* redeclare as 'hidden' so the compiler will generate relative references */
+ extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index b7ba75809751..fb04f10a78ab 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -210,7 +210,11 @@ extern u64 vabits_user;
+ #define __tag_reset(addr) untagged_addr(addr)
+ #define __tag_get(addr) (__u8)((u64)(addr) >> 56)
+ #else
+-#define __tag_set(addr, tag) (addr)
++static inline const void *__tag_set(const void *addr, u8 tag)
++{
++ return addr;
++}
++
+ #define __tag_reset(addr) (addr)
+ #define __tag_get(addr) 0
+ #endif
+@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
+ #define page_to_virt(page) ({ \
+ unsigned long __addr = \
+ ((__page_to_voff(page)) | PAGE_OFFSET); \
+- unsigned long __addr_tag = \
+- __tag_set(__addr, page_kasan_tag(page)); \
++ const void *__addr_tag = \
++ __tag_set((void *)__addr, page_kasan_tag(page)); \
+ ((void *)__addr_tag); \
+ })
+
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index fca26759081a..b9574d850f14 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -419,8 +419,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ PMD_TYPE_SECT)
+
+ #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
+-#define pud_sect(pud) (0)
+-#define pud_table(pud) (1)
++static inline bool pud_sect(pud_t pud) { return false; }
++static inline bool pud_table(pud_t pud) { return true; }
+ #else
+ #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
+ PUD_TYPE_SECT)
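The efi.h, memory.h and pgtable.h hunks above all apply the same idiom: empty or constant object-like macros are replaced with typed static inline functions, so arguments are type-checked in every configuration, including the ones where the body compiles to nothing. A generic sketch of the pattern, with hypothetical names:

    /* Before: in the stub configuration, 'dev' is never type-checked. */
    #define feature_hook(dev)

    /* After: still a zero-cost no-op, but the argument type is enforced. */
    static inline void feature_hook(struct device *dev)
    {
    }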
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index 81693244f58d..701eaa738187 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -30,7 +30,7 @@
+ * in the priority mask, it indicates that PSR.I should be set and
+ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+-#define GIC_PRIO_IRQON 0xc0
++#define GIC_PRIO_IRQON 0xe0
+ #define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+ #define GIC_PRIO_PSR_I_SET (1 << 4)
+
+diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
+index 1285c7b2947f..171773257974 100644
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+- struct plt_entry trampoline;
++ struct plt_entry trampoline, *dst;
+ struct module *mod;
+
+ /*
+@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+ * to check if the actual opcodes are in fact identical,
+ * regardless of the offset in memory so use memcmp() instead.
+ */
+- trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
+- if (memcmp(mod->arch.ftrace_trampoline, &trampoline,
+- sizeof(trampoline))) {
+- if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) {
++ dst = mod->arch.ftrace_trampoline;
++ trampoline = get_plt_entry(addr, dst);
++ if (memcmp(dst, &trampoline, sizeof(trampoline))) {
++ if (plt_entry_is_initialized(dst)) {
+ pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+ return -EINVAL;
+ }
+
+ /* point the trampoline to our ftrace entry point */
+ module_disable_ro(mod);
+- *mod->arch.ftrace_trampoline = trampoline;
++ *dst = trampoline;
+ module_enable_ro(mod, true);
+
+- /* update trampoline before patching in the branch */
+- smp_wmb();
++ /*
++ * Ensure updated trampoline is visible to instruction
++ * fetch before we patch in the branch.
++ */
++ __flush_icache_range((unsigned long)&dst[0],
++ (unsigned long)&dst[1]);
+ }
+- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
++ addr = (unsigned long)dst;
+ #else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+ #endif /* CONFIG_ARM64_MODULE_PLTS */
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index 88ce502c8e6f..624f2501f3f8 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -21,6 +21,7 @@
+ #include <asm/ptrace.h>
+ #include <asm/cacheflush.h>
+ #include <asm/debug-monitors.h>
++#include <asm/daifflags.h>
+ #include <asm/system_misc.h>
+ #include <asm/insn.h>
+ #include <linux/uaccess.h>
+@@ -165,33 +166,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
+ __this_cpu_write(current_kprobe, p);
+ }
+
+-/*
+- * When PSTATE.D is set (masked), then software step exceptions can not be
+- * generated.
+- * SPSR's D bit shows the value of PSTATE.D immediately before the
+- * exception was taken. PSTATE.D is set while entering into any exception
+- * mode, however software clears it for any normal (non-debug-exception)
+- * mode in the exception entry. Therefore, when we are entering into kprobe
+- * breakpoint handler from any normal mode then SPSR.D bit is already
+- * cleared, however it is set when we are entering from any debug exception
+- * mode.
+- * Since we always need to generate single step exception after a kprobe
+- * breakpoint exception therefore we need to clear it unconditionally, when
+- * we become sure that the current breakpoint exception is for kprobe.
+- */
+-static void __kprobes
+-spsr_set_debug_flag(struct pt_regs *regs, int mask)
+-{
+- unsigned long spsr = regs->pstate;
+-
+- if (mask)
+- spsr |= PSR_D_BIT;
+- else
+- spsr &= ~PSR_D_BIT;
+-
+- regs->pstate = spsr;
+-}
+-
+ /*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+@@ -203,17 +177,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
+ static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+ {
+- kcb->saved_irqflag = regs->pstate;
++ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
+ regs->pstate |= PSR_I_BIT;
++ /* Unmask PSTATE.D to enable software step exceptions. */
++ regs->pstate &= ~PSR_D_BIT;
+ }
+
+ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+ {
+- if (kcb->saved_irqflag & PSR_I_BIT)
+- regs->pstate |= PSR_I_BIT;
+- else
+- regs->pstate &= ~PSR_I_BIT;
++ regs->pstate &= ~DAIF_MASK;
++ regs->pstate |= kcb->saved_irqflag;
+ }
+
+ static void __kprobes
+@@ -250,8 +224,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
+
+ set_ss_context(kcb, slot); /* mark pending ss */
+
+- spsr_set_debug_flag(regs, 0);
+-
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(kcb, regs);
+ kernel_enable_single_step(regs);
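The irqflag helper rework is the subtle part of this hunk: the old code saved the whole of pstate but restored only PSR_I_BIT, so PSTATE.D, cleared to let the step exception fire, was never put back. A minimal sketch of the intended round-trip:

    /* Save only the exception-mask bits, not the whole PSTATE. */
    unsigned long saved = regs->pstate & DAIF_MASK;

    regs->pstate |= PSR_I_BIT;      /* mask IRQs around the single step */
    regs->pstate &= ~PSR_D_BIT;     /* let the step exception fire */

    /* ... single-step the probed instruction ... */

    /* Put all four D/A/I/F bits back exactly as they were. */
    regs->pstate = (regs->pstate & ~DAIF_MASK) | saved;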
+diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
+index b21cba90f82d..491184a9f081 100644
+--- a/arch/arm64/kernel/return_address.c
++++ b/arch/arm64/kernel/return_address.c
+@@ -8,6 +8,7 @@
+
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+
+ #include <asm/stack_pointer.h>
+ #include <asm/stacktrace.h>
+@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
+ return 0;
+ }
+ }
++NOKPROBE_SYMBOL(save_return_addr);
+
+ void *return_address(unsigned int level)
+ {
+@@ -52,3 +54,4 @@ void *return_address(unsigned int level)
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(return_address);
++NOKPROBE_SYMBOL(return_address);
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 62d395151abe..cd7dab54d17b 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -7,6 +7,7 @@
+ #include <linux/kernel.h>
+ #include <linux/export.h>
+ #include <linux/ftrace.h>
++#include <linux/kprobes.h>
+ #include <linux/sched.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -73,6 +74,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+
+ return 0;
+ }
++NOKPROBE_SYMBOL(unwind_frame);
+
+ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+@@ -87,6 +89,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
+ break;
+ }
+ }
++NOKPROBE_SYMBOL(walk_stackframe);
+
+ #ifdef CONFIG_STACKTRACE
+ struct stack_trace_data {
+diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
+index d66613e6ad08..8a38ccf8dc02 100644
+--- a/arch/arm64/kvm/regmap.c
++++ b/arch/arm64/kvm/regmap.c
+@@ -178,13 +178,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
+ switch (spsr_idx) {
+ case KVM_SPSR_SVC:
+ write_sysreg_el1(v, spsr);
++ break;
+ case KVM_SPSR_ABT:
+ write_sysreg(v, spsr_abt);
++ break;
+ case KVM_SPSR_UND:
+ write_sysreg(v, spsr_und);
++ break;
+ case KVM_SPSR_IRQ:
+ write_sysreg(v, spsr_irq);
++ break;
+ case KVM_SPSR_FIQ:
+ write_sysreg(v, spsr_fiq);
++ break;
+ }
+ }
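This hunk (like the sh hw_breakpoint change further down) repairs a missing-break fallthrough: without the breaks, a write aimed at one banked SPSR also runs every case below it, clobbering the other banked registers. A minimal sketch of the failure mode, with hypothetical names:

    switch (idx) {
    case 0:
            write_a(v);     /* no break: also executes write_b()! */
    case 1:
            write_b(v);
            break;
    }

    /* Fixed: each case ends with a break, so only one register is written. */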
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 2d115016feb4..414b8e0f19e0 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -800,6 +800,53 @@ void __init hook_debug_fault_code(int nr,
+ debug_fault_info[nr].name = name;
+ }
+
++/*
++ * In debug exception context, we explicitly disable preemption despite
++ * having interrupts disabled.
++ * This serves two purposes: it makes it much less likely that we would
++ * accidentally schedule in exception context and it will force a warning
++ * if we somehow manage to schedule by accident.
++ */
++static void debug_exception_enter(struct pt_regs *regs)
++{
++ /*
++ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
++ * already disabled to preserve the last enabled/disabled addresses.
++ */
++ if (interrupts_enabled(regs))
++ trace_hardirqs_off();
++
++ if (user_mode(regs)) {
++ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
++ } else {
++ /*
++ * We might have interrupted pretty much anything. In
++ * fact, if we're a debug exception, we can even interrupt
++ * NMI processing. We don't want this code to make in_nmi()
++ * return true, but we need to notify RCU.
++ */
++ rcu_nmi_enter();
++ }
++
++ preempt_disable();
++
++ /* This code is a bit fragile. Test it. */
++ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
++}
++NOKPROBE_SYMBOL(debug_exception_enter);
++
++static void debug_exception_exit(struct pt_regs *regs)
++{
++ preempt_enable_no_resched();
++
++ if (!user_mode(regs))
++ rcu_nmi_exit();
++
++ if (interrupts_enabled(regs))
++ trace_hardirqs_on();
++}
++NOKPROBE_SYMBOL(debug_exception_exit);
++
+ #ifdef CONFIG_ARM64_ERRATUM_1463225
+ DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+@@ -840,12 +887,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ if (cortex_a76_erratum_1463225_debug_handler(regs))
+ return;
+
+- /*
+- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+- * already disabled to preserve the last enabled/disabled addresses.
+- */
+- if (interrupts_enabled(regs))
+- trace_hardirqs_off();
++ debug_exception_enter(regs);
+
+ if (user_mode(regs) && !is_ttbr0_addr(pc))
+ arm64_apply_bp_hardening();
+@@ -855,7 +897,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ inf->sig, inf->code, (void __user *)pc, esr);
+ }
+
+- if (interrupts_enabled(regs))
+- trace_hardirqs_on();
++ debug_exception_exit(regs);
+ }
+ NOKPROBE_SYMBOL(do_debug_exception);
+diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h
+index 14b1931be69c..b65b169778e3 100644
+--- a/arch/mips/vdso/vdso.h
++++ b/arch/mips/vdso/vdso.h
+@@ -9,6 +9,7 @@
+ #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
+
+ /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
++#define BUILD_VDSO32_64
+ #undef CONFIG_64BIT
+ #define CONFIG_32BIT 1
+ #ifndef __ASSEMBLY__
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index dad9825e4087..3c17fc7c2b93 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -199,12 +199,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = {
+ NULL,
+ };
+
++static inline int papr_scm_node(int node)
++{
++ int min_dist = INT_MAX, dist;
++ int nid, min_node;
++
++ if ((node == NUMA_NO_NODE) || node_online(node))
++ return node;
++
++ min_node = first_online_node;
++ for_each_online_node(nid) {
++ dist = node_distance(node, nid);
++ if (dist < min_dist) {
++ min_dist = dist;
++ min_node = nid;
++ }
++ }
++ return min_node;
++}
++
+ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ {
+ struct device *dev = &p->pdev->dev;
+ struct nd_mapping_desc mapping;
+ struct nd_region_desc ndr_desc;
+ unsigned long dimm_flags;
++ int target_nid, online_nid;
+
+ p->bus_desc.ndctl = papr_scm_ndctl;
+ p->bus_desc.module = THIS_MODULE;
+@@ -243,8 +263,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.attr_groups = region_attr_groups;
+- ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
+- ndr_desc.target_node = ndr_desc.numa_node;
++ target_nid = dev_to_node(&p->pdev->dev);
++ online_nid = papr_scm_node(target_nid);
++ ndr_desc.numa_node = online_nid;
++ ndr_desc.target_node = target_nid;
+ ndr_desc.res = &p->res;
+ ndr_desc.of_node = p->dn;
+ ndr_desc.provider_data = p;
+@@ -259,6 +281,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+ ndr_desc.res, p->dn);
+ goto err;
+ }
++ if (target_nid != online_nid)
++ dev_info(dev, "Region registered with target node %d and online node %d",
++ target_nid, online_nid);
+
+ return 0;
+
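The intent of papr_scm_node() is that libnvdimm needs an online node for its allocations (numa_node) while the region still advertises the device's real, possibly offline, affinity (target_node). Roughly, assuming a hypothetical topology where node 2 is offline and node 0 is its nearest online neighbour by node_distance():

    papr_scm_node(NUMA_NO_NODE);    /* == NUMA_NO_NODE, passed through  */
    papr_scm_node(1);               /* == 1, node already online        */
    papr_scm_node(2);               /* == 0, closest online node wins   */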
+diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
+index 853b65ef656d..f0227bdce0f0 100644
+--- a/arch/riscv/include/asm/switch_to.h
++++ b/arch/riscv/include/asm/switch_to.h
+@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
+
+ static inline void __fstate_clean(struct pt_regs *regs)
+ {
+- regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
++ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
++}
++
++static inline void fstate_off(struct task_struct *task,
++ struct pt_regs *regs)
++{
++ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
+ }
+
+ static inline void fstate_save(struct task_struct *task,
+diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
+index f23794bd1e90..fb3a082362eb 100644
+--- a/arch/riscv/kernel/process.c
++++ b/arch/riscv/kernel/process.c
+@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+ regs->sstatus = SR_SPIE;
+- if (has_fpu)
++ if (has_fpu) {
+ regs->sstatus |= SR_FS_INITIAL;
++ /*
++ * Restore the initial value to the FP register
++ * before starting the user program.
++ */
++ fstate_restore(current, regs);
++ }
+ regs->sepc = pc;
+ regs->sp = sp;
+ set_fs(USER_DS);
+@@ -75,10 +81,11 @@ void flush_thread(void)
+ {
+ #ifdef CONFIG_FPU
+ /*
+- * Reset FPU context
++ * Reset FPU state and context
+ * frm: round to nearest, ties to even (IEEE default)
+ * fflags: accrued exceptions cleared
+ */
++ fstate_off(current, task_pt_regs(current));
+ memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+ #endif
+ }
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index f1d6ffe43e42..49a5852fd07d 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ # these symbols in the kernel code rather than hand-coded addresses.
+
+ SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+- -Wl,--hash-style=both
++ -Wl,--build-id -Wl,--hash-style=both
+ $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+ $(call if_changed,vdsold)
+
+diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
+index bc96b16288c1..af6a65ac04cf 100644
+--- a/arch/sh/kernel/hw_breakpoint.c
++++ b/arch/sh/kernel/hw_breakpoint.c
+@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
+ switch (sh_type) {
+ case SH_BREAKPOINT_READ:
+ *gen_type = HW_BREAKPOINT_R;
++ break;
+ case SH_BREAKPOINT_WRITE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 176cb46bcf12..0634bfb82a0b 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -515,6 +515,7 @@ void cpu_reset(void)
+ "add %2, %2, %7\n\t"
+ "addi %0, %0, -1\n\t"
+ "bnez %0, 1b\n\t"
++ "isync\n\t"
+ /* Jump to identity mapping */
+ "jx %3\n"
+ "2:\n\t"
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index ce0f5f4ede70..68106a41f90d 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2674,8 +2674,6 @@ void blk_mq_release(struct request_queue *q)
+ struct blk_mq_hw_ctx *hctx, *next;
+ int i;
+
+- cancel_delayed_work_sync(&q->requeue_work);
+-
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index 977c659dcd18..9bfa3ea4ed63 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
+
+ blk_free_queue_stats(q->stats);
+
++ if (queue_is_mq(q))
++ cancel_delayed_work_sync(&q->requeue_work);
++
+ blk_exit_queue(q);
+
+ blk_queue_free_zone_bitmaps(q);
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index 72312ad2e142..c25cdbf817f1 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
+ hpriv->phys[port] = NULL;
+ rc = 0;
+ break;
++ case -EPROBE_DEFER:
++ /* Do not complain yet */
++ break;
+
+ default:
+ dev_err(dev,
+diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
+index 173e6f2dd9af..eefda51f97d3 100644
+--- a/drivers/ata/libata-zpodd.c
++++ b/drivers/ata/libata-zpodd.c
+@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
+ unsigned int ret;
+ struct rm_feature_desc *desc;
+ struct ata_taskfile tf;
+- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
++ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
+ 2, /* only 1 feature descriptor requested */
+ 0, 3, /* 3, removable medium feature */
+ 0, 0, 0,/* reserved */
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index bf868260f435..4838c6a9f0f2 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -554,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
+ return hwrng_register(&chip->hwrng);
+ }
+
++static int tpm_get_pcr_allocation(struct tpm_chip *chip)
++{
++ int rc;
++
++ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
++ tpm2_get_pcr_allocation(chip) :
++ tpm1_get_pcr_allocation(chip);
++
++ if (rc > 0)
++ return -ENODEV;
++
++ return rc;
++}
++
+ /*
+ * tpm_chip_register() - create a character device for the TPM chip
+ * @chip: TPM chip to use.
+@@ -573,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip)
+ if (rc)
+ return rc;
+ rc = tpm_auto_startup(chip);
++ if (rc) {
++ tpm_chip_stop(chip);
++ return rc;
++ }
++
++ rc = tpm_get_pcr_allocation(chip);
+ tpm_chip_stop(chip);
+ if (rc)
+ return rc;
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index e503ffc3aa39..a7fea3e0ca86 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
+ ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
+ const char *desc, size_t min_cap_length);
+ int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
++int tpm1_get_pcr_allocation(struct tpm_chip *chip);
+ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+ int tpm_pm_suspend(struct device *dev);
+ int tpm_pm_resume(struct device *dev);
+@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
+ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
+ u32 *value, const char *desc);
+
++ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
+ int tpm2_auto_startup(struct tpm_chip *chip);
+ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
+ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
+index faacbe1ffa1a..149e953ca369 100644
+--- a/drivers/char/tpm/tpm1-cmd.c
++++ b/drivers/char/tpm/tpm1-cmd.c
+@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
+ goto out;
+ }
+
+- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+- GFP_KERNEL);
+- if (!chip->allocated_banks) {
+- rc = -ENOMEM;
+- goto out;
+- }
+-
+- chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+- chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+- chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+- chip->nr_allocated_banks = 1;
+-
+ return rc;
+ out:
+ if (rc > 0)
+@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
+ return rc;
+ }
+
++/**
++ * tpm1_get_pcr_allocation() - initialize the allocated bank
++ * @chip: TPM chip to use.
++ *
++ * The function initializes the SHA1 allocated bank to extend PCRs.
++ *
++ * Return:
++ * * 0 on success,
++ * * < 0 on error.
++ */
++int tpm1_get_pcr_allocation(struct tpm_chip *chip)
++{
++ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
++ GFP_KERNEL);
++ if (!chip->allocated_banks)
++ return -ENOMEM;
++
++ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
++ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
++ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
++ chip->nr_allocated_banks = 1;
++
++ return 0;
++}
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index d103545e4055..ba9acae83bff 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
+ u8 pcr_select[3];
+ } __packed;
+
+-static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
++ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+ {
+ struct tpm2_pcr_selection pcr_selection;
+ struct tpm_buf buf;
+@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
+ goto out;
+ }
+
+- rc = tpm2_get_pcr_allocation(chip);
+- if (rc)
+- goto out;
+-
+ rc = tpm2_get_cc_attrs_tbl(chip);
+
+ out:
+diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
+index 44db83a6d01c..44a46dcc0518 100644
+--- a/drivers/clk/at91/clk-generated.c
++++ b/drivers/clk/at91/clk-generated.c
+@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
+ continue;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
++ if (div > GENERATED_MAX_DIV + 1)
++ div = GENERATED_MAX_DIV + 1;
+
+ clk_generated_best_diff(req, parent, parent_rate, div,
+ &best_diff, &best_rate);
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 0201809bbd37..9dfa28d6fd9f 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -576,17 +576,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
+
+ /* Reset module */
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+
+ /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
+ udelay(35);
+@@ -603,16 +597,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+ unsigned int reg = id / 32;
+ unsigned int bit = id % 32;
+ u32 bitmask = BIT(bit);
+- unsigned long flags;
+- u32 value;
+
+ dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
+
+- spin_lock_irqsave(&priv->rmw_lock, flags);
+- value = readl(priv->base + SRCR(reg));
+- value |= bitmask;
+- writel(value, priv->base + SRCR(reg));
+- spin_unlock_irqrestore(&priv->rmw_lock, flags);
++ writel(bitmask, priv->base + SRCR(reg));
+ return 0;
+ }
+
+diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
+index 91d3d721c801..3c219af25100 100644
+--- a/drivers/clk/sprd/Kconfig
++++ b/drivers/clk/sprd/Kconfig
+@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
+ tristate "Clock support for Spreadtrum SoCs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ default ARCH_SPRD
++ select REGMAP_MMIO
+
+ if SPRD_COMMON_CLK
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 4b192e0ce92f..ed7977d0dd01 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1148,7 +1148,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ adev->asic_type != CHIP_FIJI &&
+ adev->asic_type != CHIP_POLARIS10 &&
+ adev->asic_type != CHIP_POLARIS11 &&
+- adev->asic_type != CHIP_POLARIS12) ?
++ adev->asic_type != CHIP_POLARIS12 &&
++ adev->asic_type != CHIP_VEGAM) ?
+ VI_BO_SIZE_ALIGN : 1;
+
+ mapping_flags = AMDGPU_VM_PAGE_READABLE;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 2f6239b6be6f..fe028561dc0e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -1093,29 +1093,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+ return r;
+ }
+
+- fence = amdgpu_ctx_get_fence(ctx, entity,
+- deps[i].handle);
++ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
++ amdgpu_ctx_put(ctx);
++
++ if (IS_ERR(fence))
++ return PTR_ERR(fence);
++ else if (!fence)
++ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+- struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
++ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
++ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+- if (IS_ERR(fence)) {
+- r = PTR_ERR(fence);
+- amdgpu_ctx_put(ctx);
++ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
++ dma_fence_put(fence);
++ if (r)
+ return r;
+- } else if (fence) {
+- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
+- true);
+- dma_fence_put(fence);
+- amdgpu_ctx_put(ctx);
+- if (r)
+- return r;
+- }
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 8930d66f2204..91bfb24f963e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -703,7 +703,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
++ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index abeaab4bf1bc..d55519bc34e5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -144,12 +144,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+ enum amd_pm_state_type pm;
+
+- if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+- pm = amdgpu_smu_get_current_power_state(adev);
+- else if (adev->powerplay.pp_funcs->get_current_power_state)
++ if (is_support_sw_smu(adev)) {
++ if (adev->smu.ppt_funcs->get_current_power_state)
++ pm = amdgpu_smu_get_current_power_state(adev);
++ else
++ pm = adev->pm.dpm.user_state;
++ } else if (adev->powerplay.pp_funcs->get_current_power_state) {
+ pm = amdgpu_dpm_get_current_power_state(adev);
+- else
++ } else {
+ pm = adev->pm.dpm.user_state;
++ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+@@ -176,7 +180,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
+ goto fail;
+ }
+
+- if (adev->powerplay.pp_funcs->dispatch_tasks) {
++ if (is_support_sw_smu(adev)) {
++ mutex_lock(&adev->pm.mutex);
++ adev->pm.dpm.user_state = state;
++ mutex_unlock(&adev->pm.mutex);
++ } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
+ } else {
+ mutex_lock(&adev->pm.mutex);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index 2f18c64d531f..2f7f0a2e4a6c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -4553,7 +4553,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+- WREG32(mmSQ_CMD, value);
++ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ }
+
+ static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index eec329ab6037..61a6d183c153 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -63,7 +63,8 @@ int smu_get_power_num_states(struct smu_context *smu,
+
+ /* power states are not supported */
+ memset(state_info, 0, sizeof(struct pp_states_info));
+- state_info->nums = 0;
++ state_info->nums = 1;
++ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
+
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
+index ee777469293a..e4e22bbae2a7 100644
+--- a/drivers/gpu/drm/bridge/Kconfig
++++ b/drivers/gpu/drm/bridge/Kconfig
+@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
+ config DRM_LVDS_ENCODER
+ tristate "Transparent parallel to LVDS encoder support"
+ depends on OF
++ select DRM_KMS_HELPER
+ select DRM_PANEL_BRIDGE
+ help
+ Support for transparent parallel to LVDS encoders that don't require
+@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
+
+ config DRM_TOSHIBA_TC358764
+ tristate "TC358764 DSI/LVDS bridge"
+- depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
++ select DRM_KMS_HELPER
++ select DRM_PANEL
+ help
+ Toshiba TC358764 DSI/LVDS bridge driver.
+
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+index ec9c1b7d3103..8989f8af716b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+- } while (retry > 1 &&
++ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
++ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+ }
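The scaler change fixes a bounded-poll loop whose bound never engaged: retry was tested but not decremented, so a stuck SCALER_CFG_SOFT_RESET bit would spin forever instead of returning -EIO. The corrected shape of such a loop, sketched with a hypothetical reset_done() predicate and an assumed retry budget:

    int retry = SCALER_RESET_TIMEOUT;       /* assumed retry budget */

    do {
            cpu_relax();
    } while (--retry > 0 && !reset_done()); /* decrement, or the bound is dead */

    return retry ? 0 : -EIO;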
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 4a0fe8a25ad7..a56eef3cfee7 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1267,7 +1267,8 @@ static int add_gpu_components(struct device *dev,
+ if (!np)
+ return 0;
+
+- drm_of_component_match_add(dev, matchptr, compare_of, np);
++ if (of_device_is_available(np))
++ drm_of_component_match_add(dev, matchptr, compare_of, np);
+
+ of_node_put(np);
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 847b7866137d..bdaf5ffd2504 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -766,16 +766,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
+ int slots;
+
+- /* When restoring duplicated states, we need to make sure that the
+- * bw remains the same and avoid recalculating it, as the connector's
+- * bpc may have changed after the state was duplicated
+- */
+- if (!state->duplicated)
+- asyh->dp.pbn =
+- drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
+- connector->display_info.bpc * 3);
++ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
++ /*
++ * When restoring duplicated states, we need to make sure that
++ * the bw remains the same and avoid recalculating it, as the
++ * connector's bpc may have changed after the state was
++ * duplicated
++ */
++ if (!state->duplicated) {
++ const int bpp = connector->display_info.bpc * 3;
++ const int clock = crtc_state->adjusted_mode.clock;
++
++ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
++ }
+
+- if (crtc_state->mode_changed) {
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+ mstc->port,
+ asyh->dp.pbn);
+diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
+index b3d502421b79..0a38e8e9bc78 100644
+--- a/drivers/hid/hid-holtek-kbd.c
++++ b/drivers/hid/hid-holtek-kbd.c
+@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+
+ /* Locate the boot interface, to receive the LED change events */
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
++ struct hid_device *boot_hid;
++ struct hid_input *boot_hid_input;
+
+- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
++ if (unlikely(boot_interface == NULL))
++ return -ENODEV;
++
++ boot_hid = usb_get_intfdata(boot_interface);
++ boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 55b72573066b..4e11cc6fc34b 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
+ spin_unlock_irq(&list->hiddev->list_lock);
+
+ mutex_lock(&hiddev->existancelock);
++ /*
++ * recheck that the device still exists with existancelock held
++ * to avoid opening a disconnected device
++ */
++ if (!list->hiddev->exist) {
++ res = -ENODEV;
++ goto bail_unlock;
++ }
+ if (!list->hiddev->open++)
+ if (list->hiddev->exist) {
+ struct hid_device *hid = hiddev->hid;
+@@ -300,6 +308,10 @@ bail_normal_power:
+ hid_hw_power(hid, PM_HINT_NORMAL);
+ bail_unlock:
+ mutex_unlock(&hiddev->existancelock);
++
++ spin_lock_irq(&list->hiddev->list_lock);
++ list_del(&list->node);
++ spin_unlock_irq(&list->hiddev->list_lock);
+ bail:
+ file->private_data = NULL;
+ vfree(list);
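Two fixes ride in this hiddev hunk: ->exist is now rechecked after existancelock is taken (testing it before acquiring the lock would race with disconnect), and the error path now unlinks the list node it added earlier instead of leaking it. The general check-under-the-lock shape, with hypothetical names:

    mutex_lock(&dev->lock);
    if (!dev->exist) {
            /* disconnect clears ->exist under this same lock, so the
             * test cannot race: we either see a live device or bail */
            mutex_unlock(&dev->lock);
            return -ENODEV;
    }
    /* ... safe to open the device here ... */
    mutex_unlock(&dev->lock);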
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index fd70b110e8f4..87564010ddbe 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
+ }
+
+ /* Functions for DMA support */
+-static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+- dma_addr_t phy_addr)
++static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
++ dma_addr_t phy_addr)
+ {
+ struct imx_i2c_dma *dma;
+ struct dma_slave_config dma_sconfig;
+@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+- return -ENOMEM;
++ return;
+
+ dma->chan_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(dma->chan_tx)) {
+@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
+
+- return 0;
++ return;
+
+ fail_rx:
+ dma_release_channel(dma->chan_rx);
+@@ -336,8 +336,6 @@ fail_tx:
+ dma_release_channel(dma->chan_tx);
+ fail_al:
+ devm_kfree(dev, dma);
+- /* return successfully if there is no dma support */
+- return ret == -ENODEV ? 0 : ret;
+ }
+
+ static void i2c_imx_dma_callback(void *arg)
+@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
+ dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
+ dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
+ i2c_imx->adapter.name);
++ dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+
+ /* Init DMA config if supported */
+- ret = i2c_imx_dma_request(i2c_imx, phy_addr);
+- if (ret < 0)
+- goto del_adapter;
++ i2c_imx_dma_request(i2c_imx, phy_addr);
+
+- dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
+ return 0; /* Return OK */
+
+-del_adapter:
+- i2c_del_adapter(&i2c_imx->adapter);
+ clk_notifier_unregister:
+ clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
+ rpm_disable:
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index 0e3c6529fc4c..da073d72f649 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
+ if (ret)
+ return ret;
+
+- regval = ret & MAX9611_TEMP_MASK;
++ regval &= MAX9611_TEMP_MASK;
+
+ if ((regval > MAX9611_TEMP_MAX_POS &&
+ regval < MAX9611_TEMP_MIN_NEG) ||
+diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
+index ff40a450b5d2..ff9e0d7fb4f3 100644
+--- a/drivers/infiniband/core/core_priv.h
++++ b/drivers/infiniband/core/core_priv.h
+@@ -292,7 +292,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_udata *udata,
+ struct ib_uobject *uobj)
+ {
++ enum ib_qp_type qp_type = attr->qp_type;
+ struct ib_qp *qp;
++ bool is_xrc;
+
+ if (!dev->ops.create_qp)
+ return ERR_PTR(-EOPNOTSUPP);
+@@ -310,7 +312,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ * and more importantly they are created internally by the driver,
+ * see mlx5 create_dev_resources() as an example.
+ */
+- if (attr->qp_type < IB_QPT_XRC_INI) {
++ is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
++ if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ if (uobj)
+ rdma_restrack_uadd(&qp->res);
+diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
+index cc99479b2c09..9947d16edef2 100644
+--- a/drivers/infiniband/core/mad.c
++++ b/drivers/infiniband/core/mad.c
+@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
+ if (has_smi)
+ cq_size *= 2;
+
++ port_priv->pd = ib_alloc_pd(device, 0);
++ if (IS_ERR(port_priv->pd)) {
++ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
++ ret = PTR_ERR(port_priv->pd);
++ goto error3;
++ }
++
+ port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
+ IB_POLL_UNBOUND_WORKQUEUE);
+ if (IS_ERR(port_priv->cq)) {
+ dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
+ ret = PTR_ERR(port_priv->cq);
+- goto error3;
+- }
+-
+- port_priv->pd = ib_alloc_pd(device, 0);
+- if (IS_ERR(port_priv->pd)) {
+- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+- ret = PTR_ERR(port_priv->pd);
+ goto error4;
+ }
+
+@@ -3278,11 +3278,11 @@ error8:
+ error7:
+ destroy_mad_qp(&port_priv->qp_info[0]);
+ error6:
+- ib_dealloc_pd(port_priv->pd);
+-error4:
+ ib_free_cq(port_priv->cq);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
++error4:
++ ib_dealloc_pd(port_priv->pd);
+ error3:
+ kfree(port_priv);
+
+@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
+ destroy_workqueue(port_priv->wq);
+ destroy_mad_qp(&port_priv->qp_info[1]);
+ destroy_mad_qp(&port_priv->qp_info[0]);
+- ib_dealloc_pd(port_priv->pd);
+ ib_free_cq(port_priv->cq);
++ ib_dealloc_pd(port_priv->pd);
+ cleanup_recv_queue(&port_priv->qp_info[1]);
+ cleanup_recv_queue(&port_priv->qp_info[0]);
+ /* XXX: Handle deallocation of MAD registration tables */
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index 671f07ba1fad..025b6d86a61f 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -49,6 +49,7 @@
+ #include <linux/sched.h>
+ #include <linux/semaphore.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+
+ #include <linux/uaccess.h>
+
+@@ -883,11 +884,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
+
+ if (get_user(id, arg))
+ return -EFAULT;
++ if (id >= IB_UMAD_MAX_AGENTS)
++ return -EINVAL;
+
+ mutex_lock(&file->port->file_mutex);
+ mutex_lock(&file->mutex);
+
+- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
++ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
++ if (!__get_agent(file, id)) {
+ ret = -EINVAL;
+ goto out;
+ }
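The user_mad change is the standard Spectre-v1 hardening sequence: reject an out-of-range index first, then clamp it with array_index_nospec() so a mispredicted branch cannot speculatively index past the array between the check and the use. The canonical shape (array name illustrative):

    #include <linux/nospec.h>

    if (id >= IB_UMAD_MAX_AGENTS)
            return -EINVAL;

    /* Under speculation, id is forced to 0 if it was out of range. */
    id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);

    agent = agents[id];             /* now safe to dereference */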
+diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
+index 0c6c1fe87705..d60453e98db7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_db.c
++++ b/drivers/infiniband/hw/hns/hns_roce_db.c
+@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ struct ib_udata *udata, unsigned long virt,
+ struct hns_roce_db *db)
+ {
++ unsigned long page_addr = virt & PAGE_MASK;
+ struct hns_roce_user_db_page *page;
++ unsigned int offset;
+ int ret = 0;
+
+ mutex_lock(&context->page_mutex);
+
+ list_for_each_entry(page, &context->page_list, list)
+- if (page->user_virt == (virt & PAGE_MASK))
++ if (page->user_virt == page_addr)
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ }
+
+ refcount_set(&page->refcount, 1);
+- page->user_virt = (virt & PAGE_MASK);
+- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
++ page->user_virt = page_addr;
++ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
+ if (IS_ERR(page->umem)) {
+ ret = PTR_ERR(page->umem);
+ kfree(page);
+@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
+ list_add(&page->list, &context->page_list);
+
+ found:
+- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
+- (virt & ~PAGE_MASK);
+- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
+- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
++ offset = virt - page_addr;
++ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
++ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+ db->u.user_page = page;
+ refcount_inc(&page->refcount);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+index e068a02122f5..9496c69fff3a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -745,8 +745,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
+
+ ibdev = &hr_dev->ib_dev;
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+- if (!pd)
++ if (!pd) {
++ ret = -ENOMEM;
+ goto alloc_mem_failed;
++ }
+
+ pd->device = ibdev;
+ ret = hns_roce_alloc_pd(pd, NULL);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index a6713a3b6c80..9ab276a8bc81 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -5687,13 +5687,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ return;
+ }
+
+- if (mpi->mdev_events.notifier_call)
+- mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+- mpi->mdev_events.notifier_call = NULL;
+-
+ mpi->ibdev = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
++ if (mpi->mdev_events.notifier_call)
++ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
++ mpi->mdev_events.notifier_call = NULL;
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
+
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index e54bec2c2965..d239fc58c002 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+ static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
+-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
+-{
+- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
+-}
+
+ static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+ {
+ return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+ }
+
+-static bool use_umr(struct mlx5_ib_dev *dev, int order)
+-{
+- return order <= mr_cache_max_order(dev) &&
+- umr_can_modify_entity_size(dev);
+-}
+-
+ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ {
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+@@ -1271,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ {
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+- bool populate_mtts = false;
++ bool use_umr;
+ struct ib_umem *umem;
+ int page_shift;
+ int npages;
+@@ -1303,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ if (err < 0)
+ return ERR_PTR(err);
+
+- if (use_umr(dev, order)) {
++ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
++ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
++ !MLX5_CAP_GEN(dev->mdev, atomic));
++
++ if (order <= mr_cache_max_order(dev) && use_umr) {
+ mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+ page_shift, order, access_flags);
+ if (PTR_ERR(mr) == -EAGAIN) {
+ mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
+ mr = NULL;
+ }
+- populate_mtts = false;
+ } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
+ goto error;
+ }
+- populate_mtts = true;
++ use_umr = false;
+ }
+
+ if (!mr) {
+- if (!umr_can_modify_entity_size(dev))
+- populate_mtts = true;
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+- page_shift, access_flags, populate_mtts);
++ page_shift, access_flags, !use_umr);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
+
+@@ -1341,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+
+ update_odp_mr(mr);
+
+- if (!populate_mtts) {
++ if (use_umr) {
+ int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
+index 91507a2e9290..f6e5351ba4d5 100644
+--- a/drivers/infiniband/hw/mlx5/odp.c
++++ b/drivers/infiniband/hw/mlx5/odp.c
+@@ -1765,7 +1765,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
+
+ num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
+ w->num_sge, 0);
+- kfree(w);
++ kvfree(w);
+ }
+
+ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+@@ -1807,7 +1807,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ if (valid_req)
+ queue_work(system_unbound_wq, &work->work);
+ else
+- kfree(work);
++ kvfree(work);
+
+ srcu_read_unlock(&dev->mr_srcu, srcu_key);
+
+diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
+index 083c2c00a8e9..dfdd1e16de7f 100644
+--- a/drivers/infiniband/hw/qedr/main.c
++++ b/drivers/infiniband/hw/qedr/main.c
+@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
++ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
+ }
+ static DEVICE_ATTR_RO(hw_rev);
+
+ static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+ {
+- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
++ struct qedr_dev *dev =
++ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
++
++ return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
++ dev->pdev->device,
++ rdma_protocol_iwarp(&dev->ibdev, 1) ?
++ "iWARP" : "RoCE");
+ }
+ static DEVICE_ATTR_RO(hca_type);
+
+diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
+index f1569ae8381b..a0a686f56ac4 100644
+--- a/drivers/input/joystick/iforce/iforce-usb.c
++++ b/drivers/input/joystick/iforce/iforce-usb.c
+@@ -129,7 +129,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
+ return -ENODEV;
+
+ epirq = &interface->endpoint[0].desc;
++ if (!usb_endpoint_is_int_in(epirq))
++ return -ENODEV;
++
+ epout = &interface->endpoint[1].desc;
++ if (!usb_endpoint_is_int_out(epout))
++ return -ENODEV;
+
+ if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
+ goto fail;
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 0afffe8d824f..77110f3ec21d 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -158,7 +158,8 @@ struct trackpoint_data {
+ #ifdef CONFIG_MOUSE_PS2_TRACKPOINT
+ int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
+ #else
+-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
++static inline int trackpoint_detect(struct psmouse *psmouse,
++ bool set_properties)
+ {
+ return -ENOSYS;
+ }
+diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
+index 04b85571f41e..aa577898e952 100644
+--- a/drivers/input/tablet/kbtab.c
++++ b/drivers/input/tablet/kbtab.c
+@@ -117,6 +117,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
++ endpoint = &intf->cur_altsetting->endpoint[0].desc;
++ if (!usb_endpoint_is_int_in(endpoint))
++ return -ENODEV;
++
+ kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!kbtab || !input_dev)
+@@ -155,8 +159,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
+ input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
+
+- endpoint = &intf->cur_altsetting->endpoint[0].desc;
+-
+ usb_fill_int_urb(kbtab->irq, dev,
+ usb_rcvintpipe(dev, endpoint->bEndpointAddress),
+ kbtab->data, 8,
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 35500801dc2b..20e5482d91b9 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
+
+ if (!its_alloc_vpe_table(vpe_id)) {
+ its_vpe_id_free(vpe_id);
+- its_free_pending_table(vpe->vpt_page);
++ its_free_pending_table(vpt_page);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
+index bf2237ac5d09..4f74c15c4755 100644
+--- a/drivers/irqchip/irq-imx-gpcv2.c
++++ b/drivers/irqchip/irq-imx-gpcv2.c
+@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
+ .irq_unmask = imx_gpcv2_irq_unmask,
+ .irq_set_wake = imx_gpcv2_irq_set_wake,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_set_type = irq_chip_set_type_parent,
+ #ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ #endif
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index b0aab3a0a1bf..f183cadd14e3 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1113,6 +1113,8 @@ static void bond_compute_features(struct bonding *bond)
+ done:
+ bond_dev->vlan_features = vlan_features;
+ bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_GSO_UDP_L4;
+ bond_dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(bond_dev, gso_max_size);
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 4039a9599d79..9d582b3ebc88 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+ /* if VF indicate to PF this function is going down (PF will delete sp
+ * elements and clear initializations
+ */
+- if (IS_VF(bp))
++ if (IS_VF(bp)) {
++ bnx2x_clear_vlan_info(bp);
+ bnx2x_vfpf_close_vf(bp);
+- else if (unload_mode != UNLOAD_RECOVERY)
++ } else if (unload_mode != UNLOAD_RECOVERY) {
+ /* if this is a normal/close unload, we need to clean up the chip */
+ bnx2x_chip_cleanup(bp, unload_mode, keep_link);
+- else {
++ } else {
+ /* Send the UNLOAD_REQUEST to the MCP */
+ bnx2x_send_unload_req(bp, unload_mode);
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+index c2f6e44e9a3f..8b08cb18e363 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
+ void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+ int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+
++void bnx2x_clear_vlan_info(struct bnx2x *bp);
++
+ /**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 2cc14db8f0ec..192ff8d5da32 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+ return rc;
+ }
+
++void bnx2x_clear_vlan_info(struct bnx2x *bp)
++{
++ struct bnx2x_vlan_entry *vlan;
++
++ /* Mark that hw forgot all entries */
++ list_for_each_entry(vlan, &bp->vlan_reg, link)
++ vlan->hw = false;
++
++ bp->vlan_cnt = 0;
++}
++
+ static int bnx2x_del_all_vlans(struct bnx2x *bp)
+ {
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+- struct bnx2x_vlan_entry *vlan;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
+ if (rc)
+ return rc;
+
+- /* Mark that hw forgot all entries */
+- list_for_each_entry(vlan, &bp->vlan_reg, link)
+- vlan->hw = false;
+- bp->vlan_cnt = 0;
++ bnx2x_clear_vlan_info(bp);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7afae9d80e75..36fe4f161cf1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2015,9 +2015,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+ if (bnapi->events & BNXT_RX_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bnapi->events & BNXT_AGG_EVENT)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ }
+ bnapi->events = 0;
+ }
+@@ -5011,6 +5011,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
+
+ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+ {
++ bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
+ int i, rc = 0;
+ u32 type;
+
+@@ -5086,7 +5087,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+ if (rc)
+ goto err_out;
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
++ /* If we have agg rings, post agg buffers first. */
++ if (!agg_rings)
++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+@@ -5105,7 +5108,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+ }
+ }
+
+- if (bp->flags & BNXT_FLAG_AGG_RINGS) {
++ if (agg_rings) {
+ type = HWRM_RING_ALLOC_AGG;
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+@@ -5121,6 +5124,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
++ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+ }
+ }
+@@ -6963,19 +6967,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
+ bnxt_hwrm_vnic_set_rss(bp, i, false);
+ }
+
+-static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
+- bool irq_re_init)
++static void bnxt_clear_vnic(struct bnxt *bp)
+ {
+- if (bp->vnic_info) {
+- bnxt_hwrm_clear_vnic_filter(bp);
++ if (!bp->vnic_info)
++ return;
++
++ bnxt_hwrm_clear_vnic_filter(bp);
++ if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
+ /* clear all RSS setting before free vnic ctx */
+ bnxt_hwrm_clear_vnic_rss(bp);
+ bnxt_hwrm_vnic_ctx_free(bp);
+- /* before free the vnic, undo the vnic tpa settings */
+- if (bp->flags & BNXT_FLAG_TPA)
+- bnxt_set_tpa(bp, false);
+- bnxt_hwrm_vnic_free(bp);
+ }
++ /* before free the vnic, undo the vnic tpa settings */
++ if (bp->flags & BNXT_FLAG_TPA)
++ bnxt_set_tpa(bp, false);
++ bnxt_hwrm_vnic_free(bp);
++ if (bp->flags & BNXT_FLAG_CHIP_P5)
++ bnxt_hwrm_vnic_ctx_free(bp);
++}
++
++static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
++ bool irq_re_init)
++{
++ bnxt_clear_vnic(bp);
+ bnxt_hwrm_ring_free(bp, close_path);
+ bnxt_hwrm_ring_grp_free(bp);
+ if (irq_re_init) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+index 549c90d3e465..c05d663212b2 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
+ if (idx)
+ req->dimensions = cpu_to_le16(1);
+
+- if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
++ if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
+ memcpy(data_addr, buf, bytesize);
+-
+- rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
++ rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
++ } else {
++ rc = hwrm_send_message_silent(bp, msg, msg_len,
++ HWRM_CMD_TIMEOUT);
++ }
+ if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
+ memcpy(buf, data_addr, bytesize);
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index a6c7baf38036..b761a2e28a10 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
+ mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+- if (hwrm_err)
+- goto flash_pkg_exit;
+-
+- if (resp->error_code) {
++ if (hwrm_err) {
+ u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+- if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
++ if (resp->error_code && error_code ==
++ NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ install.flags |= cpu_to_le16(
+ NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+ hwrm_err = _hwrm_send_message(bp, &install,
+ sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+- if (hwrm_err)
+- goto flash_pkg_exit;
+ }
++ if (hwrm_err)
++ goto flash_pkg_exit;
+ }
+
+ if (resp->result) {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+index 44d6c5743fb9..434470a6b9f3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
+ static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ u16 src_fid)
+ {
+- flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
++ flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+ }
+
+ static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
+@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
+ goto free_node;
+
+ bnxt_tc_set_src_fid(bp, flow, src_fid);
+-
+- if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+- bnxt_tc_set_flow_dir(bp, flow, src_fid);
++ bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
+
+ if (!bnxt_tc_can_offload(bp, flow)) {
+ rc = -EOPNOTSUPP;
+@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
+ * 2. 15th bit of flow_handle must specify the flow
+ * direction (TX/RX).
+ */
+- if (flow_node->flow.dir == BNXT_DIR_RX)
++ if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
+ handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
+ CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+ else
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+index 8a0968967bc5..8b0f1510bdc4 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
+ __be16 inner_vlan_tci;
+ __be16 ether_type;
+ u8 num_vlans;
++ u8 dir;
++#define BNXT_DIR_RX 1
++#define BNXT_DIR_TX 0
+ };
+
+ struct bnxt_tc_l3_key {
+@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
+
+ /* flow applicable to pkts ingressing on this fid */
+ u16 src_fid;
+- u8 dir;
+-#define BNXT_DIR_RX 1
+-#define BNXT_DIR_TX 0
+ struct bnxt_tc_l2_key l2_key;
+ struct bnxt_tc_l2_key l2_mask;
+ struct bnxt_tc_l3_key l3_key;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 6c01314e87b0..db3552f2d087 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
+ if (err) {
+ en_err(priv, "Failed to allocate RSS indirection QP\n");
+- goto rss_err;
++ goto qp_alloc_err;
+ }
+
+ rss_map->indir_qp->event = mlx4_en_sqp_event;
+@@ -1241,6 +1241,7 @@ indir_err:
+ MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, rss_map->indir_qp);
++qp_alloc_err:
+ kfree(rss_map->indir_qp);
+ rss_map->indir_qp = NULL;
+ rss_err:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index f3d98748b211..c1caf14bc334 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -76,9 +76,6 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
+ u8 state;
+ int err;
+
+- if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+- return 0;
+-
+ err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
+@@ -86,10 +83,8 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
+ return err;
+ }
+
+- if (state != MLX5_SQC_STATE_ERR) {
+- netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
+- return -EINVAL;
+- }
++ if (state != MLX5_SQC_STATE_ERR)
++ return 0;
+
+ mlx5e_tx_disable_queue(sq->txq);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+index 8657e0f26995..2c75b2752f58 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+ return &arfs_t->rules_hash[bucket_idx];
+ }
+
+-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+-{
+- return (skb->protocol == htons(ETH_P_IP)) ?
+- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+-}
+-
+ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+ u8 ip_proto, __be16 etype)
+ {
+@@ -602,31 +596,9 @@ out:
+ arfs_may_expire_flow(priv);
+ }
+
+-/* return L4 destination port from ip4/6 packets */
+-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+-{
+- char *transport_header;
+-
+- transport_header = skb_transport_header(skb);
+- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+- return ((struct tcphdr *)transport_header)->dest;
+- return ((struct udphdr *)transport_header)->dest;
+-}
+-
+-/* return L4 source port from ip4/6 packets */
+-static __be16 arfs_get_src_port(const struct sk_buff *skb)
+-{
+- char *transport_header;
+-
+- transport_header = skb_transport_header(skb);
+- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+- return ((struct tcphdr *)transport_header)->source;
+- return ((struct udphdr *)transport_header)->source;
+-}
+-
+ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ struct arfs_table *arfs_t,
+- const struct sk_buff *skb,
++ const struct flow_keys *fk,
+ u16 rxq, u32 flow_id)
+ {
+ struct arfs_rule *rule;
+@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+ tuple = &rule->tuple;
+- tuple->etype = skb->protocol;
++ tuple->etype = fk->basic.n_proto;
++ tuple->ip_proto = fk->basic.ip_proto;
+ if (tuple->etype == htons(ETH_P_IP)) {
+- tuple->src_ipv4 = ip_hdr(skb)->saddr;
+- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
++ tuple->src_ipv4 = fk->addrs.v4addrs.src;
++ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
+ } else {
+- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
++ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr));
+- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
++ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+ }
+- tuple->ip_proto = arfs_get_ip_proto(skb);
+- tuple->src_port = arfs_get_src_port(skb);
+- tuple->dst_port = arfs_get_dst_port(skb);
++ tuple->src_port = fk->ports.src;
++ tuple->dst_port = fk->ports.dst;
+
+ rule->flow_id = flow_id;
+ rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ return rule;
+ }
+
+-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+- const struct sk_buff *skb)
++static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
+ {
+- if (tuple->etype == htons(ETH_P_IP) &&
+- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+- return true;
+- if (tuple->etype == htons(ETH_P_IPV6) &&
+- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+- sizeof(struct in6_addr))) &&
+- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+- sizeof(struct in6_addr))))
+- return true;
++ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
++ return false;
++ if (tuple->etype != fk->basic.n_proto)
++ return false;
++ if (tuple->etype == htons(ETH_P_IP))
++ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
++ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
++ if (tuple->etype == htons(ETH_P_IPV6))
++ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
++ sizeof(struct in6_addr)) &&
++ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
++ sizeof(struct in6_addr));
+ return false;
+ }
+
+ static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+- const struct sk_buff *skb)
++ const struct flow_keys *fk)
+ {
+ struct arfs_rule *arfs_rule;
+ struct hlist_head *head;
+- __be16 src_port = arfs_get_src_port(skb);
+- __be16 dst_port = arfs_get_dst_port(skb);
+
+- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
++ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
+ hlist_for_each_entry(arfs_rule, head, hlist) {
+- if (arfs_rule->tuple.src_port == src_port &&
+- arfs_rule->tuple.dst_port == dst_port &&
+- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
++ if (arfs_cmp(&arfs_rule->tuple, fk))
+ return arfs_rule;
+- }
+ }
+
+ return NULL;
+@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_table *arfs_t;
+ struct arfs_rule *arfs_rule;
++ struct flow_keys fk;
++
++ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
++ return -EPROTONOSUPPORT;
+
+- if (skb->protocol != htons(ETH_P_IP) &&
+- skb->protocol != htons(ETH_P_IPV6))
++ if (fk.basic.n_proto != htons(ETH_P_IP) &&
++ fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
++ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
+- arfs_rule = arfs_find_rule(arfs_t, skb);
++ arfs_rule = arfs_find_rule(arfs_t, &fk);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index) {
+ spin_unlock_bh(&arfs->arfs_lock);
+@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ }
+ arfs_rule->rxq = rxq_index;
+ } else {
+- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+- rxq_index, flow_id);
++ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
+ if (!arfs_rule) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index f637d81f08bc..06f9bd6a45e3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1060,6 +1060,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+ mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+
++ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
++ autoneg != AUTONEG_ENABLE) {
++ netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
++ __func__);
++ err = -EINVAL;
++ goto out;
++ }
++
+ link_modes = link_modes & eproto.cap;
+ if (!link_modes) {
+ netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
+@@ -1317,6 +1325,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
++ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
++ return -EOPNOTSUPP;
++
+ if (pauseparam->autoneg)
+ return -EINVAL;
+
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index b509b941d5ca..6825254eb882 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -71,46 +71,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
+ debugfs_remove_recursive(nsim_dev_port->ddir);
+ }
+
++static struct net *nsim_devlink_net(struct devlink *devlink)
++{
++ return &init_net;
++}
++
+ static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
+ {
+- struct nsim_dev *nsim_dev = priv;
++ struct net *net = priv;
+
+- return nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV4_FIB, false);
++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
+ }
+
+ static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
+ {
+- struct nsim_dev *nsim_dev = priv;
++ struct net *net = priv;
+
+- return nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV4_FIB_RULES, false);
++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+ }
+
+ static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
+ {
+- struct nsim_dev *nsim_dev = priv;
++ struct net *net = priv;
+
+- return nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV6_FIB, false);
++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
+ }
+
+ static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
+ {
+- struct nsim_dev *nsim_dev = priv;
++ struct net *net = priv;
+
+- return nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV6_FIB_RULES, false);
++ return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+ }
+
+ static int nsim_dev_resources_register(struct devlink *devlink)
+ {
+- struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct devlink_resource_size_params params = {
+ .size_max = (u64)-1,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY
+ };
++ struct net *net = nsim_devlink_net(devlink);
+ int err;
+ u64 n;
+
+@@ -124,8 +125,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
+ goto out;
+ }
+
+- n = nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV4_FIB, true);
++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV4_FIB,
+ 					NSIM_RESOURCE_IPV4, &params);
+@@ -134,8 +134,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
+ return err;
+ }
+
+- n = nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV4_FIB_RULES, true);
++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ 					NSIM_RESOURCE_IPV4, &params);
+@@ -154,8 +153,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
+ goto out;
+ }
+
+- n = nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV6_FIB, true);
++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV6_FIB,
+ 					NSIM_RESOURCE_IPV6, &params);
+@@ -164,8 +162,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
+ return err;
+ }
+
+- n = nsim_fib_get_val(nsim_dev->fib_data,
+- NSIM_RESOURCE_IPV6_FIB_RULES, true);
++ n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ 					NSIM_RESOURCE_IPV6, &params);
+@@ -177,19 +174,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_dev_ipv4_fib_resource_occ_get,
+- nsim_dev);
++ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_dev_ipv4_fib_rules_res_occ_get,
+- nsim_dev);
++ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_dev_ipv6_fib_resource_occ_get,
+- nsim_dev);
++ net);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_dev_ipv6_fib_rules_res_occ_get,
+- nsim_dev);
++ net);
+ out:
+ return err;
+ }
+@@ -197,11 +194,11 @@ out:
+ static int nsim_dev_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+ {
+- struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
++ struct net *net = nsim_devlink_net(devlink);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
+@@ -210,8 +207,7 @@ static int nsim_dev_reload(struct devlink *devlink,
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (!err) {
+- err = nsim_fib_set_max(nsim_dev->fib_data,
+- res_ids[i], val, extack);
++ err = nsim_fib_set_max(net, res_ids[i], val, extack);
+ if (err)
+ return err;
+ }
+@@ -241,15 +237,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+
+- nsim_dev->fib_data = nsim_fib_create();
+- if (IS_ERR(nsim_dev->fib_data)) {
+- err = PTR_ERR(nsim_dev->fib_data);
+- goto err_devlink_free;
+- }
+-
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+- goto err_fib_destroy;
++ goto err_devlink_free;
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+@@ -271,8 +261,6 @@ err_dl_unregister:
+ devlink_unregister(devlink);
+ err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+-err_fib_destroy:
+- nsim_fib_destroy(nsim_dev->fib_data);
+ err_devlink_free:
+ devlink_free(devlink);
+ return ERR_PTR(err);
+@@ -286,7 +274,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+- nsim_fib_destroy(nsim_dev->fib_data);
+ mutex_destroy(&nsim_dev->port_list_lock);
+ devlink_free(devlink);
+ }
+diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
+index 8c57ba747772..f61d094746c0 100644
+--- a/drivers/net/netdevsim/fib.c
++++ b/drivers/net/netdevsim/fib.c
+@@ -18,6 +18,7 @@
+ #include <net/ip_fib.h>
+ #include <net/ip6_fib.h>
+ #include <net/fib_rules.h>
++#include <net/netns/generic.h>
+
+ #include "netdevsim.h"
+
+@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
+ };
+
+ struct nsim_fib_data {
+- struct notifier_block fib_nb;
+ struct nsim_per_fib_data ipv4;
+ struct nsim_per_fib_data ipv6;
+ };
+
+-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+- enum nsim_resource_id res_id, bool max)
++static unsigned int nsim_fib_net_id;
++
++u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+ {
++ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_entry *entry;
+
+ switch (res_id) {
+@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ return max ? entry->max : entry->num;
+ }
+
+-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+- enum nsim_resource_id res_id, u64 val,
++int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+ struct netlink_ext_ack *extack)
+ {
++ struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_entry *entry;
+ int err = 0;
+
+@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
+ return err;
+ }
+
+-static int nsim_fib_rule_event(struct nsim_fib_data *data,
+- struct fib_notifier_info *info, bool add)
++static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+ {
++ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
+ struct netlink_ext_ack *extack = info->extack;
+ int err = 0;
+
+@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
+ return err;
+ }
+
+-static int nsim_fib_event(struct nsim_fib_data *data,
+- struct fib_notifier_info *info, bool add)
++static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+ {
++ struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
+ struct netlink_ext_ack *extack = info->extack;
+ int err = 0;
+
+@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
+ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+ {
+- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+- fib_nb);
+ struct fib_notifier_info *info = ptr;
+ int err = 0;
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD: /* fall through */
+ case FIB_EVENT_RULE_DEL:
+- err = nsim_fib_rule_event(data, info,
+- event == FIB_EVENT_RULE_ADD);
++ err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ break;
+
+ case FIB_EVENT_ENTRY_ADD: /* fall through */
+ case FIB_EVENT_ENTRY_DEL:
+- err = nsim_fib_event(data, info,
+- event == FIB_EVENT_ENTRY_ADD);
++ err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ break;
+ }
+
+@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
+ /* inconsistent dump, trying again */
+ static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
+ {
+- struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+- fib_nb);
++ struct nsim_fib_data *data;
++ struct net *net;
++
++ rcu_read_lock();
++ for_each_net_rcu(net) {
++ data = net_generic(net, nsim_fib_net_id);
++
++ data->ipv4.fib.num = 0ULL;
++ data->ipv4.rules.num = 0ULL;
+
+- data->ipv4.fib.num = 0ULL;
+- data->ipv4.rules.num = 0ULL;
+- data->ipv6.fib.num = 0ULL;
+- data->ipv6.rules.num = 0ULL;
++ data->ipv6.fib.num = 0ULL;
++ data->ipv6.rules.num = 0ULL;
++ }
++ rcu_read_unlock();
+ }
+
+-struct nsim_fib_data *nsim_fib_create(void)
+-{
+- struct nsim_fib_data *data;
+- int err;
++static struct notifier_block nsim_fib_nb = {
++ .notifier_call = nsim_fib_event_nb,
++};
+
+- data = kzalloc(sizeof(*data), GFP_KERNEL);
+- if (!data)
+- return ERR_PTR(-ENOMEM);
++/* Initialize per network namespace state */
++static int __net_init nsim_fib_netns_init(struct net *net)
++{
++ struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+
+ data->ipv4.fib.max = (u64)-1;
+ data->ipv4.rules.max = (u64)-1;
+@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
+ data->ipv6.fib.max = (u64)-1;
+ data->ipv6.rules.max = (u64)-1;
+
+- data->fib_nb.notifier_call = nsim_fib_event_nb;
+- err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
+- if (err) {
+- pr_err("Failed to register fib notifier\n");
+- goto err_out;
+- }
++ return 0;
++}
+
+- return data;
++static struct pernet_operations nsim_fib_net_ops = {
++ .init = nsim_fib_netns_init,
++ .id = &nsim_fib_net_id,
++ .size = sizeof(struct nsim_fib_data),
++};
+
+-err_out:
+- kfree(data);
+- return ERR_PTR(err);
++void nsim_fib_exit(void)
++{
++ unregister_pernet_subsys(&nsim_fib_net_ops);
++ unregister_fib_notifier(&nsim_fib_nb);
+ }
+
+-void nsim_fib_destroy(struct nsim_fib_data *data)
++int nsim_fib_init(void)
+ {
+- unregister_fib_notifier(&data->fib_nb);
+- kfree(data);
++ int err;
++
++ err = register_pernet_subsys(&nsim_fib_net_ops);
++ if (err < 0) {
++ pr_err("Failed to register pernet subsystem\n");
++ goto err_out;
++ }
++
++ err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
++ if (err < 0) {
++ pr_err("Failed to register fib notifier\n");
++ goto err_out;
++ }
++
++err_out:
++ return err;
+ }
+diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
+index e5c8aa08e1cd..533a182eefca 100644
+--- a/drivers/net/netdevsim/netdev.c
++++ b/drivers/net/netdevsim/netdev.c
+@@ -370,12 +370,18 @@ static int __init nsim_module_init(void)
+ if (err)
+ goto err_dev_exit;
+
+- err = rtnl_link_register(&nsim_link_ops);
++ err = nsim_fib_init();
+ if (err)
+ goto err_bus_exit;
+
++ err = rtnl_link_register(&nsim_link_ops);
++ if (err)
++ goto err_fib_exit;
++
+ return 0;
+
++err_fib_exit:
++ nsim_fib_exit();
+ err_bus_exit:
+ nsim_bus_exit();
+ err_dev_exit:
+@@ -386,6 +392,7 @@ err_dev_exit:
+ static void __exit nsim_module_exit(void)
+ {
+ rtnl_link_unregister(&nsim_link_ops);
++ nsim_fib_exit();
+ nsim_bus_exit();
+ nsim_dev_exit();
+ }
+diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
+index 3f398797c2bc..f9253fe68c31 100644
+--- a/drivers/net/netdevsim/netdevsim.h
++++ b/drivers/net/netdevsim/netdevsim.h
+@@ -168,12 +168,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+
+-struct nsim_fib_data *nsim_fib_create(void);
+-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
+-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+- enum nsim_resource_id res_id, bool max);
+-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+- enum nsim_resource_id res_id, u64 val,
++int nsim_fib_init(void);
++void nsim_fib_exit(void);
++u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
++int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+ struct netlink_ext_ack *extack);
+
+ #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
+index b9d4145781ca..58bb25e4af10 100644
+--- a/drivers/net/phy/phy-c45.c
++++ b/drivers/net/phy/phy-c45.c
+@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
+ int val, devad;
+ bool link = true;
+
++ if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
++ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
++ if (val < 0)
++ return val;
++
++ /* Autoneg is being started, therefore disregard current
++ * link status and report link as down.
++ */
++ if (val & MDIO_AN_CTRL1_RESTART) {
++ phydev->link = 0;
++ return 0;
++ }
++ }
++
+ while (mmd_mask && link) {
+ devad = __ffs(mmd_mask);
+ mmd_mask &= ~BIT(devad);
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index ffa402732aea..3af0af495cf1 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1708,7 +1708,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
+ */
+ int genphy_update_link(struct phy_device *phydev)
+ {
+- int status;
++ int status = 0, bmcr;
++
++ bmcr = phy_read(phydev, MII_BMCR);
++ if (bmcr < 0)
++ return bmcr;
++
++ /* Autoneg is being started, therefore disregard BMSR value and
++ * report link as down.
++ */
++ if (bmcr & BMCR_ANRESTART)
++ goto done;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 36916bf51ee6..d1b4c7d8e2bc 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
++ NETIF_F_HW_VLAN_CTAG_TX |
++ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_GSO_UDP_L4;
+ team->dev->hard_header_len = max_hard_header_len;
+
+diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
+index 6d25dea5ad4b..f7d117d80cfb 100644
+--- a/drivers/net/usb/pegasus.c
++++ b/drivers/net/usb/pegasus.c
+@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
+ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
+ {
+ int i;
+- __u8 tmp;
++ __u8 tmp = 0;
+ __le16 retdatai;
+ int ret;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+index 405038ce98d6..7573af2d88ce 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+@@ -97,7 +97,7 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+
+ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+- int data_size)
++ int data_size, int *tbl_rev)
+ {
+ int i;
+ union acpi_object *wifi_pkg;
+@@ -113,16 +113,19 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ /*
+ * We need at least two packages, one for the revision and one
+ * for the data itself. Also check that the revision is valid
+- * (i.e. it is an integer set to 0).
++ * (i.e. it is an integer smaller than 2, as we currently support only
++ * 2 revisions).
+ */
+ if (data->type != ACPI_TYPE_PACKAGE ||
+ data->package.count < 2 ||
+ data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+- data->package.elements[0].integer.value != 0) {
++ data->package.elements[0].integer.value > 1) {
+ IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
++ *tbl_rev = data->package.elements[0].integer.value;
++
+ /* loop through all the packages to find the one for WiFi */
+ for (i = 1; i < data->package.count; i++) {
+ union acpi_object *domain;
+@@ -151,14 +154,15 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+ {
+ union acpi_object *wifi_pkg, *data;
+ u32 mcc_val;
+- int ret;
++ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg)) {
++ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
++ &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+@@ -185,6 +189,7 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
+ {
+ union acpi_object *data, *wifi_pkg;
+ u64 dflt_pwr_limit;
++ int tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+ if (IS_ERR(data)) {
+@@ -193,8 +198,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev)
+ }
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+- ACPI_SPLC_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg) ||
++ ACPI_SPLC_WIFI_DATA_SIZE, &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev != 0 ||
+ wifi_pkg->package.elements[1].integer.value != ACPI_TYPE_INTEGER) {
+ dflt_pwr_limit = 0;
+ goto out_free;
+@@ -211,14 +216,15 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
+ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
+ {
+ union acpi_object *wifi_pkg, *data;
+- int ret;
++ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+- wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg)) {
++ wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
++ &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+index f5704e16643f..991a23450999 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+@@ -97,7 +97,7 @@
+ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+- int data_size);
++ int data_size, int *tbl_rev);
+
+ /**
+ * iwl_acpi_get_mcc - read MCC from ACPI, if available
+@@ -131,7 +131,8 @@ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+
+ static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+- int data_size)
++ int data_size,
++ int *tbl_rev)
+ {
+ return ERR_PTR(-ENOENT);
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+index 01f003c6cff9..f195db398bed 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+@@ -419,14 +419,26 @@ struct iwl_per_chain_offset_group {
+ struct iwl_per_chain_offset hb;
+ } __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
++/**
++ * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
++ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
++ * @table: offset profile per band.
++ */
++struct iwl_geo_tx_power_profiles_cmd_v1 {
++ __le32 ops;
++ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
++} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
++
+ /**
+ * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
++ * @table_revision: BIOS table revision.
+ */
+ struct iwl_geo_tx_power_profiles_cmd {
+ __le32 ops;
+ struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
++ __le32 table_revision;
+ } __packed; /* GEO_TX_POWER_LIMIT */
+
+ /**
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
+index de9243d30135..a74f34a8dffb 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
+@@ -286,6 +286,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
+ * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
+ * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
+ * STA_CONTEXT_DOT11AX_API_S
++ * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
++ * version tables.
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
+ */
+@@ -318,6 +320,7 @@ enum iwl_ucode_tlv_api {
+ IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
++ IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
+
+ NUM_IWL_UCODE_TLV_API
+ #ifdef __CHECKER__
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 5af9959d05e5..8892707050d5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -682,15 +682,15 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+ {
+ union acpi_object *wifi_pkg, *table, *data;
+ bool enabled;
+- int ret;
++ int ret, tbl_rev;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+- ACPI_WRDS_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg)) {
++ ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+@@ -719,15 +719,15 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+ {
+ union acpi_object *wifi_pkg, *data;
+ bool enabled;
+- int i, n_profiles, ret;
++ int i, n_profiles, ret, tbl_rev;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+- ACPI_EWRD_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg)) {
++ ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+@@ -778,7 +778,7 @@ out_free:
+ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+ {
+ union acpi_object *wifi_pkg, *data;
+- int i, j, ret;
++ int i, j, ret, tbl_rev;
+ int idx = 1;
+
+ data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
+@@ -786,12 +786,13 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+ return PTR_ERR(data);
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+- ACPI_WGDS_WIFI_DATA_SIZE);
+- if (IS_ERR(wifi_pkg)) {
++ ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
++ if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+ }
+
++ mvm->geo_rev = tbl_rev;
+ for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+ union acpi_object *entry;
+@@ -894,15 +895,29 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ {
+ struct iwl_geo_tx_power_profiles_resp *resp;
+ int ret;
++ u16 len;
++ void *data;
++ struct iwl_geo_tx_power_profiles_cmd geo_cmd;
++ struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
++ struct iwl_host_cmd cmd;
++
++ if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
++ geo_cmd.ops =
++ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
++ len = sizeof(geo_cmd);
++ data = &geo_cmd;
++ } else {
++ geo_cmd_v1.ops =
++ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
++ len = sizeof(geo_cmd_v1);
++ data = &geo_cmd_v1;
++ }
+
+- struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
+- .ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
+- };
+- struct iwl_host_cmd cmd = {
++ cmd = (struct iwl_host_cmd){
+ .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+- .len = { sizeof(geo_cmd), },
++ .len = { len, },
+ .flags = CMD_WANT_SKB,
+- .data = { &geo_cmd },
++ .data = { data },
+ };
+
+ if (!iwl_mvm_sar_geo_support(mvm))
+@@ -969,6 +984,16 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ i, j, value[1], value[2], value[0]);
+ }
+ }
++
++ cmd.table_revision = cpu_to_le32(mvm->geo_rev);
++
++ if (!fw_has_api(&mvm->fw->ucode_capa,
++ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
++ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0,
++ sizeof(struct iwl_geo_tx_power_profiles_cmd_v1),
++ &cmd);
++ }
++
+ return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 88af1f0ba3f0..ed8fc9a9204c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -1184,6 +1184,7 @@ struct iwl_mvm {
+ #ifdef CONFIG_ACPI
+ struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+ struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
++ u32 geo_rev;
+ #endif
+ };
+
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 1d9940d4e8c7..c9262ffeefe4 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+ skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+ nskb = xenvif_alloc_skb(0);
+ if (unlikely(nskb == NULL)) {
++ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ xenvif_tx_err(queue, &txreq, extra_count, idx);
+ if (net_ratelimit())
+@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
+
+ if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
+ /* Failure in xenvif_set_skb_gso is fatal. */
++ skb_shinfo(skb)->nr_frags = 0;
+ kfree_skb(skb);
+ kfree_skb(nskb);
+ break;
+diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
+index 1d902230ba61..be6cda89dcf5 100644
+--- a/drivers/platform/x86/intel_pmc_core.c
++++ b/drivers/platform/x86/intel_pmc_core.c
+@@ -815,6 +815,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
+ INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
++ INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
+ {}
+ };
+
+diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
+index c1ca931e1fab..7a8cbfb5d213 100644
+--- a/drivers/platform/x86/pcengines-apuv2.c
++++ b/drivers/platform/x86/pcengines-apuv2.c
+@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
+ MODULE_LICENSE("GPL");
+ MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
+ MODULE_ALIAS("platform:pcengines-apuv2");
+-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
+-MODULE_SOFTDEP("pre: platform:leds-gpio");
+-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
++MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 8068520cf89e..152de392f9aa 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
++ if (cmd)
++ cmd->result = 0;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+- c2->error_data.status == 0))
++ c2->error_data.status == 0)) {
++ cmd->result = 0;
+ return hpsa_cmd_free_and_done(h, c, cmd);
++ }
+
+ /*
+ * Any RAID offload error results in retry which will use
+@@ -5638,6 +5642,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
+ }
+ c = cmd_tagged_alloc(h, cmd);
+
++ /*
++ * This is necessary because the SML doesn't zero out this field during
++ * error recovery.
++ */
++ cmd->result = 0;
++
+ /*
+ * Call alternate submit routine for I/O accelerated commands.
+ * Retries always go down the normal I/O path.
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 54772d4c377f..6a4c719497ca 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
+ ql_log(ql_log_warn, vha, 0xd049,
+ "Failed to allocate ct_sns request.\n");
+ kfree(fcport);
+- fcport = NULL;
++ return NULL;
+ }
+
+ INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
+index 2edf3ee91300..caf4d4df4bd3 100644
+--- a/drivers/staging/comedi/drivers/dt3000.c
++++ b/drivers/staging/comedi/drivers/dt3000.c
+@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
+ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ unsigned int flags)
+ {
+- int divider, base, prescale;
++ unsigned int divider, base, prescale;
+
+- /* This function needs improvment */
++ /* This function needs improvement */
+ /* Don't know if divider==0 works. */
+
+ for (prescale = 0; prescale < 16; prescale++) {
+@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ divider = (*nanosec) / base;
+ break;
+ case CMDF_ROUND_UP:
+- divider = (*nanosec) / base;
++ divider = DIV_ROUND_UP(*nanosec, base);
+ break;
+ }
+ if (divider < 65536) {
+@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
+ }
+
+ prescale = 15;
+- base = timer_base * (1 << prescale);
++ base = timer_base * (prescale + 1);
+ divider = 65535;
+ *nanosec = divider * base;
+ return (prescale << 16) | (divider);
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 183b41753c98..62f4fb9b362f 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1301,10 +1301,6 @@ made_compressed_probe:
+ tty_port_init(&acm->port);
+ acm->port.ops = &acm_port_ops;
+
+- minor = acm_alloc_minor(acm);
+- if (minor < 0)
+- goto alloc_fail1;
+-
+ ctrlsize = usb_endpoint_maxp(epctrl);
+ readsize = usb_endpoint_maxp(epread) *
+ (quirks == SINGLE_RX_URB ? 1 : 2);
+@@ -1312,6 +1308,13 @@ made_compressed_probe:
+ acm->writesize = usb_endpoint_maxp(epwrite) * 20;
+ acm->control = control_interface;
+ acm->data = data_interface;
++
++ usb_get_intf(acm->control); /* undone in destruct() */
++
++ minor = acm_alloc_minor(acm);
++ if (minor < 0)
++ goto alloc_fail1;
++
+ acm->minor = minor;
+ acm->dev = usb_dev;
+ if (h.usb_cdc_acm_descriptor)
+@@ -1458,7 +1461,6 @@ skip_countries:
+ usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ usb_set_intfdata(data_interface, acm);
+
+- usb_get_intf(control_interface);
+ tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
+ &control_interface->dev);
+ if (IS_ERR(tty_dev)) {
+diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
+index 65de6f73b672..558890ada0e5 100644
+--- a/drivers/usb/core/file.c
++++ b/drivers/usb/core/file.c
+@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
+ intf->minor = minor;
+ break;
+ }
+- up_write(&minor_rwsem);
+- if (intf->minor < 0)
++ if (intf->minor < 0) {
++ up_write(&minor_rwsem);
+ return -EXFULL;
++ }
+
+ /* create a usb class device for this usb interface */
+ snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
+@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
+ MKDEV(USB_MAJOR, minor), class_driver,
+ "%s", kbasename(name));
+ if (IS_ERR(intf->usb_dev)) {
+- down_write(&minor_rwsem);
+ usb_minors[minor] = NULL;
+ intf->minor = -1;
+- up_write(&minor_rwsem);
+ retval = PTR_ERR(intf->usb_dev);
+ }
++ up_write(&minor_rwsem);
+ return retval;
+ }
+ EXPORT_SYMBOL_GPL(usb_register_dev);
+@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
+ return;
+
+ dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
++ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
+
+ down_write(&minor_rwsem);
+ usb_minors[intf->minor] = NULL;
+ up_write(&minor_rwsem);
+
+- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
+ intf->usb_dev = NULL;
+ intf->minor = -1;
+ destroy_usb_class();
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 94d22551fc1b..82e41179fb2d 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -101,11 +101,6 @@ static DEFINE_SPINLOCK(hcd_urb_unlink_lock);
+ /* wait queue for synchronous unlinks */
+ DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
+
+-static inline int is_root_hub(struct usb_device *udev)
+-{
+- return (udev->parent == NULL);
+-}
+-
+ /*-------------------------------------------------------------------------*/
+
+ /*
+@@ -878,101 +873,6 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ }
+
+
+-
+-/*
+- * Show & store the current value of authorized_default
+- */
+-static ssize_t authorized_default_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct usb_device *rh_usb_dev = to_usb_device(dev);
+- struct usb_bus *usb_bus = rh_usb_dev->bus;
+- struct usb_hcd *hcd;
+-
+- hcd = bus_to_hcd(usb_bus);
+- return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
+-}
+-
+-static ssize_t authorized_default_store(struct device *dev,
+- struct device_attribute *attr,
+- const char *buf, size_t size)
+-{
+- ssize_t result;
+- unsigned val;
+- struct usb_device *rh_usb_dev = to_usb_device(dev);
+- struct usb_bus *usb_bus = rh_usb_dev->bus;
+- struct usb_hcd *hcd;
+-
+- hcd = bus_to_hcd(usb_bus);
+- result = sscanf(buf, "%u\n", &val);
+- if (result == 1) {
+- hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
+- val : USB_DEVICE_AUTHORIZE_ALL;
+- result = size;
+- } else {
+- result = -EINVAL;
+- }
+- return result;
+-}
+-static DEVICE_ATTR_RW(authorized_default);
+-
+-/*
+- * interface_authorized_default_show - show default authorization status
+- * for USB interfaces
+- *
+- * note: interface_authorized_default is the default value
+- * for initializing the authorized attribute of interfaces
+- */
+-static ssize_t interface_authorized_default_show(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct usb_device *usb_dev = to_usb_device(dev);
+- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+-
+- return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
+-}
+-
+-/*
+- * interface_authorized_default_store - store default authorization status
+- * for USB interfaces
+- *
+- * note: interface_authorized_default is the default value
+- * for initializing the authorized attribute of interfaces
+- */
+-static ssize_t interface_authorized_default_store(struct device *dev,
+- struct device_attribute *attr, const char *buf, size_t count)
+-{
+- struct usb_device *usb_dev = to_usb_device(dev);
+- struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+- int rc = count;
+- bool val;
+-
+- if (strtobool(buf, &val) != 0)
+- return -EINVAL;
+-
+- if (val)
+- set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+- else
+- clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+-
+- return rc;
+-}
+-static DEVICE_ATTR_RW(interface_authorized_default);
+-
+-/* Group all the USB bus attributes */
+-static struct attribute *usb_bus_attrs[] = {
+- &dev_attr_authorized_default.attr,
+- &dev_attr_interface_authorized_default.attr,
+- NULL,
+-};
+-
+-static const struct attribute_group usb_bus_attr_group = {
+- .name = NULL, /* we want them in the same directory */
+- .attrs = usb_bus_attrs,
+-};
+-
+-
+-
+ /*-------------------------------------------------------------------------*/
+
+ /**
+@@ -2895,32 +2795,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
+ if (retval != 0)
+ goto err_register_root_hub;
+
+- retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
+- if (retval < 0) {
+- printk(KERN_ERR "Cannot register USB bus sysfs attributes: %d\n",
+- retval);
+- goto error_create_attr_group;
+- }
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+
+ return retval;
+
+-error_create_attr_group:
+- clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+- if (HC_IS_RUNNING(hcd->state))
+- hcd->state = HC_STATE_QUIESCING;
+- spin_lock_irq(&hcd_root_hub_lock);
+- hcd->rh_registered = 0;
+- spin_unlock_irq(&hcd_root_hub_lock);
+-
+-#ifdef CONFIG_PM
+- cancel_work_sync(&hcd->wakeup_work);
+-#endif
+- cancel_work_sync(&hcd->died_work);
+- mutex_lock(&usb_bus_idr_lock);
+- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+- mutex_unlock(&usb_bus_idr_lock);
+ err_register_root_hub:
+ hcd->rh_pollable = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+@@ -2964,8 +2843,6 @@ void usb_remove_hcd(struct usb_hcd *hcd)
+ dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
+
+ usb_get_dev(rhdev);
+- sysfs_remove_group(&rhdev->dev.kobj, &usb_bus_attr_group);
+-
+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+ if (HC_IS_RUNNING (hcd->state))
+ hcd->state = HC_STATE_QUIESCING;
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index e844bb7b5676..5adf489428aa 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+ (struct usb_cdc_dmm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_TYPE:
+- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
++ if (elength < sizeof(struct usb_cdc_mdlm_desc))
+ goto next_desc;
+ if (desc)
+ return -EINVAL;
+ desc = (struct usb_cdc_mdlm_desc *)buffer;
+ break;
+ case USB_CDC_MDLM_DETAIL_TYPE:
+- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
++ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
+ goto next_desc;
+ if (detail)
+ return -EINVAL;
+diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
+index 7e88fdfe3cf5..f19694e69f5c 100644
+--- a/drivers/usb/core/sysfs.c
++++ b/drivers/usb/core/sysfs.c
+@@ -15,6 +15,7 @@
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/usb.h>
++#include <linux/usb/hcd.h>
+ #include <linux/usb/quirks.h>
+ #include <linux/of.h>
+ #include "usb.h"
+@@ -922,6 +923,116 @@ static struct bin_attribute dev_bin_attr_descriptors = {
+ .size = 18 + 65535, /* dev descr + max-size raw descriptor */
+ };
+
++/*
++ * Show & store the current value of authorized_default
++ */
++static ssize_t authorized_default_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct usb_device *rh_usb_dev = to_usb_device(dev);
++ struct usb_bus *usb_bus = rh_usb_dev->bus;
++ struct usb_hcd *hcd;
++
++ hcd = bus_to_hcd(usb_bus);
++ return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
++}
++
++static ssize_t authorized_default_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t size)
++{
++ ssize_t result;
++ unsigned int val;
++ struct usb_device *rh_usb_dev = to_usb_device(dev);
++ struct usb_bus *usb_bus = rh_usb_dev->bus;
++ struct usb_hcd *hcd;
++
++ hcd = bus_to_hcd(usb_bus);
++ result = sscanf(buf, "%u\n", &val);
++ if (result == 1) {
++ hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
++ val : USB_DEVICE_AUTHORIZE_ALL;
++ result = size;
++ } else {
++ result = -EINVAL;
++ }
++ return result;
++}
++static DEVICE_ATTR_RW(authorized_default);
++
++/*
++ * interface_authorized_default_show - show default authorization status
++ * for USB interfaces
++ *
++ * note: interface_authorized_default is the default value
++ * for initializing the authorized attribute of interfaces
++ */
++static ssize_t interface_authorized_default_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct usb_device *usb_dev = to_usb_device(dev);
++ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
++
++ return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
++}
++
++/*
++ * interface_authorized_default_store - store default authorization status
++ * for USB interfaces
++ *
++ * note: interface_authorized_default is the default value
++ * for initializing the authorized attribute of interfaces
++ */
++static ssize_t interface_authorized_default_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct usb_device *usb_dev = to_usb_device(dev);
++ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
++ int rc = count;
++ bool val;
++
++ if (strtobool(buf, &val) != 0)
++ return -EINVAL;
++
++ if (val)
++ set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
++ else
++ clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
++
++ return rc;
++}
++static DEVICE_ATTR_RW(interface_authorized_default);
++
++/* Group all the USB bus attributes */
++static struct attribute *usb_bus_attrs[] = {
++ &dev_attr_authorized_default.attr,
++ &dev_attr_interface_authorized_default.attr,
++ NULL,
++};
++
++static const struct attribute_group usb_bus_attr_group = {
++ .name = NULL, /* we want them in the same directory */
++ .attrs = usb_bus_attrs,
++};
++
++
++static int add_default_authorized_attributes(struct device *dev)
++{
++ int rc = 0;
++
++ if (is_usb_device(dev))
++ rc = sysfs_create_group(&dev->kobj, &usb_bus_attr_group);
++
++ return rc;
++}
++
++static void remove_default_authorized_attributes(struct device *dev)
++{
++ if (is_usb_device(dev)) {
++ sysfs_remove_group(&dev->kobj, &usb_bus_attr_group);
++ }
++}
++
+ int usb_create_sysfs_dev_files(struct usb_device *udev)
+ {
+ struct device *dev = &udev->dev;
+@@ -938,7 +1049,14 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
+ retval = add_power_attributes(dev);
+ if (retval)
+ goto error;
++
++ if (is_root_hub(udev)) {
++ retval = add_default_authorized_attributes(dev);
++ if (retval)
++ goto error;
++ }
+ return retval;
++
+ error:
+ usb_remove_sysfs_dev_files(udev);
+ return retval;
+@@ -948,6 +1066,9 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
+ {
+ struct device *dev = &udev->dev;
+
++ if (is_root_hub(udev))
++ remove_default_authorized_attributes(dev);
++
+ remove_power_attributes(dev);
+ remove_persist_attributes(dev);
+ device_remove_bin_file(dev, &dev_bin_attr_descriptors);
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index d95a5358f73d..d5ac492f441b 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -153,6 +153,11 @@ static inline int is_usb_port(const struct device *dev)
+ return dev->type == &usb_port_device_type;
+ }
+
++static inline int is_root_hub(struct usb_device *udev)
++{
++ return (udev->parent == NULL);
++}
++
+ /* Do the same for device drivers and interface drivers. */
+
+ static inline int is_usb_device_driver(struct device_driver *drv)
+diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
+index 7dc248546fd4..b6eec81b6a40 100644
+--- a/drivers/usb/gadget/udc/renesas_usb3.c
++++ b/drivers/usb/gadget/udc/renesas_usb3.c
+@@ -19,6 +19,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/sizes.h>
+ #include <linux/slab.h>
++#include <linux/string.h>
+ #include <linux/sys_soc.h>
+ #include <linux/uaccess.h>
+ #include <linux/usb/ch9.h>
+@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ if (usb3->forced_b_device)
+ return -EBUSY;
+
+- if (!strncmp(buf, "host", strlen("host")))
++ if (sysfs_streq(buf, "host"))
+ new_mode_is_host = true;
+- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
++ else if (sysfs_streq(buf, "peripheral"))
+ new_mode_is_host = false;
+ else
+ return -EINVAL;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index c1582fbd1150..38e920ac7f82 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+
++ /* Motorola devices */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
+
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
+ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
+@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
+ .driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
+@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
++ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
++ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
+index 73427d8e0116..e5694133ebe5 100644
+--- a/drivers/xen/xen-pciback/conf_space_capability.c
++++ b/drivers/xen/xen-pciback/conf_space_capability.c
+@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
+ {
+ int err;
+ u16 old_value;
+- pci_power_t new_state, old_state;
++ pci_power_t new_state;
+
+ err = pci_read_config_word(dev, offset, &old_value);
+ if (err)
+ goto out;
+
+- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+
+ new_value &= PM_OK_BITS;
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 982152d3f920..69f8ab4d91f2 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1488,7 +1488,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
+ goto out;
+ }
+
+- trans = btrfs_attach_transaction(root);
++ trans = btrfs_join_transaction_nostart(root);
+ if (IS_ERR(trans)) {
+ if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
+ ret = PTR_ERR(trans);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 1aa3f6d6d775..2db14fdd6bff 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
+ [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
+ [TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
+ __TRANS_ATTACH |
+- __TRANS_JOIN),
++ __TRANS_JOIN |
++ __TRANS_JOIN_NOSTART),
+ [TRANS_STATE_UNBLOCKED] = (__TRANS_START |
+ __TRANS_ATTACH |
+ __TRANS_JOIN |
+- __TRANS_JOIN_NOLOCK),
++ __TRANS_JOIN_NOLOCK |
++ __TRANS_JOIN_NOSTART),
+ [TRANS_STATE_COMPLETED] = (__TRANS_START |
+ __TRANS_ATTACH |
+ __TRANS_JOIN |
+- __TRANS_JOIN_NOLOCK),
++ __TRANS_JOIN_NOLOCK |
++ __TRANS_JOIN_NOSTART),
+ };
+
+ void btrfs_put_transaction(struct btrfs_transaction *transaction)
+@@ -525,7 +528,8 @@ again:
+ ret = join_transaction(fs_info, type);
+ if (ret == -EBUSY) {
+ wait_current_trans(fs_info);
+- if (unlikely(type == TRANS_ATTACH))
++ if (unlikely(type == TRANS_ATTACH ||
++ type == TRANS_JOIN_NOSTART))
+ ret = -ENOENT;
+ }
+ } while (ret == -EBUSY);
+@@ -641,6 +645,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
+ BTRFS_RESERVE_NO_FLUSH, true);
+ }
+
++/*
++ * Similar to a regular join, but it never starts a transaction when none
++ * is running, nor after waiting for the current one to finish.
++ */
++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
++{
++ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
++ BTRFS_RESERVE_NO_FLUSH, true);
++}
++
+ /*
+ * btrfs_attach_transaction() - catch the running transaction
+ *
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 78c446c222b7..2f695587f828 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -94,11 +94,13 @@ struct btrfs_transaction {
+ #define __TRANS_JOIN (1U << 11)
+ #define __TRANS_JOIN_NOLOCK (1U << 12)
+ #define __TRANS_DUMMY (1U << 13)
++#define __TRANS_JOIN_NOSTART (1U << 14)
+
+ #define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
+ #define TRANS_ATTACH (__TRANS_ATTACH)
+ #define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
+ #define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
++#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
+
+ #define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
+
+@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
+ int min_factor);
+ struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
+ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
++struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
+ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
+ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
+ struct btrfs_root *root);
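The new __TRANS_JOIN_NOSTART bit plugs into the blocked-type masks extended in transaction.c above, which is what makes this join type back off once a commit is under way. A simplified plain-C model of that table lookup; the states and masks here are illustrative, not the full btrfs set:

/*
 * Toy model: a join type is refused once the current transaction
 * state lists its bit in the blocked mask.
 */
#include <stdio.h>

#define __TRANS_START        (1U << 0)
#define __TRANS_ATTACH       (1U << 1)
#define __TRANS_JOIN         (1U << 2)
#define __TRANS_JOIN_NOSTART (1U << 3)

enum state { RUNNING, COMMIT_DOING };

static const unsigned int blocked[] = {
	[RUNNING]      = 0,
	[COMMIT_DOING] = __TRANS_START | __TRANS_ATTACH |
			 __TRANS_JOIN | __TRANS_JOIN_NOSTART,
};

int main(void)
{
	enum state s = COMMIT_DOING;

	printf("JOIN_NOSTART while committing: %s\n",
	       (blocked[s] & __TRANS_JOIN_NOSTART) ? "wait" : "join");
	return 0;
}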
+diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
+index 963fb4571fd9..bb6fd5a506d3 100644
+--- a/fs/f2fs/gc.c
++++ b/fs/f2fs/gc.c
+@@ -794,6 +794,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ if (lfs_mode)
+ down_write(&fio.sbi->io_order_lock);
+
++ mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
++ fio.old_blkaddr, false);
++ if (!mpage)
++ goto up_out;
++
++ fio.encrypted_page = mpage;
++
++ /* read source block in mpage */
++ if (!PageUptodate(mpage)) {
++ err = f2fs_submit_page_bio(&fio);
++ if (err) {
++ f2fs_put_page(mpage, 1);
++ goto up_out;
++ }
++ lock_page(mpage);
++ if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
++ !PageUptodate(mpage))) {
++ err = -EIO;
++ f2fs_put_page(mpage, 1);
++ goto up_out;
++ }
++ }
++
+ f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+ &sum, CURSEG_COLD_DATA, NULL, false);
+
+@@ -801,44 +824,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
+ newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+- goto recover_block;
+- }
+-
+- mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+- fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+- if (mpage) {
+- bool updated = false;
+-
+- if (PageUptodate(mpage)) {
+- memcpy(page_address(fio.encrypted_page),
+- page_address(mpage), PAGE_SIZE);
+- updated = true;
+- }
+ f2fs_put_page(mpage, 1);
+- invalidate_mapping_pages(META_MAPPING(fio.sbi),
+- fio.old_blkaddr, fio.old_blkaddr);
+- if (updated)
+- goto write_page;
+- }
+-
+- err = f2fs_submit_page_bio(&fio);
+- if (err)
+- goto put_page_out;
+-
+- /* write page */
+- lock_page(fio.encrypted_page);
+-
+- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+- err = -EIO;
+- goto put_page_out;
+- }
+- if (unlikely(!PageUptodate(fio.encrypted_page))) {
+- err = -EIO;
+- goto put_page_out;
++ goto recover_block;
+ }
+
+-write_page:
++ /* write target block */
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
++ memcpy(page_address(fio.encrypted_page),
++ page_address(mpage), PAGE_SIZE);
++ f2fs_put_page(mpage, 1);
++ invalidate_mapping_pages(META_MAPPING(fio.sbi),
++ fio.old_blkaddr, fio.old_blkaddr);
++
+ set_page_dirty(fio.encrypted_page);
+ if (clear_page_dirty_for_io(fio.encrypted_page))
+ dec_page_count(fio.sbi, F2FS_DIRTY_META);
+@@ -869,11 +866,12 @@ write_page:
+ put_page_out:
+ f2fs_put_page(fio.encrypted_page, 1);
+ recover_block:
+- if (lfs_mode)
+- up_write(&fio.sbi->io_order_lock);
+ if (err)
+ f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+ true, true);
++up_out:
++ if (lfs_mode)
++ up_write(&fio.sbi->io_order_lock);
+ put_out:
+ f2fs_put_dnode(&dn);
+ out:
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 3e887a09533b..61018559e8fe 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1032,10 +1032,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+- iter->count -= (seg_skip << PAGE_SHIFT);
++ iter->count -= bvec->bv_len + offset;
+ iter->iov_offset = offset & ~PAGE_MASK;
+- if (iter->iov_offset)
+- iter->count -= iter->iov_offset;
+ }
+ }
+
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index 385f3aaa2448..90c830e3758e 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
+ u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
+ int low_bucket = 0, bucket, high_bucket;
+ struct ocfs2_xattr_bucket *search;
+- u32 last_hash;
+ u64 blkno, lower_blkno = 0;
+
+ search = ocfs2_xattr_bucket_new(inode);
+@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
+ if (xh->xh_count)
+ xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
+
+- last_hash = le32_to_cpu(xe->xe_name_hash);
+-
+ /* record lower_blkno which may be the insert place. */
+ lower_blkno = blkno;
+
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index abe27ec43176..225bf9239b32 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
+ }
+ if (seq_has_overflowed(m))
+ goto Eoverflow;
++ p = m->op->next(m, p, &m->index);
+ if (pos + m->count > offset) {
+ m->from = offset - pos;
+ m->count -= m->from;
+@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
+ }
+ pos += m->count;
+ m->count = 0;
+- p = m->op->next(m, p, &m->index);
+ if (pos == offset)
+ break;
+ }
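The traverse() reorder advances the iterator before testing whether the seek target falls inside the record just formatted, so m->index already names the following record when the walk stops mid-record. A toy model of that cursor discipline (plain C with invented records, not the seq_file code):

#include <stdio.h>
#include <string.h>

static const char *records[] = { "aaa", "bbbb", "cc" };

int main(void)
{
	size_t offset = 5;	/* seek target, falls inside "bbbb" */
	size_t pos = 0, idx = 0, from = 0;

	while (idx < 3) {
		size_t len = strlen(records[idx]);

		idx++;	/* advance first, as the fixed code does */
		if (pos + len > offset) {
			from = offset - pos;
			break;
		}
		pos += len;
	}
	/* the read resumes at records[idx-1] + from, then continues at idx */
	printf("resume in record %zu at byte %zu, next record %zu\n",
	       idx - 1, from, idx);
	return 0;
}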
+diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
+index c64bea7a52be..e9f20b813a69 100644
+--- a/include/asm-generic/getorder.h
++++ b/include/asm-generic/getorder.h
+@@ -7,24 +7,6 @@
+ #include <linux/compiler.h>
+ #include <linux/log2.h>
+
+-/*
+- * Runtime evaluation of get_order()
+- */
+-static inline __attribute_const__
+-int __get_order(unsigned long size)
+-{
+- int order;
+-
+- size--;
+- size >>= PAGE_SHIFT;
+-#if BITS_PER_LONG == 32
+- order = fls(size);
+-#else
+- order = fls64(size);
+-#endif
+- return order;
+-}
+-
+ /**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+- *
+- * This function may be used to initialise variables with compile time
+- * evaluations of constants.
+ */
+-#define get_order(n) \
+-( \
+- __builtin_constant_p(n) ? ( \
+- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
+- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
+- ilog2((n) - 1) - PAGE_SHIFT + 1) \
+- ) : \
+- __get_order(n) \
+-)
++static inline __attribute_const__ int get_order(unsigned long size)
++{
++ if (__builtin_constant_p(size)) {
++ if (!size)
++ return BITS_PER_LONG - PAGE_SHIFT;
++
++ if (size < (1UL << PAGE_SHIFT))
++ return 0;
++
++ return ilog2((size) - 1) - PAGE_SHIFT + 1;
++ }
++
++ size--;
++ size >>= PAGE_SHIFT;
++#if BITS_PER_LONG == 32
++ return fls(size);
++#else
++ return fls64(size);
++#endif
++}
+
+ #endif /* __ASSEMBLY__ */
+
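The macro-to-inline conversion keeps both evaluation paths: the ilog2()-based branch for compile-time constants and the historical fls()/fls64() path at runtime. A userspace re-derivation of the runtime math, assuming 4 KiB pages (PAGE_SHIFT of 12); illustration only:

#include <stdio.h>

#define PAGE_SHIFT 12

static int order_of(unsigned long size)
{
	int order = 0;

	size--;
	size >>= PAGE_SHIFT;
	while (size) {		/* stands in for fls()/fls64() */
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       order_of(1),          /* 0: fits in one page */
	       order_of(4096),       /* 0: exactly one page */
	       order_of(4097),       /* 1: needs two pages  */
	       order_of(8 * 4096));  /* 3: eight pages      */
	return 0;
}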
+diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
+index 1dda31825ec4..71283739ffd2 100644
+--- a/include/linux/page-flags-layout.h
++++ b/include/linux/page-flags-layout.h
+@@ -32,6 +32,7 @@
+
+ #endif /* CONFIG_SPARSEMEM */
+
++#ifndef BUILD_VDSO32_64
+ /*
+ * page->flags layout:
+ *
+@@ -76,20 +77,22 @@
+ #define LAST_CPUPID_SHIFT 0
+ #endif
+
+-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
++#ifdef CONFIG_KASAN_SW_TAGS
++#define KASAN_TAG_WIDTH 8
++#else
++#define KASAN_TAG_WIDTH 0
++#endif
++
++#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
++ <= BITS_PER_LONG - NR_PAGEFLAGS
+ #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
+ #else
+ #define LAST_CPUPID_WIDTH 0
+ #endif
+
+-#ifdef CONFIG_KASAN_SW_TAGS
+-#define KASAN_TAG_WIDTH 8
+ #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
+ > BITS_PER_LONG - NR_PAGEFLAGS
+-#error "KASAN: not enough bits in page flags for tag"
+-#endif
+-#else
+-#define KASAN_TAG_WIDTH 0
++#error "Not enough bits in page flags"
+ #endif
+
+ /*
+@@ -104,4 +107,5 @@
+ #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ #endif
+
++#endif
+ #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 056f557d5194..64fa59b2c8d5 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1358,6 +1358,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+ to->l4_hash = from->l4_hash;
+ };
+
++static inline void skb_copy_decrypted(struct sk_buff *to,
++ const struct sk_buff *from)
++{
++#ifdef CONFIG_TLS_DEVICE
++ to->decrypted = from->decrypted;
++#endif
++}
++
+ #ifdef NET_SKBUFF_DATA_USES_OFFSET
+ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+ {
+diff --git a/include/linux/socket.h b/include/linux/socket.h
+index b57cd8bf96e2..810d5ec0ada3 100644
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -291,6 +291,9 @@ struct ucred {
+ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
+ #define MSG_EOF MSG_FIN
+ #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
++#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
++ * plain text and require encryption
++ */
+
+ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
+ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index 395b4406f4b0..222af2046086 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -680,9 +680,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+ {
+- return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+- nlmsg_attrlen(nlh, hdrlen), policy,
+- NL_VALIDATE_STRICT, extack);
++ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
++ NL_VALIDATE_STRICT, extack);
+ }
+
+ /**
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 6cbc16136357..526de911cd91 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
+
+ /* Checks if this SKB belongs to an HW offloaded socket
+ * and whether any SW fallbacks are required based on dev.
++ * Check decrypted mark in case skb_orphan() cleared socket.
+ */
+ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
+ struct net_device *dev)
+@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
+ #ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sock *sk = skb->sk;
+
+- if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
++ if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
+ skb = sk->sk_validate_xmit_skb(sk, dev, skb);
++#ifdef CONFIG_TLS_DEVICE
++ } else if (unlikely(skb->decrypted)) {
++ pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
++ kfree_skb(skb);
++ skb = NULL;
++#endif
++ }
+ #endif
+
+ return skb;
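The new branch handles the corner the comment describes: once skb_orphan() has cleared skb->sk, the TLS fallback can no longer encrypt a buffer that still carries plaintext, so it must be dropped rather than sent. A toy model of that rule (plain C with invented types, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_skb {
	bool decrypted;
	void *sk;		/* NULL once skb_orphan() ran */
};

static struct toy_skb *validate_xmit(struct toy_skb *skb)
{
	if (skb->sk)
		return skb;	/* socket callback would encrypt here */
	if (skb->decrypted) {
		fprintf(stderr, "unencrypted skb, no socket - dropping\n");
		free(skb);
		return NULL;
	}
	return skb;
}

int main(void)
{
	struct toy_skb *skb = calloc(1, sizeof(*skb));

	skb->decrypted = true;	/* plaintext queued for crypto offload */
	skb->sk = NULL;		/* ...then orphaned */
	printf("xmit: %s\n", validate_xmit(skb) ? "sent" : "dropped");
	return 0;
}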
+diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
+index 2212adda8f77..64e92d56c6a8 100644
+--- a/include/trace/events/dma_fence.h
++++ b/include/trace/events/dma_fence.h
+@@ -2,7 +2,7 @@
+ #undef TRACE_SYSTEM
+ #define TRACE_SYSTEM dma_fence
+
+-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
++#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+ #define _TRACE_DMA_FENCE_H
+
+ #include <linux/tracepoint.h>
+diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
+index f3a12566bed0..6678cf8b235b 100644
+--- a/include/trace/events/napi.h
++++ b/include/trace/events/napi.h
+@@ -3,7 +3,7 @@
+ #define TRACE_SYSTEM napi
+
+ #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
+-#define _TRACE_NAPI_H_
++#define _TRACE_NAPI_H
+
+ #include <linux/netdevice.h>
+ #include <linux/tracepoint.h>
+@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
+
+ #undef NO_DEV
+
+-#endif /* _TRACE_NAPI_H_ */
++#endif /* _TRACE_NAPI_H */
+
+ /* This part must be outside protection */
+ #include <trace/define_trace.h>
+diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
+index 60d0d8bd336d..0d1a9ebf55ba 100644
+--- a/include/trace/events/qdisc.h
++++ b/include/trace/events/qdisc.h
+@@ -2,7 +2,7 @@
+ #define TRACE_SYSTEM qdisc
+
+ #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
+-#define _TRACE_QDISC_H_
++#define _TRACE_QDISC_H
+
+ #include <linux/skbuff.h>
+ #include <linux/netdevice.h>
+@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
+ __entry->txq_state, __entry->packets, __entry->skbaddr )
+ );
+
+-#endif /* _TRACE_QDISC_H_ */
++#endif /* _TRACE_QDISC_H */
+
+ /* This part must be outside protection */
+ #include <trace/define_trace.h>
+diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
+index 0818f6286110..971cd02d2daf 100644
+--- a/include/trace/events/tegra_apb_dma.h
++++ b/include/trace/events/tegra_apb_dma.h
+@@ -1,5 +1,5 @@
+ #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
+-#define _TRACE_TEGRA_APM_DMA_H
++#define _TRACE_TEGRA_APB_DMA_H
+
+ #include <linux/tracepoint.h>
+ #include <linux/dmaengine.h>
+@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
+ TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
+ );
+
+-#endif /* _TRACE_TEGRADMA_H */
++#endif /* _TRACE_TEGRA_APB_DMA_H */
+
+ /* This part must be outside protection */
+ #include <trace/define_trace.h>
+diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
+index f7afdadb6770..3401382bbca2 100644
+--- a/kernel/dma/mapping.c
++++ b/kernel/dma/mapping.c
+@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ int ret;
+
+ if (!dev_is_dma_coherent(dev)) {
++ unsigned long pfn;
++
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+ return -ENXIO;
+
+- page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
+- dma_addr));
++ /* If the PFN is not valid, we do not have a struct page */
++ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
++ if (!pfn_valid(pfn))
++ return -ENXIO;
++ page = pfn_to_page(pfn);
+ } else {
+ page = virt_to_page(cpu_addr);
+ }
+@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ if (!dev_is_dma_coherent(dev)) {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
+ return -ENXIO;
++
++ /* If the PFN is not valid, we do not have a struct page */
+ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
++ if (!pfn_valid(pfn))
++ return -ENXIO;
+ } else {
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+ }
+diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
+index 962cf343f798..ae3ec77bb92f 100644
+--- a/kernel/sched/cpufreq_schedutil.c
++++ b/kernel/sched/cpufreq_schedutil.c
+@@ -40,6 +40,7 @@ struct sugov_policy {
+ struct task_struct *thread;
+ bool work_in_progress;
+
++ bool limits_changed;
+ bool need_freq_update;
+ };
+
+@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
+ !cpufreq_this_cpu_can_update(sg_policy->policy))
+ return false;
+
+- if (unlikely(sg_policy->need_freq_update))
++ if (unlikely(sg_policy->limits_changed)) {
++ sg_policy->limits_changed = false;
++ sg_policy->need_freq_update = true;
+ return true;
++ }
+
+ delta_ns = time - sg_policy->last_freq_update_time;
+
+@@ -427,7 +431,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
+ {
+ if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
+- sg_policy->need_freq_update = true;
++ sg_policy->limits_changed = true;
+ }
+
+ static void sugov_update_single(struct update_util_data *hook, u64 time,
+@@ -447,7 +451,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
+ if (!sugov_should_update_freq(sg_policy, time))
+ return;
+
+- busy = sugov_cpu_is_busy(sg_cpu);
++ /* Limits may have changed, don't skip frequency update */
++ busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
+
+ util = sugov_get_util(sg_cpu);
+ max = sg_cpu->max;
+@@ -821,6 +826,7 @@ static int sugov_start(struct cpufreq_policy *policy)
+ sg_policy->last_freq_update_time = 0;
+ sg_policy->next_freq = 0;
+ sg_policy->work_in_progress = false;
++ sg_policy->limits_changed = false;
+ sg_policy->need_freq_update = false;
+ sg_policy->cached_raw_freq = 0;
+
+@@ -869,7 +875,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
+ mutex_unlock(&sg_policy->work_lock);
+ }
+
+- sg_policy->need_freq_update = true;
++ sg_policy->limits_changed = true;
+ }
+
+ struct cpufreq_governor schedutil_gov = {
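The split into limits_changed and need_freq_update exists so a limits change can both bypass the rate limit and disarm the "CPU is busy, keep the old frequency" shortcut in sugov_update_single(). A standalone sketch of that two-flag handshake (plain C; locking and scheduler context deliberately omitted):

#include <stdbool.h>
#include <stdio.h>

static bool limits_changed;	/* set from sugov_limits() and friends */
static bool need_freq_update;	/* consumed when picking a frequency  */

static bool should_update_freq(void)
{
	if (limits_changed) {
		limits_changed = false;
		need_freq_update = true;
		return true;	/* bypass the rate limit */
	}
	return false;		/* rate-limit check elided */
}

int main(void)
{
	limits_changed = true;	/* e.g. a new policy max was set */
	if (should_update_freq()) {
		/* don't skip the update just because the CPU looks busy */
		bool busy = !need_freq_update && true;

		printf("busy shortcut active: %s\n", busy ? "yes" : "no");
	}
	return 0;
}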
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 4c405dfbd2b3..27dd9a881627 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -995,7 +995,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
+ * @range: range
+ * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
+ * permission (for instance asking for write and range is read only),
+- * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
++ * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
+ * vma or it is illegal to access that range), number of valid pages
+ * in range->pfns[] (from range start address).
+ *
+@@ -1019,7 +1019,7 @@ long hmm_range_snapshot(struct hmm_range *range)
+ do {
+ /* If range is no longer valid force retry. */
+ if (!range->valid)
+- return -EAGAIN;
++ return -EBUSY;
+
+ vma = find_vma(hmm->mm, start);
+ if (vma == NULL || (vma->vm_flags & device_vma))
+@@ -1117,10 +1117,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
+
+ do {
+ /* If range is no longer valid force retry. */
+- if (!range->valid) {
+- up_read(&hmm->mm->mmap_sem);
+- return -EAGAIN;
+- }
++ if (!range->valid)
++ return -EBUSY;
+
+ vma = find_vma(hmm->mm, start);
+ if (vma == NULL || (vma->vm_flags & device_vma))
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 3e147ea83182..3afb01bce736 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -114,7 +114,7 @@
+ /* GFP bitmask for kmemleak internal allocations */
+ #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+ __GFP_NORETRY | __GFP_NOMEMALLOC | \
+- __GFP_NOWARN | __GFP_NOFAIL)
++ __GFP_NOWARN)
+
+ /* scanning area inside a memory block */
+ struct kmemleak_scan_area {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 902d020aa70e..8f5dabfaf94d 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1126,26 +1126,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
+ css_put(&prev->css);
+ }
+
+-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
++static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
++ struct mem_cgroup *dead_memcg)
+ {
+- struct mem_cgroup *memcg = dead_memcg;
+ struct mem_cgroup_reclaim_iter *iter;
+ struct mem_cgroup_per_node *mz;
+ int nid;
+ int i;
+
+- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
+- for_each_node(nid) {
+- mz = mem_cgroup_nodeinfo(memcg, nid);
+- for (i = 0; i <= DEF_PRIORITY; i++) {
+- iter = &mz->iter[i];
+- cmpxchg(&iter->position,
+- dead_memcg, NULL);
+- }
++ for_each_node(nid) {
++ mz = mem_cgroup_nodeinfo(from, nid);
++ for (i = 0; i <= DEF_PRIORITY; i++) {
++ iter = &mz->iter[i];
++ cmpxchg(&iter->position,
++ dead_memcg, NULL);
+ }
+ }
+ }
+
++static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
++{
++ struct mem_cgroup *memcg = dead_memcg;
++ struct mem_cgroup *last;
++
++ do {
++ __invalidate_reclaim_iterators(memcg, dead_memcg);
++ last = memcg;
++ } while ((memcg = parent_mem_cgroup(memcg)));
++
++ /*
++	 * When cgroup1 non-hierarchy mode is used,
++ * parent_mem_cgroup() does not walk all the way up to the
++ * cgroup root (root_mem_cgroup). So we have to handle
++ * dead_memcg from cgroup root separately.
++ */
++ if (last != root_mem_cgroup)
++ __invalidate_reclaim_iterators(root_mem_cgroup,
++ dead_memcg);
++}
++
+ /**
+ * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
+ * @memcg: hierarchy root
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index fdcb73536319..ca3f443c8fc1 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
+ },
+ };
+
+-static void migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_page_add(struct page *page, struct list_head *pagelist,
+ unsigned long flags);
+
+ struct queue_pages {
+@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
+ }
+
+ /*
+- * queue_pages_pmd() has three possible return values:
+- * 1 - pages are placed on the right node or queued successfully.
+- * 0 - THP was split.
+- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
+- * page was already on a node that does not follow the policy.
++ * queue_pages_pmd() has four possible return values:
++ * 0 - pages are placed on the right node or queued successfully.
++ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
++ * specified.
++ * 2 - THP was split.
++ * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
++ * existing page was already on a node that does not follow the
++ * policy.
+ */
+ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+ if (is_huge_zero_page(page)) {
+ spin_unlock(ptl);
+ __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
++ ret = 2;
+ goto out;
+ }
+- if (!queue_pages_required(page, qp)) {
+- ret = 1;
++ if (!queue_pages_required(page, qp))
+ goto unlock;
+- }
+
+- ret = 1;
+ flags = qp->flags;
+ /* go to thp migration */
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+- if (!vma_migratable(walk->vma)) {
+- ret = -EIO;
++ if (!vma_migratable(walk->vma) ||
++ migrate_page_add(page, qp->pagelist, flags)) {
++ ret = 1;
+ goto unlock;
+ }
+-
+- migrate_page_add(page, qp->pagelist, flags);
+ } else
+ ret = -EIO;
+ unlock:
+@@ -479,6 +479,13 @@ out:
+ /*
+ * Scan through pages checking if pages follow certain conditions,
+ * and move them to the pagelist if they do.
++ *
++ * queue_pages_pte_range() has three possible return values:
++ * 0 - pages are placed on the right node or queued successfully.
++ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
++ * specified.
++ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
++ * on a node that does not follow the policy.
+ */
+ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ struct queue_pages *qp = walk->private;
+ unsigned long flags = qp->flags;
+ int ret;
++ bool has_unmovable = false;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
+ ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+- if (ret > 0)
+- return 0;
+- else if (ret < 0)
++ if (ret != 2)
+ return ret;
+ }
++ /* THP was split, fall through to pte walk */
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
+@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+ if (!queue_pages_required(page, qp))
+ continue;
+ if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+- if (!vma_migratable(vma))
++ /* MPOL_MF_STRICT must be specified if we get here */
++ if (!vma_migratable(vma)) {
++ has_unmovable = true;
+ break;
+- migrate_page_add(page, qp->pagelist, flags);
++ }
++
++ /*
++ * Do not abort immediately since there may be
++			 * temporarily off-LRU pages in the range. Still
++			 * need to migrate other LRU pages.
++ */
++ if (migrate_page_add(page, qp->pagelist, flags))
++ has_unmovable = true;
+ } else
+ break;
+ }
+ pte_unmap_unlock(pte - 1, ptl);
+ cond_resched();
++
++ if (has_unmovable)
++ return 1;
++
+ return addr != end ? -EIO : 0;
+ }
+
+@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
+ *
+ * If pages found in a given range are on a set of nodes (determined by
+ * @nodes and @flags,) it's isolated and queued to the pagelist which is
+- * passed via @private.)
++ * passed via @private.
++ *
++ * queue_pages_range() has three possible return values:
++ * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
++ * specified.
++ * 0 - queue pages successfully or no misplaced page.
++ * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
+ */
+ static int
+ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
+@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ /*
+ * page migration, thp tail pages can be passed.
+ */
+-static void migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_page_add(struct page *page, struct list_head *pagelist,
+ unsigned long flags)
+ {
+ struct page *head = compound_head(page);
+@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
+ mod_node_page_state(page_pgdat(head),
+ NR_ISOLATED_ANON + page_is_file_cache(head),
+ hpage_nr_pages(head));
++ } else if (flags & MPOL_MF_STRICT) {
++ /*
++		 * Non-movable page may reach here. Also, there may be
++		 * temporarily off-LRU pages or non-LRU movable pages.
++ * Treat them as unmovable pages since they can't be
++ * isolated, so they can't be moved at the moment. It
++ * should return -EIO for this case too.
++ */
++ return -EIO;
+ }
+ }
++
++ return 0;
+ }
+
+ /* page allocation callback for NUMA node migration */
+@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
+ }
+ #else
+
+-static void migrate_page_add(struct page *page, struct list_head *pagelist,
++static int migrate_page_add(struct page *page, struct list_head *pagelist,
+ unsigned long flags)
+ {
++ return -EIO;
+ }
+
+ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
+ struct mempolicy *new;
+ unsigned long end;
+ int err;
++ int ret;
+ LIST_HEAD(pagelist);
+
+ if (flags & ~(unsigned long)MPOL_MF_VALID)
+@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
+ if (err)
+ goto mpol_out;
+
+- err = queue_pages_range(mm, start, end, nmask,
++ ret = queue_pages_range(mm, start, end, nmask,
+ flags | MPOL_MF_INVERT, &pagelist);
+- if (!err)
+- err = mbind_range(mm, start, end, new);
++
++ if (ret < 0) {
++ err = -EIO;
++ goto up_out;
++ }
++
++ err = mbind_range(mm, start, end, new);
+
+ if (!err) {
+ int nr_failed = 0;
+@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
+ putback_movable_pages(&pagelist);
+ }
+
+- if (nr_failed && (flags & MPOL_MF_STRICT))
++ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+ err = -EIO;
+ } else
+ putback_movable_pages(&pagelist);
+
++up_out:
+ up_write(&mm->mmap_sem);
+- mpol_out:
++mpol_out:
+ mpol_put(new);
+ return err;
+ }
+diff --git a/mm/rmap.c b/mm/rmap.c
+index e5dfe2ae6b0d..003377e24232 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+ /*
+ * No need to invalidate here it will synchronize on
+ * against the special swap migration pte.
++ *
++ * The assignment to subpage above was computed from a
++ * swap PTE which results in an invalid pointer.
++ * Since only PAGE_SIZE pages can currently be
++ * migrated, just set it to page. This will need to be
++ * changed when hugepage migrations to device private
++ * memory are supported.
+ */
++ subpage = page;
+ goto discard;
+ }
+
+diff --git a/mm/usercopy.c b/mm/usercopy.c
+index 2a09796edef8..98e924864554 100644
+--- a/mm/usercopy.c
++++ b/mm/usercopy.c
+@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
+ bool to_user)
+ {
+ /* Reject if object wraps past end of memory. */
+- if (ptr + n < ptr)
++ if (ptr + (n - 1) < ptr)
+ usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
+
+ /* Reject if NULL or ZERO-allocation. */
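The off-by-one matters when an object ends exactly at the top of the address space: ptr + n wraps to zero and the old test rejected a perfectly legal copy. A worked example using 32-bit arithmetic so the wrap is easy to see (illustration only; the kernel check operates on unsigned long):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ptr = 0xfffff000u;	/* last 4 KiB of the address space */
	uint32_t n = 0x1000;

	/* old check: ptr + n wraps to 0, falsely flagged as bogus */
	printf("old: %s\n", ptr + n < ptr ? "reject" : "accept");
	/* new check: the last byte is 0xffffffff, no wrap, accepted */
	printf("new: %s\n", ptr + (n - 1) < ptr ? "reject" : "accept");
	return 0;
}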
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 4ebf20152328..c8f58f5695a9 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -88,9 +88,6 @@ struct scan_control {
+ /* Can pages be swapped as part of reclaim? */
+ unsigned int may_swap:1;
+
+- /* e.g. boosted watermark reclaim leaves slabs alone */
+- unsigned int may_shrinkslab:1;
+-
+ /*
+ * Cgroups are not reclaimed below their configured memory.low,
+ * unless we threaten to OOM. If any cgroups are skipped due to
+@@ -2669,10 +2666,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+ shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
+ node_lru_pages += lru_pages;
+
+- if (sc->may_shrinkslab) {
+- shrink_slab(sc->gfp_mask, pgdat->node_id,
+- memcg, sc->priority);
+- }
++ shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
++ sc->priority);
+
+ /* Record the group's reclaim efficiency */
+ vmpressure(sc->gfp_mask, memcg, false,
+@@ -3149,7 +3144,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ .may_writepage = !laptop_mode,
+ .may_unmap = 1,
+ .may_swap = 1,
+- .may_shrinkslab = 1,
+ };
+
+ /*
+@@ -3191,7 +3185,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
+ .may_unmap = 1,
+ .reclaim_idx = MAX_NR_ZONES - 1,
+ .may_swap = !noswap,
+- .may_shrinkslab = 1,
+ };
+ unsigned long lru_pages;
+
+@@ -3236,7 +3229,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ .may_writepage = !laptop_mode,
+ .may_unmap = 1,
+ .may_swap = may_swap,
+- .may_shrinkslab = 1,
+ };
+
+ /*
+@@ -3545,7 +3537,6 @@ restart:
+ */
+ sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
+ sc.may_swap = !nr_boost_reclaim;
+- sc.may_shrinkslab = !nr_boost_reclaim;
+
+ /*
+ * Do some background aging of the anon list, to give
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 3b27094dc42e..c4debbe683eb 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -819,9 +819,19 @@ out:
+ static void z3fold_destroy_pool(struct z3fold_pool *pool)
+ {
+ kmem_cache_destroy(pool->c_handle);
+- z3fold_unregister_migration(pool);
+- destroy_workqueue(pool->release_wq);
++
++ /*
++ * We need to destroy pool->compact_wq before pool->release_wq,
++ * as any pending work on pool->compact_wq will call
++ * queue_work(pool->release_wq, &pool->work).
++ *
++ * There are still outstanding pages until both workqueues are drained,
++ * so we cannot unregister migration until then.
++ */
++
+ destroy_workqueue(pool->compact_wq);
++ destroy_workqueue(pool->release_wq);
++ z3fold_unregister_migration(pool);
+ kfree(pool);
+ }
+
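The comment spells out the ordering rule; in miniature, work on the compaction queue can still enqueue release work, so compact_wq must drain first and migration stays registered until both queues are gone. A toy model with plain counters standing in for workqueues (no real workqueue API involved):

#include <stdio.h>

static int release_pending;

static void compact_work(void)
{
	release_pending++;	/* queue_work(release_wq, ...) analogue */
}

int main(void)
{
	int compact_pending = 2;

	/* destroy_workqueue(compact_wq): flush all compaction work first */
	while (compact_pending--)
		compact_work();
	/* destroy_workqueue(release_wq): nothing can requeue anymore */
	while (release_pending--)
		;
	printf("all work drained, safe to kfree(pool)\n");
	return 0;
}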
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 963dfdc14827..1fa9ac483173 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
+ return 0;
+ }
+
++static int ebt_compat_init_offsets(unsigned int number)
++{
++ if (number > INT_MAX)
++ return -EINVAL;
++
++ /* also count the base chain policies */
++ number += NF_BR_NUMHOOKS;
++
++ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
++}
+
+ static int compat_table_info(const struct ebt_table_info *info,
+ struct compat_ebt_replace *newinfo)
+ {
+ unsigned int size = info->entries_size;
+ const void *entries = info->entries;
++ int ret;
+
+ newinfo->entries_size = size;
+- if (info->nentries) {
+- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
+- info->nentries);
+- if (ret)
+- return ret;
+- }
++ ret = ebt_compat_init_offsets(info->nentries);
++ if (ret)
++ return ret;
+
+ return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ entries, newinfo);
+@@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user,
+
+ xt_compat_lock(NFPROTO_BRIDGE);
+
+- if (tmp.nentries) {
+- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+- if (ret < 0)
+- goto out_unlock;
+- }
++ ret = ebt_compat_init_offsets(tmp.nentries);
++ if (ret < 0)
++ goto out_unlock;
+
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ if (ret < 0)
+diff --git a/net/core/filter.c b/net/core/filter.c
+index f681fb772940..534c310bb089 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7325,12 +7325,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
+ case offsetof(struct __sk_buff, gso_segs):
+ /* si->dst_reg = skb_shinfo(SKB); */
+ #ifdef NET_SKBUFF_DATA_USES_OFFSET
+- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+- si->dst_reg, si->src_reg,
+- offsetof(struct sk_buff, head));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+ BPF_REG_AX, si->src_reg,
+ offsetof(struct sk_buff, end));
++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
++ si->dst_reg, si->src_reg,
++ offsetof(struct sk_buff, head));
+ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
+ #else
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
+diff --git a/net/core/sock.c b/net/core/sock.c
+index aa4a00d381e3..df7b38b60164 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1988,6 +1988,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+ }
+ EXPORT_SYMBOL(skb_set_owner_w);
+
++static bool can_skb_orphan_partial(const struct sk_buff *skb)
++{
++#ifdef CONFIG_TLS_DEVICE
++	/* Drivers depend on in-order delivery for crypto offload;
++	 * a partial orphan breaks the out-of-order-OK logic.
++ */
++ if (skb->decrypted)
++ return false;
++#endif
++ return (skb->destructor == sock_wfree ||
++ (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
++}
++
+ /* This helper is used by netem, as it can hold packets in its
+ * delay queue. We want to allow the owner socket to send more
+ * packets, as if they were already TX completed by a typical driver.
+@@ -1999,11 +2012,7 @@ void skb_orphan_partial(struct sk_buff *skb)
+ if (skb_is_tcp_pure_ack(skb))
+ return;
+
+- if (skb->destructor == sock_wfree
+-#ifdef CONFIG_INET
+- || skb->destructor == tcp_wfree
+-#endif
+- ) {
++ if (can_skb_orphan_partial(skb)) {
+ struct sock *sk = skb->sk;
+
+ if (refcount_inc_not_zero(&sk->sk_refcnt)) {
+diff --git a/net/dsa/switch.c b/net/dsa/switch.c
+index 4ec5b7f85d51..09d9286b27cc 100644
+--- a/net/dsa/switch.c
++++ b/net/dsa/switch.c
+@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
+ {
+ int port;
+
++ if (!ds->ops->port_mdb_add)
++ return;
++
+ for_each_set_bit(port, bitmap, ds->num_ports)
+ ds->ops->port_mdb_add(ds, port, mdb);
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 5264f064a87e..b30f7f877181 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -984,6 +984,9 @@ new_segment:
+ if (!skb)
+ goto wait_for_memory;
+
++#ifdef CONFIG_TLS_DEVICE
++ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
++#endif
+ skb_entail(sk, skb);
+ copy = size_goal;
+ }
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 3d1e15401384..8a56e09cfb0e 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -398,10 +398,14 @@ more_data:
+ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ {
+ struct sk_msg tmp, *msg_tx = NULL;
+- int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
+ int copied = 0, err = 0;
+ struct sk_psock *psock;
+ long timeo;
++ int flags;
++
++ /* Don't let internal do_tcp_sendpages() flags through */
++ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
++ flags |= MSG_NO_SHARED_FRAGS;
+
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
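The masking above keeps userspace from smuggling the internal MSG_SENDPAGE_DECRYPTED bit in through sendmsg() flags while still forcing MSG_NO_SHARED_FRAGS on. The same bit arithmetic in isolation, with the flag values copied from the socket.h hunk earlier in this patch:

#include <stdio.h>

#define MSG_DONTWAIT           0x40
#define MSG_NO_SHARED_FRAGS    0x80000
#define MSG_SENDPAGE_DECRYPTED 0x100000	/* internal-only */

int main(void)
{
	/* user passed MSG_DONTWAIT but also smuggled the internal bit in */
	unsigned int user_flags = MSG_DONTWAIT | MSG_SENDPAGE_DECRYPTED;
	unsigned int flags;

	flags = user_flags & ~MSG_SENDPAGE_DECRYPTED;
	flags |= MSG_NO_SHARED_FRAGS;
	printf("flags=%#x (decrypted bit stripped)\n", flags);
	return 0;
}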
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 7d0be046cbc1..359d298348c7 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1318,6 +1318,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
+ if (!buff)
+ return -ENOMEM; /* We'll just try again later. */
++ skb_copy_decrypted(buff, skb);
+
+ sk->sk_wmem_queued += buff->truesize;
+ sk_mem_charge(sk, buff->truesize);
+@@ -1872,6 +1873,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
+ buff = sk_stream_alloc_skb(sk, 0, gfp, true);
+ if (unlikely(!buff))
+ return -ENOMEM;
++ skb_copy_decrypted(buff, skb);
+
+ sk->sk_wmem_queued += buff->truesize;
+ sk_mem_charge(sk, buff->truesize);
+@@ -2141,6 +2143,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ sk_mem_charge(sk, nskb->truesize);
+
+ skb = tcp_send_head(sk);
++ skb_copy_decrypted(nskb, skb);
+
+ TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
+ TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index f4f9b8344a32..e343a030ec26 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
+ * table location, we assume id gets exposed to userspace.
+ *
+ * Following nf_conn items do not change throughout lifetime
+- * of the nf_conn after it has been committed to main hash table:
++ * of the nf_conn:
+ *
+ * 1. nf_conn address
+- * 2. nf_conn->ext address
+- * 3. nf_conn->master address (normally NULL)
+- * 4. tuple
+- * 5. the associated net namespace
++ * 2. nf_conn->master address (normally NULL)
++ * 3. the associated net namespace
++ * 4. the original direction tuple
+ */
+ u32 nf_ct_get_id(const struct nf_conn *ct)
+ {
+@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
+ net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
+
+ a = (unsigned long)ct;
+- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
+- c = (unsigned long)ct->ext;
+- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
++ b = (unsigned long)ct->master;
++ c = (unsigned long)nf_ct_net(ct);
++ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
++ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
+ &ct_id_seed);
+ #ifdef CONFIG_64BIT
+ return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 5f78df080573..bad144dfabc5 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2607,6 +2607,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+
+ mutex_lock(&po->pg_vec_lock);
+
++ /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
++ * we need to confirm it under protection of pg_vec_lock.
++ */
++ if (unlikely(!po->tx_ring.pg_vec)) {
++ err = -EBUSY;
++ goto out;
++ }
+ if (likely(saddr == NULL)) {
+ dev = packet_cached_dev_get(po);
+ proto = po->num;
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index b100870f02a6..37dced00b63d 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
+ return tcf_idr_search(tn, a, index);
+ }
+
++static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
++{
++ return nla_total_size(sizeof(struct tc_skbedit))
++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
++ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
++ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
++ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
++ + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
++}
++
+ static struct tc_action_ops act_skbedit_ops = {
+ .kind = "skbedit",
+ .id = TCA_ID_SKBEDIT,
+@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
+ .init = tcf_skbedit_init,
+ .cleanup = tcf_skbedit_cleanup,
+ .walk = tcf_skbedit_walker,
++ .get_fill_size = tcf_skbedit_get_fill_size,
+ .lookup = tcf_skbedit_search,
+ .size = sizeof(struct tcf_skbedit),
+ };
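The get_fill_size hook lets the act API presize the netlink reply instead of guessing. Each nla_total_size() term is the attribute payload plus a 4-byte header, rounded up to 4-byte alignment; the _64bit variant may additionally cost a pad attribute on architectures without efficient unaligned access. Re-deriving the basic arithmetic in plain C (a userspace restatement, not the kernel header):

#include <stdio.h>

#define NLA_HDRLEN 4			/* NLA_ALIGN(sizeof(struct nlattr)) */
#define NLA_ALIGN(len) (((len) + 3U) & ~3U)

static unsigned int nla_total_size(unsigned int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* u16, u32 and u64 attribute costs as summed in get_fill_size() */
	printf("u16: %u  u32: %u  u64: %u\n",
	       nla_total_size(2), nla_total_size(4), nla_total_size(8));
	return 0;
}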
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 9ecfb8f5902a..8be89aa52b6e 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -849,7 +849,8 @@ unlock:
+ spin_unlock_bh(qdisc_lock(sch));
+
+ free_sched:
+- kfree(new_admin);
++ if (new_admin)
++ call_rcu(&new_admin->rcu, taprio_free_sched_cb);
+
+ return err;
+ }
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index a554d6d15d1b..1cf5bb5b73c4 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
+ */
+ if (net->sctp.pf_enable &&
+ (transport->state == SCTP_ACTIVE) &&
+- (asoc->pf_retrans < transport->pathmaxrxt) &&
++ (transport->error_count < transport->pathmaxrxt) &&
+ (transport->error_count > asoc->pf_retrans)) {
+
+ sctp_assoc_control_transport(asoc, transport,
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 25946604af85..e83cdaa2ab76 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
+ nstr_list[i] = htons(str_list[i]);
+
+ if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
++ kfree(nstr_list);
+ retval = -EAGAIN;
+ goto out;
+ }
+diff --git a/net/tipc/addr.c b/net/tipc/addr.c
+index b88d48d00913..0f1eaed1bd1b 100644
+--- a/net/tipc/addr.c
++++ b/net/tipc/addr.c
+@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
+ tipc_set_node_id(net, node_id);
+ }
+ tn->trial_addr = addr;
++ tn->addr_trial_end = jiffies;
+ pr_info("32-bit node address hash set to %x\n", addr);
+ }
+
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index eb8f24f420f0..4cfcce211c2f 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -342,9 +342,9 @@ static int tls_push_data(struct sock *sk,
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
+- int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+ int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
+ struct tls_record_info *record = ctx->open_record;
++ int tls_push_record_flags;
+ struct page_frag *pfrag;
+ size_t orig_size = size;
+ u32 max_open_record_len;
+@@ -359,6 +359,9 @@ static int tls_push_data(struct sock *sk,
+ if (sk->sk_err)
+ return -sk->sk_err;
+
++ flags |= MSG_SENDPAGE_DECRYPTED;
++ tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
++
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ if (tls_is_partially_sent_record(tls_ctx)) {
+ rc = tls_push_partial_record(sk, tls_ctx, flags);
+@@ -545,7 +548,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+ gfp_t sk_allocation = sk->sk_allocation;
+
+ sk->sk_allocation = GFP_ATOMIC;
+- tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
++ tls_push_partial_record(sk, ctx,
++ MSG_DONTWAIT | MSG_NOSIGNAL |
++ MSG_SENDPAGE_DECRYPTED);
+ sk->sk_allocation = sk_allocation;
+ }
+ }
+diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
+index 8a5c4d645eb1..4bbf4fc163a2 100644
+--- a/scripts/Kconfig.include
++++ b/scripts/Kconfig.include
+@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
+
+ # $(cc-option,<flag>)
+ # Return y if the compiler supports <flag>, n otherwise
+-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
++cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
+
+ # $(ld-option,<flag>)
+ # Return y if the linker supports <flag>, n otherwise
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index 38d77353c66a..cea276955147 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -75,7 +75,7 @@ modpost = scripts/mod/modpost \
+ $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
+ $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
+ $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
+- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
++ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
+ $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
+ $(if $(KBUILD_MODPOST_WARN),-w)
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index 9a94672e7adc..ade699131065 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -1228,24 +1228,11 @@ hashalg_fail:
+
+ static int __init init_digests(void)
+ {
+- u8 digest[TPM_MAX_DIGEST_SIZE];
+- int ret;
+- int i;
+-
+- ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
+- if (ret < 0)
+- return ret;
+- if (ret < TPM_MAX_DIGEST_SIZE)
+- return -EFAULT;
+-
+ digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ GFP_KERNEL);
+ if (!digests)
+ return -ENOMEM;
+
+- for (i = 0; i < chip->nr_allocated_banks; i++)
+- memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
+-
+ return 0;
+ }
+
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 12dd9b318db1..703857aab00f 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
+ if (!to_check)
+ break; /* all drained */
+ init_waitqueue_entry(&wait, current);
++ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&to_check->sleep, &wait);
+ snd_pcm_stream_unlock_irq(substream);
+ if (runtime->no_period_wakeup)
+@@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
+ }
+ tout = msecs_to_jiffies(tout * 1000);
+ }
+- tout = schedule_timeout_interruptible(tout);
++ tout = schedule_timeout(tout);
+
+ snd_pcm_stream_lock_irq(substream);
+ group = snd_pcm_stream_group_ref(substream);
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 485edaba0037..5bf24fb819d2 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_gen_free);
+
++/**
++ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
++ * @codec: the HDA codec
++ *
++ * This can be put as patch_ops reboot_notify function.
++ */
++void snd_hda_gen_reboot_notify(struct hda_codec *codec)
++{
++ /* Make the codec enter D3 to avoid spurious noises from the internal
++ * speaker during (and after) reboot
++ */
++ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
++ snd_hda_codec_write(codec, codec->core.afg, 0,
++ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
++ msleep(10);
++}
++EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
++
+ #ifdef CONFIG_PM
+ /**
+ * snd_hda_gen_check_power_status - check the loopback power save state
+@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
+ .init = snd_hda_gen_init,
+ .free = snd_hda_gen_free,
+ .unsol_event = snd_hda_jack_unsol_event,
++ .reboot_notify = snd_hda_gen_reboot_notify,
+ #ifdef CONFIG_PM
+ .check_power_status = snd_hda_gen_check_power_status,
+ #endif
+@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
+
+ err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
+ if (err < 0)
+- return err;
++ goto error;
+
+ err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
+ if (err < 0)
+diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
+index 35a670a71c42..5f199dcb0d18 100644
+--- a/sound/pci/hda/hda_generic.h
++++ b/sound/pci/hda/hda_generic.h
+@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
+ struct auto_pin_cfg *cfg);
+ int snd_hda_gen_build_controls(struct hda_codec *codec);
+ int snd_hda_gen_build_pcms(struct hda_codec *codec);
++void snd_hda_gen_reboot_notify(struct hda_codec *codec);
+
+ /* standard jack event callbacks */
+ void snd_hda_gen_hp_automute(struct hda_codec *codec,
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index fb8f452a1c78..5732c31c4167 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2505,6 +2505,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* AMD, X370 & co */
+ { PCI_DEVICE(0x1022, 0x1457),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
++ /* AMD, X570 & co */
++ { PCI_DEVICE(0x1022, 0x1487),
++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD Stoney */
+ { PCI_DEVICE(0x1022, 0x157a),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index f299f137eaea..14298ef45b21 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+ {
+ struct conexant_spec *spec = codec->spec;
+
+- switch (codec->core.vendor_id) {
+- case 0x14f12008: /* CX8200 */
+- case 0x14f150f2: /* CX20722 */
+- case 0x14f150f4: /* CX20724 */
+- break;
+- default:
+- return;
+- }
+-
+ /* Turn the problematic codec into D3 to avoid spurious noises
+ from the internal speaker during (and after) reboot */
+ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+-
+- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+- snd_hda_codec_write(codec, codec->core.afg, 0,
+- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+- msleep(10);
++ snd_hda_gen_reboot_notify(codec);
+ }
+
+ static void cx_auto_free(struct hda_codec *codec)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index de224cbea7a0..e333b3e30e31 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
+ alc_shutup(codec);
+ }
+
+-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
+-static void alc_d3_at_reboot(struct hda_codec *codec)
+-{
+- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+- snd_hda_codec_write(codec, codec->core.afg, 0,
+- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+- msleep(10);
+-}
+-
+ #define alc_free snd_hda_gen_free
+
+ #ifdef CONFIG_PM
+@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
++ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
+ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ codec->power_save_node = 0; /* avoid click noises */
+ snd_hda_apply_pincfgs(codec, pincfgs);
+@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 7498b5191b68..b5927c3d5bc0 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -68,6 +68,7 @@ struct mixer_build {
+ unsigned char *buffer;
+ unsigned int buflen;
+ DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
++ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
+ struct usb_audio_term oterm;
+ const struct usbmix_name_map *map;
+ const struct usbmix_selector_map *selector_map;
+@@ -744,6 +745,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+ return -EINVAL;
+ if (!desc->bNrInPins)
+ return -EINVAL;
++ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
++ return -EINVAL;
+
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+@@ -773,16 +776,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
+ * parse the source unit recursively until it reaches to a terminal
+ * or a branched unit.
+ */
+-static int check_input_term(struct mixer_build *state, int id,
++static int __check_input_term(struct mixer_build *state, int id,
+ struct usb_audio_term *term)
+ {
+ int protocol = state->mixer->protocol;
+ int err;
+ void *p1;
++ unsigned char *hdr;
+
+ memset(term, 0, sizeof(*term));
+- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
+- unsigned char *hdr = p1;
++ for (;;) {
++ /* a loop in the terminal chain? */
++ if (test_and_set_bit(id, state->termbitmap))
++ return -EINVAL;
++
++ p1 = find_audio_control_unit(state, id);
++ if (!p1)
++ break;
++
++ hdr = p1;
+ term->id = id;
+
+ if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
+@@ -800,7 +812,7 @@ static int check_input_term(struct mixer_build *state, int id,
+
+ /* call recursively to verify that the
+ * referenced clock entity is valid */
+- err = check_input_term(state, d->bCSourceID, term);
++ err = __check_input_term(state, d->bCSourceID, term);
+ if (err < 0)
+ return err;
+
+@@ -834,7 +846,7 @@ static int check_input_term(struct mixer_build *state, int id,
+ case UAC2_CLOCK_SELECTOR: {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve the channel info */
+- err = check_input_term(state, d->baSourceID[0], term);
++ err = __check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+ term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
+@@ -897,7 +909,7 @@ static int check_input_term(struct mixer_build *state, int id,
+
+ /* call recursively to verify that the
+ * referenced clock entity is valid */
+- err = check_input_term(state, d->bCSourceID, term);
++ err = __check_input_term(state, d->bCSourceID, term);
+ if (err < 0)
+ return err;
+
+@@ -948,7 +960,7 @@ static int check_input_term(struct mixer_build *state, int id,
+ case UAC3_CLOCK_SELECTOR: {
+ struct uac_selector_unit_descriptor *d = p1;
+ /* call recursively to retrieve the channel info */
+- err = check_input_term(state, d->baSourceID[0], term);
++ err = __check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+ term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
+@@ -964,7 +976,7 @@ static int check_input_term(struct mixer_build *state, int id,
+ return -EINVAL;
+
+ /* call recursively to retrieve the channel info */
+- err = check_input_term(state, d->baSourceID[0], term);
++ err = __check_input_term(state, d->baSourceID[0], term);
+ if (err < 0)
+ return err;
+
+@@ -982,6 +994,15 @@ static int check_input_term(struct mixer_build *state, int id,
+ return -ENODEV;
+ }
+
++
++static int check_input_term(struct mixer_build *state, int id,
++ struct usb_audio_term *term)
++{
++ memset(term, 0, sizeof(*term));
++ memset(state->termbitmap, 0, sizeof(state->termbitmap));
++ return __check_input_term(state, id, term);
++}
++
+ /*
+ * Feature Unit
+ */
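
The termbitmap guard above is a standard cycle check for a recursive descriptor walk: the outer entry point clears a visited bitmap, and the walker marks every unit id with test_and_set_bit() before following it, so a crafted device whose units point back at each other fails with -EINVAL instead of looping forever. A minimal userspace sketch of the same idea (the unit table, ids, and helpers here are hypothetical, not the USB-audio descriptor layout):

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define MAX_IDS 16
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* hypothetical unit table: next[i] is the source of unit i, -1 = terminal;
 * unlisted entries default to 0, a real parser would validate every id */
static const int next[MAX_IDS] = { [1] = 2, [2] = 3, [3] = 1 }; /* 1->2->3->1 */

static int test_and_set(unsigned long *map, unsigned int bit)
{
        unsigned long mask = 1UL << (bit % BITS_PER_LONG);
        int old = (map[bit / BITS_PER_LONG] & mask) != 0;

        map[bit / BITS_PER_LONG] |= mask;
        return old;
}

static int walk_chain(int id)
{
        unsigned long visited[(MAX_IDS + BITS_PER_LONG - 1) / BITS_PER_LONG] = { 0 };

        for (;;) {
                if (id < 0)
                        return 0;               /* reached a terminal */
                if (id >= MAX_IDS)
                        return -ERANGE;
                if (test_and_set(visited, id))
                        return -EINVAL;         /* a loop in the chain */
                id = next[id];
        }
}

int main(void)
{
        /* the 1->2->3->1 cycle is detected; prints -EINVAL (typically -22) */
        printf("walk from 1: %d\n", walk_chain(1));
        return 0;
}
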
+diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+index 930b80f422e8..aa597ae53747 100755
+--- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh
++++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+@@ -3,10 +3,13 @@
+
+ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
++# also as:
++# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
++
+ printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
+-regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+-egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
+- sed -r "s/$regex/\2 \1/g" | \
++regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
++egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
++ sed -r "s/$regex/\4 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n"
+ printf "};\n\n"
+ printf "#if 0\n"
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index e84b70be3fc1..abe9af867967 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3478,6 +3478,13 @@ int perf_session__read_header(struct perf_session *session)
+ data->file.path);
+ }
+
++ if (f_header.attr_size == 0) {
++ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
++ "Was the 'perf record' command properly terminated?\n",
++ data->file.path);
++ return -EINVAL;
++ }
++
+ nr_attrs = f_header.attrs.size / f_header.attr_size;
+ lseek(fd, f_header.attrs.offset, SEEK_SET);
+
+@@ -3558,7 +3565,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
+ size += sizeof(struct perf_event_header);
+ size += ids * sizeof(u64);
+
+- ev = malloc(size);
++ ev = zalloc(size);
+
+ if (ev == NULL)
+ return -ENOMEM;
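
Both hunks above are defensive handling of an untrusted file: attr_size comes straight from the on-disk header and is used as a divisor, so a zero value (left behind when perf record is killed uncleanly) must be rejected before the division, and switching malloc() to zalloc() keeps uninitialized heap bytes out of a buffer that is later written out. A small sketch of the divisor check, with a hypothetical header layout loosely shaped like perf's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical on-disk header, loosely shaped like perf's file header */
struct file_header {
        uint64_t attrs_size;    /* total size of the attr section */
        uint64_t attr_size;     /* size of one attr record */
};

static int count_attrs(const struct file_header *h, uint64_t *nr)
{
        /*
         * A truncated or corrupt file can leave attr_size == 0;
         * dividing by it would crash, so reject the file instead.
         */
        if (h->attr_size == 0)
                return -EINVAL;
        *nr = h->attrs_size / h->attr_size;
        return 0;
}

int main(void)
{
        struct file_header h = { .attrs_size = 512, .attr_size = 0 };
        uint64_t nr;

        if (count_attrs(&h, &nr))
                fprintf(stderr, "corrupt header: attr_size is 0\n");

        /*
         * The zalloc() in the second hunk is the same caution applied to
         * writes: calloc-style zeroing keeps uninitialized heap bytes out
         * of the padding that later lands in the output file.
         */
        void *ev = calloc(1, 256);
        free(ev);
        return 0;
}
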
+diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+index ecd96eda7f6a..e11b7c1efda3 100644
+--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+@@ -509,5 +509,52 @@
+ "teardown": [
+ "$TC actions flush action skbedit"
+ ]
++ },
++ {
++ "id": "630c",
++ "name": "Add batch of 32 skbedit actions with all parameters and cookie",
++ "category": [
++ "actions",
++ "skbedit"
++ ],
++ "setup": [
++ [
++ "$TC actions flush action skbedit",
++ 0,
++ 1,
++ 255
++ ]
++ ],
++ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
++ "expExitCode": "0",
++ "verifyCmd": "$TC actions list action skbedit",
++ "matchPattern": "^[ \t]+index [0-9]+ ref",
++ "matchCount": "32",
++ "teardown": [
++ "$TC actions flush action skbedit"
++ ]
++ },
++ {
++ "id": "706d",
++ "name": "Delete batch of 32 skbedit actions with all parameters",
++ "category": [
++ "actions",
++ "skbedit"
++ ],
++ "setup": [
++ [
++ "$TC actions flush action skbedit",
++ 0,
++ 1,
++ 255
++ ],
++ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
++ ],
++ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
++ "expExitCode": "0",
++ "verifyCmd": "$TC actions list action skbedit",
++ "matchPattern": "^[ \t]+index [0-9]+ ref",
++ "matchCount": "0",
++ "teardown": []
+ }
+ ]
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-23 22:19 Mike Pagano
From: Mike Pagano @ 2019-08-23 22:19 UTC
To: gentoo-commits
commit: db9df1b48b1160e9c5e247032373db9b63c81c58
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 23 22:18:55 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 23 22:18:55 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=db9df1b4
Add support for gcc 9.1 CPU optimization patch
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/0000_README b/0000_README
index eb585a9..04259bc 100644
--- a/0000_README
+++ b/0000_README
@@ -110,3 +110,7 @@ Desc: Kernel patch enables gcc >= v4.13 optimizations for additional CPUs.
Patch: 5011_enable-cpu-optimizations-for-gcc8.patch
From: https://github.com/graysky2/kernel_gcc_patch/
Desc: Kernel patch for >= gccv8 enables kernel >= v4.13 optimizations for additional CPUs.
+
+Patch: 5012_enable-cpu-optimizations-for-gcc91.patch
+From: https://github.com/graysky2/kernel_gcc_patch/
+Desc: Kernel patch enables gcc >= v9.1 optimizations for additional CPUs.
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-16 12:30 Mike Pagano
From: Mike Pagano @ 2019-08-16 12:30 UTC
To: gentoo-commits
commit: 7f97e61dce9374550e6d8fd4db1fd1773e39ffb9
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 16 12:29:27 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 16 12:29:27 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7f97e61d
Remove redundant patch (vmalloc)
1800_vmalloc-sync-unmappings-fix.patch
mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
0000_README | 4 ---
1800_vmalloc-sync-unmappings-fix.patch | 58 ----------------------------------
2 files changed, 62 deletions(-)
diff --git a/0000_README b/0000_README
index 4179af7..eb585a9 100644
--- a/0000_README
+++ b/0000_README
@@ -87,10 +87,6 @@ Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
-Patch: 1800_vmalloc-sync-unmappings-fix.patch
-From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167
-Desc: mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
-
Patch: 2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
From: https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
Desc: Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
diff --git a/1800_vmalloc-sync-unmappings-fix.patch b/1800_vmalloc-sync-unmappings-fix.patch
deleted file mode 100644
index 7e56e51..0000000
--- a/1800_vmalloc-sync-unmappings-fix.patch
+++ /dev/null
@@ -1,58 +0,0 @@
-From 3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167 Mon Sep 17 00:00:00 2001
-From: Joerg Roedel <jroedel@suse.de>
-Date: Fri, 19 Jul 2019 20:46:52 +0200
-Subject: mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
-
-On x86-32 with PTI enabled, parts of the kernel page-tables are not shared
-between processes. This can cause mappings in the vmalloc/ioremap area to
-persist in some page-tables after the region is unmapped and released.
-
-When the region is re-used the processes with the old mappings do not fault
-in the new mappings but still access the old ones.
-
-This causes undefined behavior, in reality often data corruption, kernel
-oopses and panics and even spontaneous reboots.
-
-Fix this problem by actively syncing unmaps in the vmalloc/ioremap area to
-all page-tables in the system before the regions can be re-used.
-
-References: https://bugzilla.suse.com/show_bug.cgi?id=1118689
-Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
-Signed-off-by: Joerg Roedel <jroedel@suse.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
-Link: https://lkml.kernel.org/r/20190719184652.11391-4-joro@8bytes.org
----
- mm/vmalloc.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
-diff --git a/mm/vmalloc.c b/mm/vmalloc.c
-index 4fa8d84599b0..e0fc963acc41 100644
---- a/mm/vmalloc.c
-+++ b/mm/vmalloc.c
-@@ -1258,6 +1258,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
- if (unlikely(valist == NULL))
- return false;
-
-+ /*
-+ * First make sure the mappings are removed from all page-tables
-+ * before they are freed.
-+ */
-+ vmalloc_sync_all();
-+
- /*
- * TODO: to calculate a flush range without looping.
- * The list can be up to lazy_max_pages() elements.
-@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
- /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
-+ *
-+ * The purpose of this function is to make sure the vmalloc area
-+ * mappings are identical in all page-tables in the system.
- */
- void __weak vmalloc_sync_all(void)
- {
---
-cgit 1.2-0.3.lf.el7
-
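
The stub the removed patch relied on — void __weak vmalloc_sync_all(void) — uses weak linkage: the generic no-op is emitted as a weak symbol, and any architecture that defines a strong version silently overrides it. A minimal sketch of the mechanism with GCC/clang's weak attribute (the function name is made up for illustration):

#include <stdio.h>

/*
 * Default (weak) definition: a no-op, used when nothing stronger is
 * linked in -- the same trick as the kernel's vmalloc_sync_all() stub.
 */
__attribute__((weak)) void sync_mappings(void)
{
        puts("generic no-op sync");
}

/*
 * Uncommenting this strong definition (or linking it from another
 * object file) silently overrides the weak one, which is how an
 * architecture supplies its real implementation:
 *
 * void sync_mappings(void) { puts("arch-specific sync"); }
 */

int main(void)
{
        sync_mappings();
        return 0;
}
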
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-16 12:16 Mike Pagano
From: Mike Pagano @ 2019-08-16 12:16 UTC
To: gentoo-commits
commit: cb6b2cec3829ccc37b3cad4e77fe06905bd73489
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 16 12:15:48 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 16 12:15:48 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb6b2cec
Linux patch 5.2.9
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1008_linux-5.2.9.patch | 4835 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4839 insertions(+)
diff --git a/0000_README b/0000_README
index 6e8d29d..4179af7 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.2.8.patch
From: https://www.kernel.org
Desc: Linux 5.2.8
+Patch: 1008_linux-5.2.9.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.2.9.patch b/1008_linux-5.2.9.patch
new file mode 100644
index 0000000..682b8db
--- /dev/null
+++ b/1008_linux-5.2.9.patch
@@ -0,0 +1,4835 @@
+diff --git a/Makefile b/Makefile
+index bad87c4c8117..cfc667fe9959 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+index 5fd47eec4407..1679959a3654 100644
+--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+@@ -126,6 +126,9 @@
+ };
+
+ mdio-bus-mux {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
+ /* BIT(9) = 1 => external mdio */
+ mdio_ext: mdio@200 {
+ reg = <0x200>;
+diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+index 9207d5d071f1..d556f7c541ce 100644
+--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
++++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+@@ -112,7 +112,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts
+index bc77f26a2f1d..6157a058feec 100644
+--- a/arch/arm/boot/dts/imx6ul-geam.dts
++++ b/arch/arm/boot/dts/imx6ul-geam.dts
+@@ -156,7 +156,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
+index 213e802bf35c..23e6e2e7ace9 100644
+--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
++++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
+@@ -148,7 +148,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+index 39eeeddac39e..09f7ffa9ad8c 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+@@ -43,7 +43,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts
+index de07357b27fc..6cd7d5877d20 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-pi.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts
+@@ -43,7 +43,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+@@ -58,7 +58,7 @@
+ };
+
+ &i2c3 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
+index 05d03f09ff54..71262dcdbca3 100644
+--- a/arch/arm/mach-davinci/sleep.S
++++ b/arch/arm/mach-davinci/sleep.S
+@@ -24,6 +24,7 @@
+ #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
+
+ .text
++ .arch armv5te
+ /*
+ * Move DaVinci into deep sleep state
+ *
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+index e25f7fcd7997..cffa8991880d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
++++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+@@ -462,7 +462,7 @@
+ #define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
++#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
+ #define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
+@@ -472,7 +472,7 @@
+ #define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
++#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
+ #define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 6d635ba0904c..6632cbd88bed 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -675,8 +675,7 @@
+
+ sai2: sai@308b0000 {
+ #sound-dai-cells = <0>;
+- compatible = "fsl,imx8mq-sai",
+- "fsl,imx6sx-sai";
++ compatible = "fsl,imx8mq-sai";
+ reg = <0x308b0000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index fd5b1a4efc70..844e2964b0f5 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
+ regs->pmr_save = GIC_PRIO_IRQON;
+ }
+
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ regs->pstate = PSR_MODE_EL0t;
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(regs);
+
+ regs->sp = sp;
+ }
+@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
+ #endif
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_AA32_SSBS_BIT;
++ set_compat_ssbs_bit(regs);
+
+ regs->compat_sp = sp;
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 9cdc4592da3e..320a30dbe35e 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -586,10 +586,8 @@ el1_sync:
+ b.eq el1_ia
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
+ b.eq el1_undef
+- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
+- b.eq el1_sp_pc
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el1_sp_pc
++ b.eq el1_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
+ b.eq el1_undef
+ cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
+@@ -611,9 +609,11 @@ el1_da:
+ bl do_mem_abort
+
+ kernel_exit 1
+-el1_sp_pc:
++el1_pc:
+ /*
+- * Stack or PC alignment exception handling
++ * PC alignment exception handling. We don't handle SP alignment faults,
++ * since we will have hit a recursive exception when trying to push the
++ * initial pt_regs.
+ */
+ mrs x0, far_el1
+ inherit_daif pstate=x23, tmp=x2
+@@ -732,9 +732,9 @@ el0_sync:
+ ccmp x24, #ESR_ELx_EC_WFx, #4, ne
+ b.eq el0_sys
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_sp
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
+@@ -758,7 +758,7 @@ el0_sync_compat:
+ cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
+ b.eq el0_fpsimd_exc
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
+@@ -858,11 +858,15 @@ el0_fpsimd_exc:
+ mov x1, sp
+ bl do_fpsimd_exc
+ b ret_to_user
++el0_sp:
++ ldr x26, [sp, #S_SP]
++ b el0_sp_pc
++el0_pc:
++ mrs x26, far_el1
+ el0_sp_pc:
+ /*
+ * Stack or PC alignment exception handling
+ */
+- mrs x26, far_el1
+ gic_prio_kentry_setup tmp=x0
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 6a869d9f304f..b0c859ca6320 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+- childregs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(childregs);
+
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
+ }
+ }
+
++/*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++ struct pt_regs *regs = task_pt_regs(next);
++
++ /*
++ * Nothing to do for kernel threads, but 'regs' may be junk
++ * (e.g. idle task) so check the flags and bail early.
++ */
++ if (unlikely(next->flags & PF_KTHREAD))
++ return;
++
++ /* If the mitigation is enabled, then we leave SSBS clear. */
++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++ test_tsk_thread_flag(next, TIF_SSBD))
++ return;
++
++ if (compat_user_mode(regs))
++ set_compat_ssbs_bit(regs);
++ else if (user_mode(regs))
++ set_ssbs_bit(regs);
++}
++
+ /*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ entry_task_switch(next);
+ uao_thread_switch(next);
+ ptrauth_thread_switch(next);
++ ssbs_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 6d704ad2472b..993017dd83ca 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return false;
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 2540d3b2588c..2eda1ec36f55 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -249,7 +249,7 @@ void __init paging_init(void)
+
+ #ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
+- ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
++ 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
+ #endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ #ifdef CONFIG_HIGHMEM
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 96c53b23e58f..dad9825e4087 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -42,8 +42,9 @@ struct papr_scm_priv {
+ static int drc_pmem_bind(struct papr_scm_priv *p)
+ {
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+- uint64_t rc, token;
+ uint64_t saved = 0;
++ uint64_t token;
++ int64_t rc;
+
+ /*
+ * When the hypervisor cannot map all the requested memory in a single
+@@ -63,6 +64,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
+ } while (rc == H_BUSY);
+
+ if (rc) {
++ /* H_OVERLAP needs a separate error path */
++ if (rc == H_OVERLAP)
++ return -EBUSY;
++
+ dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
+ return -ENXIO;
+ }
+@@ -316,6 +321,14 @@ static int papr_scm_probe(struct platform_device *pdev)
+
+ /* request the hypervisor to bind this region to somewhere in memory */
+ rc = drc_pmem_bind(p);
++
++ /* If phyp says drc memory still bound then force unbound and retry */
++ if (rc == -EBUSY) {
++ dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
++ drc_pmem_unbind(p);
++ rc = drc_pmem_bind(p);
++ }
++
+ if (rc)
+ goto err;
+
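
The retry above handles a hypervisor that still considers the region bound by a previous kernel (for example after a crash or kexec): H_OVERLAP is mapped to -EBUSY so the caller can distinguish it from hard failures, force an unbind, and retry the bind exactly once. A compact sketch of that shape, with bind_region()/unbind_region() as hypothetical stand-ins for the hcalls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool stale_binding = true;   /* pretend a previous owner left it bound */

static int bind_region(void)
{
        if (stale_binding)
                return -EBUSY;      /* maps the hypervisor's H_OVERLAP */
        return 0;
}

static void unbind_region(void)
{
        stale_binding = false;
}

int main(void)
{
        int rc = bind_region();

        if (rc == -EBUSY) {
                fprintf(stderr, "retrying bind after unbinding\n");
                unbind_region();
                rc = bind_region();
        }
        printf("bind rc = %d\n", rc);
        return rc ? 1 : 0;
}
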
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index a4d38092530a..823578c6b9e2 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#define ARCH_ZONE_DMA_BITS 31
++
+ #include <asm-generic/memory_model.h>
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
+index 401e30ca0a75..8272a4492844 100644
+--- a/arch/x86/boot/string.c
++++ b/arch/x86/boot/string.c
+@@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
+ return diff;
+ }
+
++/*
++ * Clang may lower `memcmp == 0` to `bcmp == 0`.
++ */
++int bcmp(const void *s1, const void *s2, size_t len)
++{
++ return memcmp(s1, s2, len);
++}
++
+ int strcmp(const char *str1, const char *str2)
+ {
+ const unsigned char *s1 = (const unsigned char *)str1;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 2889dd023566..6179be624f35 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -20,7 +20,6 @@
+ #include <asm/intel-family.h>
+ #include <asm/apic.h>
+ #include <asm/cpu_device_id.h>
+-#include <asm/hypervisor.h>
+
+ #include "../perf_event.h"
+
+@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
+ };
+
+ static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
+- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
+- INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
++ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
++ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ EVENT_EXTRA_END
+@@ -4057,7 +4056,7 @@ static bool check_msr(unsigned long msr, u64 mask)
+ * Disable the check for real HW, so we don't
+ * mess with potentionaly enabled registers:
+ */
+- if (hypervisor_is_type(X86_HYPER_NATIVE))
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return true;
+
+ /*
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 505c73dc6a73..6601b8759c92 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
+
+ struct event_constraint intel_icl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+- INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
+
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8253925c5e8c..921c609c2af7 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1169,6 +1169,7 @@ struct kvm_x86_ops {
+ int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 48c865a4e5dd..14384a1ec53f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3290,7 +3290,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
+ vmcb->control.exit_int_info_err,
+ KVM_ISA_SVM);
+
+- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(svm->nested.vmcb), &map);
++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
+@@ -3580,7 +3580,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
+
+ vmcb_gpa = svm->vmcb->save.rax;
+
+- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(vmcb_gpa), &map);
++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
+@@ -5167,6 +5167,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ kvm_vcpu_wake_up(vcpu);
+ }
+
++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return false;
++}
++
+ static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ {
+ unsigned long flags;
+@@ -7264,6 +7269,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+
+ .pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 924c2a79e4a9..4b830c0adcf8 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6096,6 +6096,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+ return max_irr;
+ }
+
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return pi_test_on(vcpu_to_pi_desc(vcpu));
++}
++
+ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ {
+ if (!kvm_vcpu_apicv_active(vcpu))
+@@ -7662,6 +7667,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a8ad3a4d86b1..cbced8ff29d4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9641,6 +9641,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
++ return true;
++
++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
++ kvm_test_request(KVM_REQ_EVENT, vcpu))
++ return true;
++
++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
++ return true;
++
++ return false;
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.preempted_in_kernel;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 46df4c6aae46..26a8b4b1b9ed 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -194,13 +194,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+- if (!pmd_present(*pmd_k))
+- return NULL;
+
+- if (!pmd_present(*pmd))
++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
+ set_pmd(pmd, *pmd_k);
++
++ if (!pmd_present(*pmd_k))
++ return NULL;
+ else
+- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+
+ return pmd_k;
+ }
+@@ -220,17 +221,13 @@ void vmalloc_sync_all(void)
+ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ spinlock_t *pgt_lock;
+- pmd_t *ret;
+
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++ vmalloc_sync_one(page_address(page), address);
+ spin_unlock(pgt_lock);
+-
+- if (!ret)
+- break;
+ }
+ spin_unlock(&pgd_lock);
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 3cf302b26332..8901a1f89cf5 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
+ targets += $(purgatory-y)
+ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+
++$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
++ $(call if_changed_rule,cc_o_c)
++
+ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+@@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
+
+ # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+ # in turn leaves some undefined symbols like __fentry__ in purgatory and not
+-# sure how to relocate those. Like kexec-tools, use custom flags.
+-
+-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
+-KBUILD_CFLAGS += -m$(BITS)
+-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
++# sure how to relocate those.
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
++endif
++
++ifdef CONFIG_STACKPROTECTOR
++CFLAGS_REMOVE_sha256.o += -fstack-protector
++CFLAGS_REMOVE_purgatory.o += -fstack-protector
++CFLAGS_REMOVE_string.o += -fstack-protector
++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
++endif
++
++ifdef CONFIG_STACKPROTECTOR_STRONG
++CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
++CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
++CFLAGS_REMOVE_string.o += -fstack-protector-strong
++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
++endif
++
++ifdef CONFIG_RETPOLINE
++CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
++endif
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
+index 6d8d5a34c377..b607bda786f6 100644
+--- a/arch/x86/purgatory/purgatory.c
++++ b/arch/x86/purgatory/purgatory.c
+@@ -68,3 +68,9 @@ void purgatory(void)
+ }
+ copy_backup_region();
+ }
++
++/*
++ * Defined in order to reuse memcpy() and memset() from
++ * arch/x86/boot/compressed/string.c
++ */
++void warn(const char *msg) {}
+diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
+deleted file mode 100644
+index 01ad43873ad9..000000000000
+--- a/arch/x86/purgatory/string.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Simple string functions.
+- *
+- * Copyright (C) 2014 Red Hat Inc.
+- *
+- * Author:
+- * Vivek Goyal <vgoyal@redhat.com>
+- */
+-
+-#include <linux/types.h>
+-
+-#include "../boot/string.c"
+-
+-void *memcpy(void *dst, const void *src, size_t len)
+-{
+- return __builtin_memcpy(dst, src, len);
+-}
+-
+-void *memset(void *dst, int c, size_t len)
+-{
+- return __builtin_memset(dst, c, len);
+-}
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index 659ccb8b693f..06d024204f50 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
+ return -1;
+
+ data->got_token = true;
++ smp_wmb();
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+@@ -245,6 +246,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+ do {
++ /* The memory barrier in set_task_state saves us here. */
+ if (data.got_token)
+ break;
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+@@ -255,12 +257,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ */
++ smp_rmb();
+ if (data.got_token)
+ cleanup_cb(rqw, private_data);
+ break;
+ }
+ io_schedule();
+- has_sleeper = false;
++ has_sleeper = true;
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ } while (1);
+ finish_wait(&rqw->wait, &data.wq);
+ }
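
The smp_wmb()/smp_rmb() pair added above orders the token handoff: the waker's writes to the wait data must be visible before the woken task observes got_token set. In C11 terms this is a release store paired with an acquire load on the flag; a small two-thread sketch (the spin loop stands in for the sleep/wake machinery, an assumption of the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                      /* data the waker hands over */
static atomic_bool got_token;

static void *waker(void *arg)
{
        (void)arg;
        payload = 42;                    /* plain write... */
        atomic_store_explicit(&got_token, true,
                              memory_order_release); /* ...published */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        while (!atomic_load_explicit(&got_token, memory_order_acquire))
                ;                        /* spin, standing in for the sleep */
        printf("payload = %d\n", payload);  /* guaranteed to print 42 */
        pthread_join(t, NULL);
        return 0;
}
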
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index d4551e33fa71..8569b79e8b58 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)node->node_data;
+- if (idx > its->its_count) {
+- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
++ if (idx >= its->its_count) {
++ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
+ idx, its->its_count);
+ return -ENXIO;
+ }
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 4d1729853d1a..8b25c7b12179 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ * the device will only expose one IRQ, and this fallback
+ * allows a common code path across either kind of resource.
+ */
+- if (num == 0 && has_acpi_companion(&dev->dev))
+- return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
++ if (num == 0 && has_acpi_companion(&dev->dev)) {
++ int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
++
++ /* Our callers expect -ENXIO for missing IRQs. */
++ if (ret >= 0 || ret == -EPROBE_DEFER)
++ return ret;
++ }
+
+ return -ENXIO;
+ #endif
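
The fix above preserves an API contract: platform_get_irq() callers have always tested for -ENXIO, while the ACPI helper can return other errno values. Successes and -EPROBE_DEFER (retry later) pass through; everything else is folded into -ENXIO. A sketch of the normalization, with backend_get_irq() as a hypothetical stand-in for acpi_dev_gpio_irq_get() and EPROBE_DEFER defined locally because userspace errno.h lacks it:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517   /* Linux's deferred-probe code, for illustration */

/* stand-in for acpi_dev_gpio_irq_get(); returns irq >= 0 or -errno */
static int backend_get_irq(int num)
{
        return num == 0 ? -ENOENT : -EPROBE_DEFER;
}

static int get_irq(int num)
{
        int ret = backend_get_irq(num);

        /*
         * Pass through successes and -EPROBE_DEFER (the caller must
         * retry later); fold every other failure into the -ENXIO that
         * callers of this API were written to expect.
         */
        if (ret >= 0 || ret == -EPROBE_DEFER)
                return ret;
        return -ENXIO;
}

int main(void)
{
        printf("irq 0 -> %d (expected -ENXIO)\n", get_irq(0));
        printf("irq 1 -> %d (expected -EPROBE_DEFER)\n", get_irq(1));
        return 0;
}
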
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 90ebfcae0ce6..2b3103c30857 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
+ unsigned int resp_size;
+- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
++ struct shash_desc *desc;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
++ desc = kmalloc(sizeof(struct shash_desc) +
++ crypto_shash_descsize(connection->cram_hmac_tfm),
++ GFP_KERNEL);
++ if (!desc) {
++ rv = -1;
++ goto fail;
++ }
+ desc->tfm = connection->cram_hmac_tfm;
+
+ rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ kfree(peers_ch);
+ kfree(response);
+ kfree(right_response);
+- shash_desc_zero(desc);
++ if (desc) {
++ shash_desc_zero(desc);
++ kfree(desc);
++ }
+
+ return rv;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 430d31499ce9..e1739efca37e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -893,7 +893,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
+
+ static int loop_kthread_worker_fn(void *worker_ptr)
+ {
+- current->flags |= PF_LESS_THROTTLE;
++ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+ return kthread_worker_fn(worker_ptr);
+ }
+
+diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
+index 6b1e4abe3248..d2f061015323 100644
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
++ if (!cpu)
++ goto out;
+
++ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+- if (!cpu)
++ if (!max_freqp) {
++ err = -EINVAL;
+ goto out;
++ }
++
++ /* we need the freq in kHz */
++ max_freq = *max_freqp / 1000;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+-
+- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+- if (!max_freqp) {
+- err = -EINVAL;
+- goto out_unmap_sdcpwr;
+- }
+-
+- /* we need the freq in kHz */
+- max_freq = *max_freqp / 1000;
+-
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+@@ -198,9 +196,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+
+-out_unmap_sdcpwr:
+- iounmap(sdcpwr_mapbase);
+-
+ out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+ out:
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index f9fec2ddf56a..94c1ad7eeddf 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+ {
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = encrypt;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 1cbdfc08ca00..a13e8a362316 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int authsize;
+ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+@@ -643,6 +644,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ if (!aes->key) /* Gotta have a key SGL */
+ return -EINVAL;
+
++ /* Zero defaults to 16 bytes, the maximum size */
++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ /* First, decompose the source buffer into AAD & PT,
+ * and the destination buffer into AAD, CT & tag, or
+ * the input into CT & tag.
+@@ -657,7 +673,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+ } else {
+ /* Input length for decryption includes tag */
+- ilen = aes->src_len - AES_BLOCK_SIZE;
++ ilen = aes->src_len - authsize;
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
+@@ -766,8 +782,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+- unsigned int nbytes = aes->src_len
+- % AES_BLOCK_SIZE;
++ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
+
+ if (nbytes) {
+ op.eom = 1;
+@@ -839,19 +854,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ } else {
+ /* Does this ciphered tag match the input? */
+- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ if (ret)
+ goto e_tag;
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+- AES_BLOCK_SIZE) ? -EBADMSG : 0;
++ authsize) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+@@ -859,11 +874,11 @@ e_tag:
+ ccp_dm_free(&final_wa);
+
+ e_dst:
+- if (aes->src_len && !in_place)
++ if (ilen > 0 && !in_place)
+ ccp_free_data(&dst, cmd_q);
+
+ e_src:
+- if (aes->src_len)
++ if (ilen > 0)
+ ccp_free_data(&src, cmd_q);
+
+ e_aad:
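
The two switches added in these hunks enforce the GCM tag lengths this engine accepts (16 down to 12, 8, or 4 bytes), with zero meaning the 16-byte maximum; decryption then uses the negotiated size rather than assuming a full AES block when splitting ciphertext from tag. A standalone sketch of the validation:

#include <errno.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/*
 * Same shape as the two switches above: accept only the tag lengths
 * the GCM spec (and this engine) supports, and let 0 mean "use the
 * 16-byte maximum".
 */
static int resolve_authsize(unsigned int authsize)
{
        if (authsize == 0)
                authsize = AES_BLOCK_SIZE;

        switch (authsize) {
        case 16: case 15: case 14: case 13: case 12:
        case 8:
        case 4:
                return authsize;
        default:
                return -EINVAL;
        }
}

int main(void)
{
        printf("authsize 0  -> %d\n", resolve_authsize(0));   /* 16 */
        printf("authsize 12 -> %d\n", resolve_authsize(12));  /* 12 */
        printf("authsize 10 -> %d\n", resolve_authsize(10));  /* -EINVAL */
        return 0;
}
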
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index d40ccc3af9e2..fa7ed01415b7 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+
+ config ISCSI_IBFT_FIND
+ bool "iSCSI Boot Firmware Table Attributes"
+- depends on X86 && ACPI
++ depends on X86 && ISCSI_IBFT
+ default n
+ help
+ This option enables the kernel to find the region of memory
+@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
+ config ISCSI_IBFT
+ tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
+- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
++ select ISCSI_IBFT_FIND if X86
++ depends on ACPI && SCSI && SCSI_LOWLEVEL
+ default n
+ help
+ This option enables support for detection and exposing of iSCSI
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index ab3aa3983833..7e12cbdf957c 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(IBFT_ISCSI_VERSION);
+
++#ifndef CONFIG_ISCSI_IBFT_FIND
++struct acpi_table_ibft *ibft_addr;
++#endif
++
+ struct ibft_hdr {
+ u8 id;
+ u8 version;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index ee6b646180b6..0a7adc2925e3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -608,8 +608,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
+
+ static void destruct(struct dc *dc)
+ {
+- dc_release_state(dc->current_state);
+- dc->current_state = NULL;
++ if (dc->current_state) {
++ dc_release_state(dc->current_state);
++ dc->current_state = NULL;
++ }
+
+ destroy_links(dc);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index a3ff33ff6da1..adf39e3b8d29 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2284,7 +2284,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream->link
+- == link)
++ == link) {
+ /* DMCU -1 for all controller id values,
+ * therefore +1 here
+ */
+@@ -2292,6 +2292,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
+ core_dc->current_state->
+ res_ctx.pipe_ctx[i].stream_res.tg->inst +
+ 1;
++
++ /* Disable brightness ramping when the display is blanked
++ * as it can hang the DMCU
++ */
++ if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
++ frame_ramp = 0;
++ }
+ }
+ }
+ abm->funcs->set_backlight_level_pwm(
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 253311864cdd..966aa3b754c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2218,11 +2218,18 @@ static void get_active_converter_info(
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ ddc_service_set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
++ link->dpcd_caps.is_branch_dev = false;
+ return;
+ }
+
+ /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
+- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
++ if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
++ link->dpcd_caps.is_branch_dev = false;
++ }
++
++ else {
++ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
++ }
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 12142d13f22f..b459ce056b60 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -254,7 +254,7 @@ bool resource_construct(
+ * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+ */
+ update_num_audio(&straps, &num_audio, &pool->audio_support);
+- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
++ for (i = 0; i < caps->num_audio; i++) {
+ struct audio *aud = create_funcs->create_audio(ctx, i);
+
+ if (aud == NULL) {
+@@ -1702,6 +1702,12 @@ static struct audio *find_first_free_audio(
+ return pool->audios[i];
+ }
+ }
++
++ /* use engine id to find free audio */
++ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
++ return pool->audios[id];
++ }
++
+ /*not found the matching one, first come first serve*/
+ for (i = 0; i < pool->audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+ pix_clk /= 2;
+ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (timing->display_color_depth) {
++ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ normalized_pix_clk = pix_clk;
+ break;
+@@ -2012,7 +2019,7 @@ enum dc_status resource_map_pool_resources(
+ /* TODO: Add check if ASIC support and EDID audio */
+ if (!stream->converter_disable_audio &&
+ dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+- stream->audio_info.mode_count) {
++ stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ pipe_ctx->stream_res.audio = find_first_free_audio(
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index 2959c3c9390b..da30ae04e82b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -234,6 +234,10 @@ static void dmcu_set_backlight_level(
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
++
++ /* waitDMCUReadyForCmd */
++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
++ 0, 1, 80000);
+ }
+
+ static void dce_abm_init(struct abm *abm)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9e4d70a0055e..5cc5dabf4d65 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1120,16 +1120,7 @@ static void dcn10_init_hw(struct dc *dc)
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct hubp *hubp = dc->res_pool->hubps[i];
+- struct dpp *dpp = dc->res_pool->dpps[i];
+-
+- hubp->funcs->hubp_init(hubp);
+- dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+- plane_atomic_power_down(dc, dpp, hubp);
+- }
+-
+- apply_DEGVIDCN10_253_wa(dc);
++ dc->hwss.init_pipes(dc, dc->current_state);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+@@ -1298,10 +1289,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ return result;
+ }
+
+-
+-
+-
+-
+ static bool
+ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+@@ -2416,6 +2403,12 @@ static void dcn10_apply_ctx_for_surface(
+ if (removed_pipe[i])
+ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+
++ for (i = 0; i < dc->res_pool->pipe_count; i++)
++ if (removed_pipe[i]) {
++ dc->hwss.optimize_bandwidth(dc, context);
++ break;
++ }
++
+ if (dc->hwseq->wa.DEGVIDCN10_254)
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 7eccb54c421d..aac52eed6b2a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -512,7 +512,7 @@ static const struct resource_caps rv2_res_cap = {
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 3,
+- .num_ddc = 3,
++ .num_ddc = 4,
+ };
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 6f5ab05d6467..6f0cc718fbd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -169,7 +169,7 @@ struct resource_pool {
+ struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
+ unsigned int clk_src_count;
+
+- struct audio *audios[MAX_PIPES];
++ struct audio *audios[MAX_AUDIOS];
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+index 4c8e2c6fb6db..72266efd826c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+@@ -34,6 +34,7 @@
+ * Data types shared between different Virtual HW blocks
+ ******************************************************************************/
+
++#define MAX_AUDIOS 7
+ #define MAX_PIPES 6
+
+ struct gamma_curve {
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index d8d75e25f6fb..45f6f11a88a7 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -830,7 +830,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
+ struct drm_device *dev = fb->dev;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+- struct drm_connector *conn;
++ struct drm_connector *conn __maybe_unused;
+ struct drm_connector_state *conn_state;
+ int i, ret;
+ unsigned plane_mask;
+diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
+index 5e7b1fb2db5d..8ea1c927dbad 100644
+--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
++++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
+@@ -394,8 +394,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
+ else
+ txesc2_div = 10;
+
+- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
+- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ }
+
+ /* Program BXT Mipi clocks and dividers */
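The change above stops writing the raw divisor into MIPIO_TXESC_CLK_DIV1/2; judging from the fixed lines, the field takes a one-hot selection in which a divide-by-N is programmed as 1 << (N - 1). A minimal sketch of that encoding, using an assumed 10-bit stand-in for GLK_TX_ESC_CLK_DIV1_MASK:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for GLK_TX_ESC_CLK_DIV1_MASK (assumed 10 bits). */
    #define TX_ESC_CLK_DIV_MASK 0x3ffu

    /* Encode a divide-by-div as the one-hot value the fixed code writes. */
    static uint32_t txesc_div_to_reg(unsigned int div)
    {
        return (1u << (div - 1)) & TX_ESC_CLK_DIV_MASK;
    }

    int main(void)
    {
        /* The old code wrote the raw divisor (10 -> 0x00a); the fixed
         * code writes the shifted form (10 -> 0x200). */
        printf("div=2  -> 0x%03x\n", txesc_div_to_reg(2));
        printf("div=10 -> 0x%03x\n", txesc_div_to_reg(10));
        return 0;
    }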
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 0ea150196659..c62f7abcf509 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2226,8 +2226,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ if (ret)
+ goto fail;
+
+- spin_lock_init(&dpu_enc->enc_spinlock);
+-
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+@@ -2281,6 +2279,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
++ spin_lock_init(&dpu_enc->enc_spinlock);
+ dpu_enc->enabled = false;
+
+ return &dpu_enc->base;
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 93942063b51b..49dd2d905c7f 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
+ static inline void sony_schedule_work(struct sony_sc *sc,
+ enum sony_worker which)
+ {
++ unsigned long flags;
++
+ switch (which) {
+ case SONY_WORKER_STATE:
+- if (!sc->defer_initialization)
++ spin_lock_irqsave(&sc->lock, flags);
++ if (!sc->defer_initialization && sc->state_worker_initialized)
+ schedule_work(&sc->state_worker);
++ spin_unlock_irqrestore(&sc->lock, flags);
+ break;
+ case SONY_WORKER_HOTPLUG:
+ if (sc->hotplug_worker_initialized)
+@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
+
+ static inline void sony_cancel_work_sync(struct sony_sc *sc)
+ {
++ unsigned long flags;
++
+ if (sc->hotplug_worker_initialized)
+ cancel_work_sync(&sc->hotplug_worker);
+- if (sc->state_worker_initialized)
++ if (sc->state_worker_initialized) {
++ spin_lock_irqsave(&sc->lock, flags);
++ sc->state_worker_initialized = 0;
++ spin_unlock_irqrestore(&sc->lock, flags);
+ cancel_work_sync(&sc->state_worker);
++ }
+ }
+
+-
+ static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+ {
+diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
+index 3fb9c0a2d6d0..ce5ec403ec73 100644
+--- a/drivers/hwmon/lm75.c
++++ b/drivers/hwmon/lm75.c
+@@ -343,7 +343,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ data->sample_time = MSEC_PER_SEC / 2;
+ break;
+ case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
+- clr_mask |= 1 << 15 | 0x3 << 13;
++ clr_mask |= 1 << 7 | 0x3 << 5;
+ data->resolution = 12;
+ data->sample_time = MSEC_PER_SEC / 37;
+ break;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index e7dff5febe16..d42bc0883a32 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+ static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+
+@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
++ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index ec7bcf8d7cd6..f3dd2a17bd42 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = {
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_beep.dev_attr.attr,
+
+- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
++ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+@@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
+
+ if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
+ return 0;
+- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
++ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ return 0;
+- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
++ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
+ return 0;
+
+ return attr->mode;
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 13a6290c8d25..f02aa403332c 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -402,8 +402,10 @@ static ssize_t occ_show_power_1(struct device *dev,
+
+ static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
+ {
+- return div64_u64(get_unaligned_be64(accum) * 1000000ULL,
+- get_unaligned_be32(samples));
++ u64 divisor = get_unaligned_be32(samples);
++
++ return (divisor == 0) ? 0 :
++ div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
+ }
+
+ static ssize_t occ_show_power_2(struct device *dev,
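The helper patched above computes an average from a big-endian 64-bit accumulator and a 32-bit sample count, scaled by 1000000 as in the driver; the functional change is simply returning 0 while no samples have been collected instead of dividing by zero. A standalone sketch of the same arithmetic, with the byte-order handling dropped for brevity:

    #include <stdio.h>
    #include <stdint.h>

    /* Average from accumulator and sample count, with the
     * zero-sample guard the fix introduces. */
    static uint64_t occ_power_avg(uint64_t accum, uint32_t samples)
    {
        uint64_t divisor = samples;

        return (divisor == 0) ? 0 : (accum * 1000000ULL) / divisor;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)occ_power_avg(250, 100)); /* 2500000 */
        printf("%llu\n", (unsigned long long)occ_power_avg(250, 0));   /* 0, no trap */
        return 0;
    }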
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 3c6294432748..1ef098ff27c3 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+ /* See function coresight_get_sink_by_id() to know where this is used */
+ hash = hashlen_hash(hashlen_string(NULL, name));
+
++ sysfs_attr_init(&ea->attr.attr);
+ ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ if (!ea->attr.attr.name)
+ return -ENOMEM;
+diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
+index 46bb2e421bb9..ad19d9c716f4 100644
+--- a/drivers/iio/accel/cros_ec_accel_legacy.c
++++ b/drivers/iio/accel/cros_ec_accel_legacy.c
+@@ -319,7 +319,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
+ .modified = 1, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+- BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = cros_ec_accel_legacy_ext_info, \
+diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
+index 92b1d5037ac9..e234970b7150 100644
+--- a/drivers/iio/adc/ingenic-adc.c
++++ b/drivers/iio/adc/ingenic-adc.c
+@@ -11,6 +11,7 @@
+ #include <linux/iio/iio.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
+@@ -22,8 +23,11 @@
+ #define JZ_ADC_REG_ADTCH 0x18
+ #define JZ_ADC_REG_ADBDAT 0x1c
+ #define JZ_ADC_REG_ADSDAT 0x20
++#define JZ_ADC_REG_ADCLK 0x28
+
+ #define JZ_ADC_REG_CFG_BAT_MD BIT(4)
++#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
++#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+
+ #define JZ_ADC_AUX_VREF 3300
+ #define JZ_ADC_AUX_VREF_BITS 12
+@@ -34,6 +38,8 @@
+ #define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
+ #define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+
++struct ingenic_adc;
++
+ struct ingenic_adc_soc_data {
+ unsigned int battery_high_vref;
+ unsigned int battery_high_vref_bits;
+@@ -41,6 +47,7 @@ struct ingenic_adc_soc_data {
+ size_t battery_raw_avail_size;
+ const int *battery_scale_avail;
+ size_t battery_scale_avail_size;
++ int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
+ };
+
+ struct ingenic_adc {
+@@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = {
+ JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
+ };
+
++static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
++{
++ struct clk *parent_clk;
++ unsigned long parent_rate, rate;
++ unsigned int div_main, div_10us;
++
++ parent_clk = clk_get_parent(adc->clk);
++ if (!parent_clk) {
++ dev_err(dev, "ADC clock has no parent\n");
++ return -ENODEV;
++ }
++ parent_rate = clk_get_rate(parent_clk);
++
++ /*
++ * The JZ4725B ADC works at 500 kHz to 8 MHz.
++ * We pick the highest rate possible.
++ * In practice we typically get 6 MHz, half of the 12 MHz EXT clock.
++ */
++ div_main = DIV_ROUND_UP(parent_rate, 8000000);
++ div_main = clamp(div_main, 1u, 64u);
++ rate = parent_rate / div_main;
++ if (rate < 500000 || rate > 8000000) {
++ dev_err(dev, "No valid divider for ADC main clock\n");
++ return -EINVAL;
++ }
++
++ /* We also need a divider that produces a 10us clock. */
++ div_10us = DIV_ROUND_UP(rate, 100000);
++
++ writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
++ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
++ adc->base + JZ_ADC_REG_ADCLK);
++
++ return 0;
++}
++
+ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
+ .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
+ .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
+@@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
+ .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
+ .battery_scale_avail = jz4725b_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
++ .init_clk_div = jz4725b_adc_init_clk_div,
+ };
+
+ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
+@@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
+ .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
+ .battery_scale_avail = jz4740_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
++ .init_clk_div = NULL, /* no ADCLK register on JZ4740 */
+ };
+
+ static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
+@@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ /* Set clock dividers. */
++ if (soc_data->init_clk_div) {
++ ret = soc_data->init_clk_div(dev, adc);
++ if (ret) {
++ clk_disable_unprepare(adc->clk);
++ return ret;
++ }
++ }
++
+ /* Put hardware in a known passive state. */
+ writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
+ writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
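With the 12 MHz EXT clock mentioned in the comment above, the divider math works out to div_main = DIV_ROUND_UP(12 MHz, 8 MHz) = 2, giving a 6 MHz ADC clock, and div_10us = DIV_ROUND_UP(6 MHz, 100 kHz) = 60, i.e. a 10 us tick. A small self-contained version of the computation:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long parent_rate = 12000000; /* 12 MHz EXT clock */
        unsigned int div_main, div_10us;
        unsigned long rate;

        /* Highest ADC rate not above 8 MHz. */
        div_main = DIV_ROUND_UP(parent_rate, 8000000);
        rate = parent_rate / div_main;

        /* Divider producing a 10 us (100 kHz) tick from the ADC clock. */
        div_10us = DIV_ROUND_UP(rate, 100000);

        printf("div_main=%u rate=%lu Hz div_10us=%u\n",
               div_main, rate, div_10us); /* div_main=2 rate=6000000 div_10us=60 */
        return 0;
    }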
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index 917223d5ff5b..0e3c6529fc4c 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -83,7 +83,7 @@
+ #define MAX9611_TEMP_MAX_POS 0x7f80
+ #define MAX9611_TEMP_MAX_NEG 0xff80
+ #define MAX9611_TEMP_MIN_NEG 0xd980
+-#define MAX9611_TEMP_MASK GENMASK(7, 15)
++#define MAX9611_TEMP_MASK GENMASK(15, 7)
+ #define MAX9611_TEMP_SHIFT 0x07
+ #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
+ #define MAX9611_TEMP_SCALE_NUM 1000000
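The max9611 fix above is an argument-order correction: GENMASK() takes the high bit first, so GENMASK(15, 7) produces the intended 0xff80 (compare MAX9611_TEMP_MAX_NEG above), while the swapped GENMASK(7, 15) yields an empty mask. A self-contained check using a simplified form of the macro:

    #include <stdio.h>

    /* Simplified GENMASK(h, l): set bits h..l, high bit first. */
    #define BITS_PER_LONG (sizeof(long) * 8)
    #define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

    int main(void)
    {
        printf("GENMASK(15, 7) = 0x%lx\n", GENMASK(15, 7)); /* 0xff80 */
        printf("GENMASK(7, 15) = 0x%lx\n", GENMASK(7, 15)); /* 0x0, the old bug */
        return 0;
    }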
+diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
+index 2c0d0316d149..b373acce5927 100644
+--- a/drivers/iio/adc/rcar-gyroadc.c
++++ b/drivers/iio/adc/rcar-gyroadc.c
+@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ dev_err(dev,
+ "Only %i channels supported with %pOFn, but reg = <%i>.\n",
+ num_channels, child, reg);
+- return ret;
++ return -EINVAL;
+ }
+ }
+
+@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ dev_err(dev,
+ "Channel %i uses different ADC mode than the rest.\n",
+ reg);
+- return ret;
++ return -EINVAL;
+ }
+
+ /* Channel is valid, grab the regulator. */
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 53a59957cc54..8a704cd5bddb 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+ };
+
++static const unsigned long inv_mpu_scan_masks[] = {
++ /* 3-axis accel */
++ BIT(INV_MPU6050_SCAN_ACCL_X)
++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
++ | BIT(INV_MPU6050_SCAN_ACCL_Z),
++ /* 3-axis gyro */
++ BIT(INV_MPU6050_SCAN_GYRO_X)
++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
++ /* 6-axis accel + gyro */
++ BIT(INV_MPU6050_SCAN_ACCL_X)
++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
++ | BIT(INV_MPU6050_SCAN_ACCL_Z)
++ | BIT(INV_MPU6050_SCAN_GYRO_X)
++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
++ 0,
++};
++
+ static const struct iio_chan_spec inv_icm20602_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
+ {
+@@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = {
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
+ };
+
++static const unsigned long inv_icm20602_scan_masks[] = {
++ /* 3-axis accel + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_ACCL_X)
++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ /* 3-axis gyro + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_GYRO_X)
++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ /* 6-axis accel + gyro + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_ACCL_X)
++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
++ | BIT(INV_ICM20602_SCAN_GYRO_X)
++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ 0,
++};
++
+ /*
+ * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
+ * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
+@@ -1130,9 +1171,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ if (chip_type == INV_ICM20602) {
+ indio_dev->channels = inv_icm20602_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
++ indio_dev->available_scan_masks = inv_icm20602_scan_masks;
+ } else {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
++ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
+
+ indio_dev->info = &mpu_info;
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index a4345052abd2..a47c7add4e0e 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1807,6 +1807,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
+ leave_breadcrumbs);
+ }
+
++static bool elantech_use_host_notify(struct psmouse *psmouse,
++ struct elantech_device_info *info)
++{
++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
++ return true;
++
++ switch (info->bus) {
++ case ETP_BUS_PS2_ONLY:
++ /* expected case */
++ break;
++ case ETP_BUS_SMB_HST_NTFY_ONLY:
++ case ETP_BUS_PS2_SMB_HST_NTFY:
++ /* SMBus implementation is stable since 2018 */
++ if (dmi_get_bios_year() >= 2018)
++ return true;
++ default:
++ psmouse_dbg(psmouse,
++ "Ignoring SMBus bus provider %d\n", info->bus);
++ break;
++ }
++
++ return false;
++}
++
+ /**
+ * elantech_setup_smbus - called once the PS/2 devices are enumerated
+ * and decides to instantiate a SMBus InterTouch device.
+@@ -1826,7 +1850,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ * i2c_blacklist_pnp_ids.
+ * Old ICs are up to the user to decide.
+ */
+- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
++ if (!elantech_use_host_notify(psmouse, info) ||
+ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
+ return -ENXIO;
+ }
+@@ -1846,34 +1870,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ return 0;
+ }
+
+-static bool elantech_use_host_notify(struct psmouse *psmouse,
+- struct elantech_device_info *info)
+-{
+- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+- return true;
+-
+- switch (info->bus) {
+- case ETP_BUS_PS2_ONLY:
+- /* expected case */
+- break;
+- case ETP_BUS_SMB_ALERT_ONLY:
+- /* fall-through */
+- case ETP_BUS_PS2_SMB_ALERT:
+- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+- break;
+- case ETP_BUS_SMB_HST_NTFY_ONLY:
+- /* fall-through */
+- case ETP_BUS_PS2_SMB_HST_NTFY:
+- return true;
+- default:
+- psmouse_dbg(psmouse,
+- "Ignoring SMBus bus provider %d.\n",
+- info->bus);
+- }
+-
+- return false;
+-}
+-
+ int elantech_init_smbus(struct psmouse *psmouse)
+ {
+ struct elantech_device_info info;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7f8f4780b511..c0e188cd3811 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN2055", /* E580 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
++ "SYN323d", /* HP Spectre X360 13-w013dx */
+ NULL
+ };
+
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index a2cec6cacf57..16d70201de4a 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf,
+ if (!usbtouch || !input_dev)
+ goto out_free;
+
++ mutex_init(&usbtouch->pm_mutex);
++
+ type = &usbtouch_dev_info[id->driver_info];
+ usbtouch->type = type;
+ if (!type->process_pkt)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 2101601adf57..1ad24367373f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1900,7 +1900,6 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+
+ static void domain_exit(struct dmar_domain *domain)
+ {
+- struct page *freelist;
+
+ /* Remove associated devices and clear attached or cached domains */
+ rcu_read_lock();
+@@ -1910,9 +1909,12 @@ static void domain_exit(struct dmar_domain *domain)
+ /* destroy iovas */
+ put_iova_domain(&domain->iovad);
+
+- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
++ if (domain->pgd) {
++ struct page *freelist;
+
+- dma_free_pagelist(freelist);
++ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
++ dma_free_pagelist(freelist);
++ }
+
+ free_domain_mem(domain);
+ }
+diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
+index ed5cefb83768..89deb451e0ac 100644
+--- a/drivers/mmc/host/cavium.c
++++ b/drivers/mmc/host/cavium.c
+@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+ {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
++ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+ return 1;
+ }
+
+@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
+ mmc->max_segs = 1;
+
+ /* DMA size field can address up to 8 MB */
+- mmc->max_seg_size = 8 * 1024 * 1024;
++ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
++ dma_get_max_seg_size(host->dev));
+ mmc->max_req_size = mmc->max_seg_size;
+ /* External DMA is in 512 byte blocks */
+ mmc->max_blk_size = 512;
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index f2fe344593d5..fcec8bcb53d6 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -400,9 +400,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+ priv->write(reg_mcr, &regs->mcr);
+ }
+
+-static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
++static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int ackval;
+ u32 reg_mcr;
+
+ reg_mcr = priv->read(&regs->mcr);
+@@ -412,20 +413,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ /* enable stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
++
++ /* get stop acknowledgment */
++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
++ ackval, ackval & (1 << priv->stm.ack_bit),
++ 0, FLEXCAN_TIMEOUT_US))
++ return -ETIMEDOUT;
++
++ return 0;
+ }
+
+-static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
++static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int ackval;
+ u32 reg_mcr;
+
+ /* remove stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+
++ /* get stop acknowledgment */
++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
++ ackval, !(ackval & (1 << priv->stm.ack_bit)),
++ 0, FLEXCAN_TIMEOUT_US))
++ return -ETIMEDOUT;
++
+ reg_mcr = priv->read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+ priv->write(reg_mcr, &regs->mcr);
++
++ return 0;
+ }
+
+ static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+@@ -1437,10 +1455,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+
+ priv = netdev_priv(dev);
+ priv->stm.gpr = syscon_node_to_regmap(gpr_np);
+- of_node_put(gpr_np);
+ if (IS_ERR(priv->stm.gpr)) {
+ dev_dbg(&pdev->dev, "could not find gpr regmap\n");
+- return PTR_ERR(priv->stm.gpr);
++ ret = PTR_ERR(priv->stm.gpr);
++ goto out_put_node;
+ }
+
+ priv->stm.req_gpr = out_val[1];
+@@ -1455,7 +1473,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+- return 0;
++out_put_node:
++ of_node_put(gpr_np);
++ return ret;
+ }
+
+ static const struct of_device_id flexcan_of_match[] = {
+@@ -1612,7 +1632,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ */
+ if (device_may_wakeup(device)) {
+ enable_irq_wake(dev->irq);
+- flexcan_enter_stop_mode(priv);
++ err = flexcan_enter_stop_mode(priv);
++ if (err)
++ return err;
+ } else {
+ err = flexcan_chip_disable(priv);
+ if (err)
+@@ -1662,10 +1684,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
++ int err;
+
+ if (netif_running(dev) && device_may_wakeup(device)) {
+ flexcan_enable_wakeup_irq(priv, false);
+- flexcan_exit_stop_mode(priv);
++ err = flexcan_exit_stop_mode(priv);
++ if (err)
++ return err;
+ }
+
+ return 0;
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 05410008aa6b..de34a4b82d4a 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete_done(napi, num_pkts);
+- /* Enable Rx FIFO interrupts */
+- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+- RCANFD_RFCC_RFIE);
++ if (napi_complete_done(napi, num_pkts)) {
++ /* Enable Rx FIFO interrupts */
++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
++ RCANFD_RFCC_RFIE);
++ }
+ }
+ return num_pkts;
+ }
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 458154c9b482..22b9c8e6d040 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
+ dev->state &= ~PCAN_USB_STATE_STARTED;
+ netif_stop_queue(netdev);
+
++ close_candev(netdev);
++
++ dev->can.state = CAN_STATE_STOPPED;
++
+ /* unlink all pending urbs and free used memory */
+ peak_usb_unlink_all_urbs(dev);
+
+ if (dev->adapter->dev_stop)
+ dev->adapter->dev_stop(dev);
+
+- close_candev(netdev);
+-
+- dev->can.state = CAN_STATE_STOPPED;
+-
+ /* can set bus off now */
+ if (dev->adapter->dev_set_bus) {
+ int err = dev->adapter->dev_set_bus(dev, 0);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 34761c3a6286..47cc1ff5b88e 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+ goto err_out;
+
+ /* allocate command buffer once for all for the interface */
+- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
++ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!pdev->cmd_buffer_addr)
+ goto err_out_1;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index 178bb7cff0c1..53cb2f72bdd0 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+ u8 *buffer;
+ int err;
+
+- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
++ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index cfaf8f618d1f..56742fa0c1af 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+- spin_lock_init(&new->lock);
++ if (new)
++ spin_lock_init(&new->lock);
+ return new;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 559f6df1a74d..5af9959d05e5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -753,7 +753,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+- static int pos = 3;
++ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+@@ -874,6 +874,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ }
+
++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
++{
++ /*
++ * The GEO_TX_POWER_LIMIT command is not supported on earlier
++ * firmware versions. Unfortunately, we don't have a TLV API
++ * flag to rely on, so rely on the major version which is in
++ * the first byte of ucode_ver. This was implemented
++ * initially on version 38 and then backported to 36, 29 and
++ * 17.
++ */
++ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
++}
++
+ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ {
+ struct iwl_geo_tx_power_profiles_resp *resp;
+@@ -889,6 +905,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ .data = { &geo_cmd },
+ };
+
++ if (!iwl_mvm_sar_geo_support(mvm))
++ return -EOPNOTSUPP;
++
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+@@ -914,13 +933,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ int ret, i, j;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+- /*
+- * This command is not supported on earlier firmware versions.
+- * Unfortunately, we don't have a TLV API flag to rely on, so
+- * rely on the major version which is in the first byte of
+- * ucode_ver.
+- */
+- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++ if (!iwl_mvm_sar_geo_support(mvm))
+ return 0;
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 96f8d38ea321..a12ee20fb9ab 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ unsigned int tcp_payload_len;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
++ bool qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ if (tcp_payload_len > mss) {
+ skb_shinfo(tmp)->gso_size = mss;
+ } else {
+- if (ieee80211_is_data_qos(hdr->frame_control)) {
++ if (qos) {
+ u8 *qc;
+
+ if (ipv4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index fa4245d0d4a8..2f0ba7ef53b8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ DMA_TO_DEVICE);
+ }
+
++ meta->tbs = 0;
++
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index b025ba164412..e39bb5c42c9a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -124,6 +124,7 @@ enum {
+
+ #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+
++#define WPA_GTK_OUI_OFFSET 2
+ #define RSN_GTK_OUI_OFFSET 2
+
+ #define MWIFIEX_OUI_NOT_PRESENT 0
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index e2786ab612ca..dd02bbd9544e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
+ u8 ret = MWIFIEX_OUI_NOT_PRESENT;
+
+ if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
+- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
++ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
++ WPA_GTK_OUI_OFFSET);
+ oui = &mwifiex_wpa_oui[cipher][0];
+ ret = mwifiex_search_oui_in_ie(iebody, oui);
+ if (ret)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 4a1d2ab4d161..5deb4deb3820 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2264,17 +2264,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
+ memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
+ }
+
+-static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
++static void nvme_release_subsystem(struct device *dev)
+ {
++ struct nvme_subsystem *subsys =
++ container_of(dev, struct nvme_subsystem, dev);
++
+ ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
+ kfree(subsys);
+ }
+
+-static void nvme_release_subsystem(struct device *dev)
+-{
+- __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
+-}
+-
+ static void nvme_destroy_subsystem(struct kref *ref)
+ {
+ struct nvme_subsystem *subsys =
+@@ -2429,7 +2427,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ mutex_lock(&nvme_subsystems_lock);
+ found = __nvme_find_get_subsystem(subsys->subnqn);
+ if (found) {
+- __nvme_release_subsystem(subsys);
++ put_device(&subsys->dev);
+ subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7fbcd72c438f..f9959eaaa185 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2959,6 +2959,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
++ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
++ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 720da09d4d73..088fcdc8d2b4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1004,10 +1004,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+ if (state == PCI_D0) {
+ pci_platform_power_transition(dev, PCI_D0);
+ /*
+- * Mandatory power management transition delays are
+- * handled in the PCIe portdrv resume hooks.
++ * Mandatory power management transition delays, see
++ * PCI Express Base Specification Revision 2.0 Section
++ * 6.6.1: Conventional Reset. Do not delay for
++ * devices powered on/off by the corresponding bridge,
++ * because we have already delayed for the bridge.
+ */
+ if (dev->runtime_d3cold) {
++ if (dev->d3cold_delay && !dev->imm_ready)
++ msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+ * whole hierarchy may be powered on into
+@@ -4570,16 +4575,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
+
+ return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ }
+-
+ /**
+- * pcie_wait_for_link_delay - Wait until link is active or inactive
++ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+- * @delay: Delay to wait after link has become active (in ms)
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
++bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+ {
+ int timeout = 1000;
+ bool ret;
+@@ -4616,25 +4619,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
+ timeout -= 10;
+ }
+ if (active && ret)
+- msleep(delay);
++ msleep(100);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
+ }
+
+-/**
+- * pcie_wait_for_link - Wait until link is active or inactive
+- * @pdev: Bridge device
+- * @active: waiting for active or inactive?
+- *
+- * Use this to wait till link becomes active or inactive.
+- */
+-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+-{
+- return pcie_wait_for_link_delay(pdev, active, 100);
+-}
+-
+ void pci_reset_secondary_bus(struct pci_dev *dev)
+ {
+ u16 ctrl;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 59802b3def4b..9cb99380c61e 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -493,7 +493,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
+
+-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
+ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index 308c3e0c4a34..1b330129089f 100644
+--- a/drivers/pci/pcie/portdrv_core.c
++++ b/drivers/pci/pcie/portdrv_core.c
+@@ -9,7 +9,6 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/kernel.h>
+-#include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data)
+ return 0;
+ }
+
+-static int get_downstream_delay(struct pci_bus *bus)
+-{
+- struct pci_dev *pdev;
+- int min_delay = 100;
+- int max_delay = 0;
+-
+- list_for_each_entry(pdev, &bus->devices, bus_list) {
+- if (!pdev->imm_ready)
+- min_delay = 0;
+- else if (pdev->d3cold_delay < min_delay)
+- min_delay = pdev->d3cold_delay;
+- if (pdev->d3cold_delay > max_delay)
+- max_delay = pdev->d3cold_delay;
+- }
+-
+- return max(min_delay, max_delay);
+-}
+-
+-/*
+- * wait_for_downstream_link - Wait for downstream link to establish
+- * @pdev: PCIe port whose downstream link is waited
+- *
+- * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
+- * access to the downstream component is permitted.
+- *
+- * This blocks PCI core resume of the hierarchy below this port until the
+- * link is trained. Should be called before resuming port services to
+- * prevent pciehp from starting to tear-down the hierarchy too soon.
+- */
+-static void wait_for_downstream_link(struct pci_dev *pdev)
+-{
+- int delay;
+-
+- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+- return;
+-
+- if (pci_dev_is_disconnected(pdev))
+- return;
+-
+- if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
+- !pdev->bridge_d3)
+- return;
+-
+- delay = get_downstream_delay(pdev->subordinate);
+- if (!delay)
+- return;
+-
+- dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
+-
+- /*
+- * If downstream port does not support speeds greater than 5 GT/s
+- * need to wait 100ms. For higher speeds (gen3) we need to wait
+- * first for the data link layer to become active.
+- */
+- if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
+- msleep(delay);
+- else
+- pcie_wait_for_link_delay(pdev, true, delay);
+-}
+-
+ /**
+ * pcie_port_device_suspend - suspend port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+@@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev)
+ int pcie_port_device_resume_noirq(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+-
+- wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+
+@@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev)
+ int pcie_port_device_runtime_resume(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+-
+- wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+ #endif /* PM */
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 730c4e68094b..7f5adf02f095 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -1558,13 +1558,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
++ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
++ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
++ state == SLSB_CU_OUTPUT_PRIMED) {
++ /* The previous buffer is not processed yet, tack on. */
++ qperf_inc(q, fast_requeue);
+ } else {
+- /* try to fast requeue buffers */
+- get_buf_state(q, prev_buf(bufnr), &state, 0);
+- if (state != SLSB_CU_OUTPUT_PRIMED)
+- rc = qdio_kick_outbound_q(q, 0);
+- else
+- qperf_inc(q, fast_requeue);
++ rc = qdio_kick_outbound_q(q, 0);
+ }
+
+ /* in case of SIGA errors we must process the error immediately */
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 0e79799e9a71..79eb40bdaf9f 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
+ sizeof(*pa->pa_iova_pfn) +
+ sizeof(*pa->pa_pfn),
+ GFP_KERNEL);
+- if (unlikely(!pa->pa_iova_pfn))
++ if (unlikely(!pa->pa_iova_pfn)) {
++ pa->pa_nr = 0;
+ return -ENOMEM;
++ }
+ pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+
+ pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 9125f7f4e64c..8a8fbde7e186 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
+ if (scsw_is_solicited(&irb->scsw)) {
+ cp_update_scsw(&private->cp, &irb->scsw);
+- if (is_final)
++ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+ cp_free(&private->cp);
+ }
+ mutex_lock(&private->io_mutex);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index f0066f8a1786..4971104b1817 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -40,6 +40,7 @@
+ #define ALUA_FAILOVER_TIMEOUT 60
+ #define ALUA_FAILOVER_RETRIES 5
+ #define ALUA_RTPG_DELAY_MSECS 5
++#define ALUA_RTPG_RETRY_DELAY 2
+
+ /* device handler flags */
+ #define ALUA_OPTIMIZE_STPG 0x01
+@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ if (time_before(jiffies, pg->expiry)) {
+ /* State transition, retry */
+- pg->interval = 2;
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ err = SCSI_DH_RETRY;
+ } else {
+ struct alua_dh_data *h;
+@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
++ if (!pg->interval)
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags &= ~ALUA_PG_RUNNING;
++ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index acd16e0d52cf..8cdbac076a1b 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+- ibmvfc_free_event_pool(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ ibmvfc_free_event_pool(vhost);
+
+ ibmvfc_free_mem(vhost);
+ spin_lock(&ibmvfc_driver_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7237114a1d53..5f30016e9b64 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3045,6 +3045,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
++ unsigned long chunk_left_bytes;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+@@ -3070,6 +3071,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
++ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
++ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
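The crash buffer is a list of DMA chunks of dmachunk bytes, and buff_offset can land mid-chunk; the new clamp keeps each copy within the current chunk so the subsequent copy from src_addr cannot cross a chunk boundary. Worked numbers, taking CRASH_DMA_BUF_SIZE as 1 MiB purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long dmachunk = 1024 * 1024;        /* assumed chunk size */
        unsigned long fw_crash_buffer_size = 4;      /* in chunks */
        unsigned long buff_offset = 512;             /* mid-chunk offset */
        unsigned long size, chunk_left_bytes;

        size = fw_crash_buffer_size * dmachunk - buff_offset;
        /* The fix: never copy past the end of the current chunk. */
        chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
        size = (size > chunk_left_bytes) ? chunk_left_bytes : size;

        /* Without the clamp this would be 4193792 bytes, reaching far
         * beyond the single chunk that holds the offset. */
        printf("copy %lu bytes\n", size); /* 1048064 */
        return 0;
    }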
+diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
+index fd4995fb676e..f85ec5b16b65 100644
+--- a/drivers/staging/android/ion/ion_page_pool.c
++++ b/drivers/staging/android/ion/ion_page_pool.c
+@@ -8,11 +8,14 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/swap.h>
++#include <linux/sched/signal.h>
+
+ #include "ion.h"
+
+ static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+ {
++ if (fatal_signal_pending(current))
++ return NULL;
+ return alloc_pages(pool->gfp_mask, pool->order);
+ }
+
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index 9b07badf4c6c..bc750250ccd6 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
+ struct gpio_desc **gpiop)
+ {
+ struct device *dev = par->info->device;
+- struct device_node *node = dev->of_node;
+ int ret = 0;
+
+- if (of_find_property(node, name, NULL)) {
+- *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index,
+- GPIOD_OUT_HIGH);
+- if (IS_ERR(*gpiop)) {
+- ret = PTR_ERR(*gpiop);
+- dev_err(dev,
+- "Failed to request %s GPIO:%d\n", name, ret);
+- return ret;
+- }
+- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+- __func__, name);
++ *gpiop = devm_gpiod_get_index_optional(dev, name, index,
++ GPIOD_OUT_HIGH);
++ if (IS_ERR(*gpiop)) {
++ ret = PTR_ERR(*gpiop);
++ dev_err(dev,
++ "Failed to request %s GPIO: %d\n", name, ret);
++ return ret;
+ }
++ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
++ __func__, name);
+
+ return ret;
+ }
+@@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
+ if (!par->info->device->of_node)
+ return -EINVAL;
+
+- ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
++ ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
++ ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
++ ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
++ ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
++ ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
++ ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
+ if (ret)
+ return ret;
+ for (i = 0; i < 16; i++) {
+- ret = fbtft_request_one_gpio(par, "db-gpios", i,
++ ret = fbtft_request_one_gpio(par, "db", i,
+ &par->gpio.db[i]);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "led-gpios", i,
++ ret = fbtft_request_one_gpio(par, "led", i,
+ &par->gpio.led[i]);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "aux-gpios", i,
++ ret = fbtft_request_one_gpio(par, "aux", i,
+ &par->gpio.aux[i]);
+ if (ret)
+ return ret;
+@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par)
+ if (!par->gpio.reset)
+ return;
+ fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
+- gpiod_set_value_cansleep(par->gpio.reset, 0);
+- usleep_range(20, 40);
+ gpiod_set_value_cansleep(par->gpio.reset, 1);
++ usleep_range(20, 40);
++ gpiod_set_value_cansleep(par->gpio.reset, 0);
+ msleep(120);
+ }
+
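The reversed levels in fbtft_reset() follow from gpiod semantics: gpiod_set_value_cansleep() takes a logical value where 1 means "asserted", and gpiolib applies any active-low polarity declared in the device tree, so asserting reset means writing 1. A minimal model of that logical-to-physical mapping (a sketch, not the gpiolib implementation):

    #include <stdio.h>
    #include <stdbool.h>

    /* gpiolib applies the line polarity; drivers only speak in
     * logical asserted/deasserted values. */
    static int physical_level(bool active_low, int logical)
    {
        return active_low ? !logical : logical;
    }

    int main(void)
    {
        /* A reset line flagged GPIO_ACTIVE_LOW in the device tree:
         * asserting it (logical 1) drives the pin low. */
        printf("assert:   pin=%d\n", physical_level(true, 1)); /* 0 */
        printf("deassert: pin=%d\n", physical_level(true, 0)); /* 1 */
        return 0;
    }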
+diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
+index 2be45ee9d061..464648ee2036 100644
+--- a/drivers/staging/gasket/apex_driver.c
++++ b/drivers/staging/gasket/apex_driver.c
+@@ -532,7 +532,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
+ break;
+ case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+- gasket_page_table_num_entries(
++ gasket_page_table_num_simple_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
+diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+index f6825727bf77..3592b545246c 100644
+--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+@@ -1789,6 +1789,7 @@ void wilc_deinit_host_int(struct net_device *net)
+
+ priv->p2p_listen_state = false;
+
++ flush_workqueue(vif->wilc->hif_workqueue);
+ mutex_destroy(&priv->scan_req_lock);
+ ret = wilc_deinit(vif);
+
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 717292c1c0df..60ff236a3d63 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
+
+ list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
+ tsk = waiter->task;
+- smp_mb();
+- waiter->task = NULL;
++ smp_store_release(&waiter->task, NULL);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+- if (!waiter.task)
++ if (!smp_load_acquire(&waiter.task))
+ break;
+ if (!timeout)
+ break;
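The tty_ldsem change pairs a store-release in the waker with a load-acquire in the sleeping reader: once the reader observes waiter->task == NULL, everything the waker wrote beforehand is guaranteed visible, which is what lets the standalone smp_mb() go. A rough C11/pthreads sketch of the same pairing (names are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic(int *) task_ptr;
    static int wake_data;   /* published before the release store */
    static int the_task = 1;

    static void *waker(void *arg)
    {
        wake_data = 42;     /* ordered before the store-release */
        atomic_store_explicit(&task_ptr, NULL, memory_order_release);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        atomic_store(&task_ptr, &the_task);
        pthread_create(&t, NULL, waker, NULL);

        /* The sleeper's loop: an acquire load of the same location.
         * Seeing NULL guarantees wake_data is visible. */
        while (atomic_load_explicit(&task_ptr, memory_order_acquire))
            ;
        printf("wake_data=%d\n", wake_data); /* always 42 */

        pthread_join(t, NULL);
        return 0;
    }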
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a02448105527..86130e8d35f9 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1788,8 +1788,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ return 0;
+
+ error:
+- if (as && as->usbm)
+- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index 671bce18782c..8616c52849c6 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
++ *
++ * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
++ * and processing slows down when the roothub port enters U3, a
++ * long delay for the STS_HALT handshake is needed in xhci_suspend().
+ */
+ if (xhci_rcar_is_gen2(hcd->self.controller) ||
+- xhci_rcar_is_gen3(hcd->self.controller))
+- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++ xhci_rcar_is_gen3(hcd->self.controller)) {
++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
++ }
+
+ if (!xhci_rcar_wait_for_pll_active(hcd))
+ return -ETIMEDOUT;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index ba05dd80a020..f5bed9f29e56 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
+ dev = usb_get_intfdata(interface);
+ mutex_lock(&iowarrior_open_disc_lock);
+ usb_set_intfdata(interface, NULL);
++ /* prevent device read, write and ioctl */
++ dev->present = 0;
+
+ minor = dev->minor;
++ mutex_unlock(&iowarrior_open_disc_lock);
++ /* give back our minor - this will call close(); locks need to be dropped at this point */
+
+- /* give back our minor */
+ usb_deregister_dev(interface, &iowarrior_class);
+
+ mutex_lock(&dev->mutex);
+
+ /* prevent device read, write and ioctl */
+- dev->present = 0;
+
+ mutex_unlock(&dev->mutex);
+- mutex_unlock(&iowarrior_open_disc_lock);
+
+ if (dev->opened) {
+ /* There is a process that holds a filedescriptor to the device ,
+diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
+index 27e9c78a791e..a32d61a79ab8 100644
+--- a/drivers/usb/misc/rio500.c
++++ b/drivers/usb/misc/rio500.c
+@@ -51,6 +51,7 @@ struct rio_usb_data {
+ char *obuf, *ibuf; /* transfer buffers */
+ char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
+ wait_queue_head_t wait_q; /* for timeouts */
++ struct mutex lock; /* general race avoidance */
+ };
+
+ static DEFINE_MUTEX(rio500_mutex);
+@@ -62,8 +63,10 @@ static int open_rio(struct inode *inode, struct file *file)
+
+ /* against disconnect() */
+ mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+
+ if (rio->isopen || !rio->present) {
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return -EBUSY;
+ }
+@@ -71,6 +74,7 @@ static int open_rio(struct inode *inode, struct file *file)
+
+ init_waitqueue_head(&rio->wait_q);
+
++ mutex_unlock(&(rio->lock));
+
+ dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+ mutex_unlock(&rio500_mutex);
+@@ -84,6 +88,7 @@ static int close_rio(struct inode *inode, struct file *file)
+
+ /* against disconnect() */
+ mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+
+ rio->isopen = 0;
+ if (!rio->present) {
+@@ -95,6 +100,7 @@ static int close_rio(struct inode *inode, struct file *file)
+ } else {
+ dev_info(&rio->rio_dev->dev, "Rio closed.\n");
+ }
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return 0;
+ }
+@@ -109,7 +115,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
+ int retries;
+ int retval=0;
+
+- mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+ retval = -ENODEV;
+@@ -253,7 +259,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
+
+
+ err_out:
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return retval;
+ }
+
+@@ -273,12 +279,12 @@ write_rio(struct file *file, const char __user *buffer,
+ int errn = 0;
+ int intr;
+
+- intr = mutex_lock_interruptible(&rio500_mutex);
++ intr = mutex_lock_interruptible(&(rio->lock));
+ if (intr)
+ return -EINTR;
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+
+@@ -301,7 +307,7 @@ write_rio(struct file *file, const char __user *buffer,
+ goto error;
+ }
+ if (signal_pending(current)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return bytes_written ? bytes_written : -EINTR;
+ }
+
+@@ -339,12 +345,12 @@ write_rio(struct file *file, const char __user *buffer,
+ buffer += copy_size;
+ } while (count > 0);
+
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+
+ return bytes_written ? bytes_written : -EIO;
+
+ error:
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return errn;
+ }
+
+@@ -361,12 +367,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ char *ibuf;
+ int intr;
+
+- intr = mutex_lock_interruptible(&rio500_mutex);
++ intr = mutex_lock_interruptible(&(rio->lock));
+ if (intr)
+ return -EINTR;
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+
+@@ -377,11 +383,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+
+ while (count > 0) {
+ if (signal_pending(current)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return read_count ? read_count : -EINTR;
+ }
+ if (!rio->rio_dev) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+ this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
+@@ -399,7 +405,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ count = this_read = partial;
+ } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
+ if (!maxretry--) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ dev_err(&rio->rio_dev->dev,
+ "read_rio: maxretry timeout\n");
+ return -ETIME;
+@@ -409,19 +415,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ finish_wait(&rio->wait_q, &wait);
+ continue;
+ } else if (result != -EREMOTEIO) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ dev_err(&rio->rio_dev->dev,
+ "Read Whoops - result:%d partial:%u this_read:%u\n",
+ result, partial, this_read);
+ return -EIO;
+ } else {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return (0);
+ }
+
+ if (this_read) {
+ if (copy_to_user(buffer, ibuf, this_read)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -EFAULT;
+ }
+ count -= this_read;
+@@ -429,7 +435,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ buffer += this_read;
+ }
+ }
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return read_count;
+ }
+
+@@ -494,6 +500,8 @@ static int probe_rio(struct usb_interface *intf,
+ }
+ dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
+
++ mutex_init(&(rio->lock));
++
+ usb_set_intfdata (intf, rio);
+ rio->present = 1;
+ bail_out:
+@@ -511,10 +519,12 @@ static void disconnect_rio(struct usb_interface *intf)
+ if (rio) {
+ usb_deregister_dev(intf, &usb_rio_class);
+
++ mutex_lock(&(rio->lock));
+ if (rio->isopen) {
+ rio->isopen = 0;
+ /* better let it finish - the release will do whats needed */
+ rio->rio_dev = NULL;
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return;
+ }
+@@ -524,6 +534,7 @@ static void disconnect_rio(struct usb_interface *intf)
+ dev_info(&intf->dev, "USB Rio disconnected.\n");
+
+ rio->present = 0;
++ mutex_unlock(&(rio->lock));
+ }
+ mutex_unlock(&rio500_mutex);
+ }
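The rio500 hunks above swap the driver-global rio500_mutex for a per-device rio->lock on the open/read/write paths, with disconnect_rio() taking the new lock as well before clearing rio->present. A minimal userspace sketch of the same pattern, using pthreads in place of the kernel mutex API (all names here are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdbool.h>

    struct rio_dev {
            pthread_mutex_t lock;   /* per-device, like rio->lock */
            bool present;           /* cleared on disconnect */
    };

    /* I/O path: serialize on the device, not on a global lock */
    static int rio_io(struct rio_dev *rio)
    {
            pthread_mutex_lock(&rio->lock);
            if (!rio->present) {                    /* device gone */
                    pthread_mutex_unlock(&rio->lock);
                    return -1;                      /* -ENODEV in the driver */
            }
            /* ... do the transfer while holding rio->lock ... */
            pthread_mutex_unlock(&rio->lock);
            return 0;
    }

    /* disconnect path: mark the device gone under the same lock */
    static void rio_disconnect(struct rio_dev *rio)
    {
            pthread_mutex_lock(&rio->lock);
            rio->present = false;
            pthread_mutex_unlock(&rio->lock);
    }

The point of the change is scope: readers and writers of one device no longer contend with, or deadlock against, operations on another device or on the global probe/disconnect path.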
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index 7b306aa22d25..6715a128e6c8 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+- usb_put_dev(dev->udev);
+ if (dev->cntl_urb) {
+ usb_kill_urb(dev->cntl_urb);
+ kfree(dev->cntl_req);
+@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
+ dev->int_buffer, dev->urb->transfer_dma);
+ usb_free_urb(dev->urb);
+ }
++ usb_put_dev(dev->udev);
+ kfree(dev);
+ }
+
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index fba32d84e578..15abe1d9958f 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -379,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
+ return SNK_UNATTACHED;
+ else if (port->try_role == TYPEC_SOURCE)
+ return SRC_UNATTACHED;
+- else if (port->tcpc->config->default_role == TYPEC_SINK)
++ else if (port->tcpc->config &&
++ port->tcpc->config->default_role == TYPEC_SINK)
+ return SNK_UNATTACHED;
+ /* Fall through to return SRC_UNATTACHED */
+ } else if (port->port_type == TYPEC_PORT_SNK) {
+@@ -586,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
+
+ static void tcpm_debugfs_exit(struct tcpm_port *port)
+ {
++ int i;
++
++ mutex_lock(&port->logbuffer_lock);
++ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
++ kfree(port->logbuffer[i]);
++ port->logbuffer[i] = NULL;
++ }
++ mutex_unlock(&port->logbuffer_lock);
++
+ debugfs_remove(port->dentry);
++ if (list_empty(&rootdir->d_subdirs)) {
++ debugfs_remove(rootdir);
++ rootdir = NULL;
++ }
+ }
+
+ #else
+@@ -1095,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ break;
+ case CMD_ATTENTION:
+ /* Attention command does not have response */
+- typec_altmode_attention(adev, p[1]);
++ if (adev)
++ typec_altmode_attention(adev, p[1]);
+ return 0;
+ default:
+ break;
+@@ -1147,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ }
+ break;
+ case CMD_ENTER_MODE:
+- typec_altmode_update_active(pdev, true);
+-
+- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
+- response[0] |= VDO_OPOS(adev->mode);
+- return 1;
++ if (adev && pdev) {
++ typec_altmode_update_active(pdev, true);
++
++ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
++ response[0] = VDO(adev->svid, 1,
++ CMD_EXIT_MODE);
++ response[0] |= VDO_OPOS(adev->mode);
++ return 1;
++ }
+ }
+ return 0;
+ case CMD_EXIT_MODE:
+- typec_altmode_update_active(pdev, false);
++ if (adev && pdev) {
++ typec_altmode_update_active(pdev, false);
+
+- /* Back to USB Operation */
+- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+- NULL));
++ /* Back to USB Operation */
++ WARN_ON(typec_altmode_notify(adev,
++ TYPEC_STATE_USB,
++ NULL));
++ }
+ break;
+ default:
+ break;
+@@ -1170,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /* Back to USB Operation */
+- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+- NULL));
++ if (adev)
++ WARN_ON(typec_altmode_notify(adev,
++ TYPEC_STATE_USB,
++ NULL));
+ break;
+ default:
+ break;
+@@ -1182,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ }
+
+ /* Informing the alternate mode drivers about everything */
+- typec_altmode_vdm(adev, p[0], &p[1], cnt);
++ if (adev)
++ typec_altmode_vdm(adev, p[0], &p[1], cnt);
+
+ return rlen;
+ }
+@@ -4114,7 +4138,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
+ mutex_lock(&port->lock);
+ if (tcpc->try_role)
+ ret = tcpc->try_role(tcpc, role);
+- if (!ret && !tcpc->config->try_role_hw)
++ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ port->try_role = role;
+ port->try_src_count = 0;
+ port->try_snk_count = 0;
+@@ -4701,7 +4725,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
+ port->typec_caps.prefer_role = tcfg->default_role;
+ port->typec_caps.type = tcfg->type;
+ port->typec_caps.data = tcfg->data;
+- port->self_powered = port->tcpc->config->self_powered;
++ port->self_powered = tcfg->self_powered;
+
+ return 0;
+ }
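A recurring theme in the tcpm hunks above is that tcpc->config and the alternate-mode devices (adev/pdev) may legitimately be NULL, so every dereference gains a guard. A tiny standalone sketch of the config guard (hypothetical struct layout):

    #include <stdbool.h>
    #include <stddef.h>

    struct tcpc_config { bool try_role_hw; };
    struct tcpc_dev    { const struct tcpc_config *config; /* may be NULL */ };

    /* Mirrors the tcpm_try_role() fix: a missing config means "no
     * hardware try-role support", not a NULL-pointer dereference. */
    static bool record_try_role_in_sw(const struct tcpc_dev *tcpc)
    {
            return !tcpc->config || !tcpc->config->try_role_hw;
    }

The same reasoning applies to tcpm_copy_caps(): self_powered is now read from the tcfg argument that was already validated, rather than re-derived through port->tcpc->config.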
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index bf63074675fc..c274a36c2fe4 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -963,7 +963,7 @@ release_fw:
+ ******************************************************************************/
+ static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
+ {
+- int err;
++ int err = 0;
+
+ while (flash_mode != FLASH_NOT_NEEDED) {
+ err = do_flash(uc, flash_mode);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 09c9d6726f07..a9e7c1b69437 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1723,7 +1723,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
+
+ /* finish claiming */
+ mutex_lock(&bdev->bd_mutex);
+- bd_finish_claiming(bdev, whole, holder);
++ if (!res)
++ bd_finish_claiming(bdev, whole, holder);
++ else
++ bd_abort_claiming(bdev, whole, holder);
+ /*
+ * Block event polling for write claims if requested. Any
+ * write holder makes the write_holder state stick until
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 75311a8a68bf..c3c8de5513db 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+ if (tcon == NULL)
+ return 0;
+
+- if (smb2_command == SMB2_TREE_CONNECT)
++ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+ return 0;
+
+ if (tcon->tidStatus == CifsExiting) {
+@@ -1173,7 +1173,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ else
+ req->SecurityMode = 0;
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
++#else
+ req->Capabilities = 0;
++#endif /* CONFIG_CIFS_DFS_UPCALL */
++
+ req->Channel = 0; /* MBZ */
+
+ sess_data->iov[0].iov_base = (char *)req;
+diff --git a/fs/dax.c b/fs/dax.c
+index 7d0e99982d48..4f42b852375b 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -601,7 +601,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
+ * guaranteed to either see new references or prevent new
+ * references from being established.
+ */
+- unmap_mapping_range(mapping, 0, 0, 1);
++ unmap_mapping_range(mapping, 0, 0, 0);
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 93ea1d529aa3..253e2f939d5f 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
+ return mp->mp_aheight - x - 1;
+ }
+
++static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
++{
++ sector_t factor = 1, block = 0;
++ int hgt;
++
++ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
++ if (hgt < mp->mp_aheight)
++ block += mp->mp_list[hgt] * factor;
++ factor *= sdp->sd_inptrs;
++ }
++ return block;
++}
++
+ static void release_metapath(struct metapath *mp)
+ {
+ int i;
+@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
+ return ptr - first;
+ }
+
+-typedef const __be64 *(*gfs2_metadata_walker)(
+- struct metapath *mp,
+- const __be64 *start, const __be64 *end,
+- u64 factor, void *data);
++enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
+
+-#define WALK_STOP ((__be64 *)0)
+-#define WALK_NEXT ((__be64 *)1)
++/*
++ * gfs2_metadata_walker - walk an indirect block
++ * @mp: Metapath to indirect block
++ * @ptrs: Number of pointers to look at
++ *
++ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
++ * indirect block to follow.
++ */
++typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
++ unsigned int ptrs);
+
+-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
+- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
+- void *data)
++/*
++ * gfs2_walk_metadata - walk a tree of indirect blocks
++ * @inode: The inode
++ * @mp: Starting point of walk
++ * @max_len: Maximum number of blocks to walk
++ * @walker: Called during the walk
++ *
++ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
++ * past the end of metadata, and a negative error code otherwise.
++ */
++
++static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
++ u64 max_len, gfs2_metadata_walker walker)
+ {
+- struct metapath clone;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+- const __be64 *start, *end, *ptr;
+ u64 factor = 1;
+ unsigned int hgt;
+- int ret = 0;
++ int ret;
+
+- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
++ /*
++ * The walk starts in the lowest allocated indirect block, which may be
++ * before the position indicated by @mp. Adjust @max_len accordingly
++ * to avoid a short walk.
++ */
++ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
++ max_len += mp->mp_list[hgt] * factor;
++ mp->mp_list[hgt] = 0;
+ factor *= sdp->sd_inptrs;
++ }
+
+ for (;;) {
+- u64 step;
++ u16 start = mp->mp_list[hgt];
++ enum walker_status status;
++ unsigned int ptrs;
++ u64 len;
+
+ /* Walk indirect block. */
+- start = metapointer(hgt, mp);
+- end = metaend(hgt, mp);
+-
+- step = (end - start) * factor;
+- if (step > len)
+- end = start + DIV_ROUND_UP_ULL(len, factor);
+-
+- ptr = walker(mp, start, end, factor, data);
+- if (ptr == WALK_STOP)
++ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
++ len = ptrs * factor;
++ if (len > max_len)
++ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
++ status = walker(mp, ptrs);
++ switch (status) {
++ case WALK_STOP:
++ return 1;
++ case WALK_FOLLOW:
++ BUG_ON(mp->mp_aheight == mp->mp_fheight);
++ ptrs = mp->mp_list[hgt] - start;
++ len = ptrs * factor;
+ break;
+- if (step >= len)
++ case WALK_CONTINUE:
+ break;
+- len -= step;
+- if (ptr != WALK_NEXT) {
+- BUG_ON(!*ptr);
+- mp->mp_list[hgt] += ptr - start;
+- goto fill_up_metapath;
+ }
++ if (len >= max_len)
++ break;
++ max_len -= len;
++ if (status == WALK_FOLLOW)
++ goto fill_up_metapath;
+
+ lower_metapath:
+ /* Decrease height of metapath. */
+- if (mp != &clone) {
+- clone_metapath(&clone, mp);
+- mp = &clone;
+- }
+ brelse(mp->mp_bh[hgt]);
+ mp->mp_bh[hgt] = NULL;
++ mp->mp_list[hgt] = 0;
+ if (!hgt)
+ break;
+ hgt--;
+@@ -491,10 +528,7 @@ lower_metapath:
+
+ /* Advance in metadata tree. */
+ (mp->mp_list[hgt])++;
+- start = metapointer(hgt, mp);
+- end = metaend(hgt, mp);
+- if (start >= end) {
+- mp->mp_list[hgt] = 0;
++ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
+ if (!hgt)
+ break;
+ goto lower_metapath;
+@@ -502,44 +536,36 @@ lower_metapath:
+
+ fill_up_metapath:
+ /* Increase height of metapath. */
+- if (mp != &clone) {
+- clone_metapath(&clone, mp);
+- mp = &clone;
+- }
+ ret = fillup_metapath(ip, mp, ip->i_height - 1);
+ if (ret < 0)
+- break;
++ return ret;
+ hgt += ret;
+ for (; ret; ret--)
+ do_div(factor, sdp->sd_inptrs);
+ mp->mp_aheight = hgt + 1;
+ }
+- if (mp == &clone)
+- release_metapath(mp);
+- return ret;
++ return 0;
+ }
+
+-struct gfs2_hole_walker_args {
+- u64 blocks;
+-};
+-
+-static const __be64 *gfs2_hole_walker(struct metapath *mp,
+- const __be64 *start, const __be64 *end,
+- u64 factor, void *data)
++static enum walker_status gfs2_hole_walker(struct metapath *mp,
++ unsigned int ptrs)
+ {
+- struct gfs2_hole_walker_args *args = data;
+- const __be64 *ptr;
++ const __be64 *start, *ptr, *end;
++ unsigned int hgt;
++
++ hgt = mp->mp_aheight - 1;
++ start = metapointer(hgt, mp);
++ end = start + ptrs;
+
+ for (ptr = start; ptr < end; ptr++) {
+ if (*ptr) {
+- args->blocks += (ptr - start) * factor;
++ mp->mp_list[hgt] += ptr - start;
+ if (mp->mp_aheight == mp->mp_fheight)
+ return WALK_STOP;
+- return ptr; /* increase height */
++ return WALK_FOLLOW;
+ }
+ }
+- args->blocks += (end - start) * factor;
+- return WALK_NEXT;
++ return WALK_CONTINUE;
+ }
+
+ /**
+@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
+ static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
+ struct metapath *mp, struct iomap *iomap)
+ {
+- struct gfs2_hole_walker_args args = { };
+- int ret = 0;
++ struct metapath clone;
++ u64 hole_size;
++ int ret;
+
+- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
+- if (!ret)
+- iomap->length = args.blocks << inode->i_blkbits;
++ clone_metapath(&clone, mp);
++ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
++ if (ret < 0)
++ goto out;
++
++ if (ret == 1)
++ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
++ else
++ hole_size = len;
++ iomap->length = hole_size << inode->i_blkbits;
++ ret = 0;
++
++out:
++ release_metapath(&clone);
+ return ret;
+ }
+
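metapath_to_block() introduced above treats the per-height offsets in mp->mp_list[] as digits of a mixed-radix number with sdp->sd_inptrs pointers per indirect level; heights that are not allocated yet contribute zero. A freestanding sketch of the same arithmetic (parameters passed in directly; 509 pointers per indirect block is just a plausible example value):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t metapath_block(const unsigned *list, int fheight,
                                   int aheight, uint64_t inptrs)
    {
            uint64_t factor = 1, block = 0;
            int hgt;

            for (hgt = fheight - 1; hgt >= 0; hgt--) {
                    if (hgt < aheight)      /* unallocated heights add 0 */
                            block += (uint64_t)list[hgt] * factor;
                    factor *= inptrs;
            }
            return block;
    }

    int main(void)
    {
            unsigned list[3] = { 1, 2, 3 };

            /* 1*509*509 + 2*509 + 3 = 260102 */
            printf("%llu\n",
                   (unsigned long long)metapath_block(list, 3, 3, 509));
            return 0;
    }

This is what lets the reworked gfs2_hole_size() turn the metapath position where gfs2_hole_walker() stopped back into a logical block number.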
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 0ff3facf81da..0af854cce8ff 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -153,7 +153,7 @@ again:
+ /* Block nfs4_proc_unlck */
+ mutex_lock(&sp->so_delegreturn_mutex);
+ seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
++ err = nfs4_open_delegation_recall(ctx, state, stateid);
+ if (!err)
+ err = nfs_delegation_claim_locks(state, stateid);
+ if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 5799777df5ec..9eb87ae4c982 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
+
+ /* NFSv4 delegation-related procedures */
+ int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
+-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
++int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
+ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
+ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
+ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6418cb6c079b..63edda145d1b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1878,8 +1878,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
+ update:
+- update_open_stateid(state, &data->o_res.stateid, NULL,
+- data->o_arg.fmode);
++ if (!update_open_stateid(state, &data->o_res.stateid,
++ NULL, data->o_arg.fmode))
++ return ERR_PTR(-EAGAIN);
+ refcount_inc(&state->count);
+
+ return state;
+@@ -1944,8 +1945,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
+- update_open_stateid(state, &data->o_res.stateid, NULL,
+- data->o_arg.fmode);
++ if (!update_open_stateid(state, &data->o_res.stateid,
++ NULL, data->o_arg.fmode)) {
++ nfs4_put_open_state(state);
++ state = ERR_PTR(-EAGAIN);
++ }
+ out:
+ nfs_release_seqid(data->o_arg.seqid);
+ return state;
+@@ -2148,12 +2152,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
+ return -EAGAIN;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ /* Don't recall a delegation if it was lost */
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ return -EAGAIN;
+@@ -2174,7 +2176,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ return -EAGAIN;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ ssleep(1);
+ return -EAGAIN;
+ case -ENOMEM:
+@@ -2190,8 +2191,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ }
+
+ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+- struct nfs4_state *state, const nfs4_stateid *stateid,
+- fmode_t type)
++ struct nfs4_state *state, const nfs4_stateid *stateid)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_opendata *opendata;
+@@ -2202,20 +2202,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
+ nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+- nfs_state_clear_delegation(state);
+- switch (type & (FMODE_READ|FMODE_WRITE)) {
+- case FMODE_READ|FMODE_WRITE:
+- case FMODE_WRITE:
++ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+ if (err)
+- break;
++ goto out;
++ }
++ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+ if (err)
+- break;
+- /* Fall through */
+- case FMODE_READ:
++ goto out;
++ }
++ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_READ);
++ if (err)
++ goto out;
+ }
++ nfs_state_clear_delegation(state);
++out:
+ nfs4_opendata_put(opendata);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
+ }
+@@ -3172,7 +3175,7 @@ static int _nfs4_do_setattr(struct inode *inode,
+
+ if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
+ /* Use that stateid */
+- } else if (ctx != NULL) {
++ } else if (ctx != NULL && ctx->state) {
+ struct nfs_lock_context *l_ctx;
+ if (!nfs4_valid_open_stateid(ctx->state))
+ return -EBADF;
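The reworked nfs4_open_delegation_recall() above no longer trusts the delegation's fmode: it attempts to reclaim every open mode the state does not already hold, and clears the delegation only after all of them succeed. A schematic of that control flow (simplified, illustrative names only):

    #include <stdbool.h>

    enum { RDWR, WRONLY, RDONLY, NMODES };

    static int recall(const bool have[NMODES], int (*recover)(int mode))
    {
            int m, err;

            for (m = 0; m < NMODES; m++) {
                    if (have[m])            /* already open in this mode */
                            continue;
                    err = recover(m);
                    if (err)
                            return err;     /* delegation stays flagged */
            }
            return 0;                       /* safe to clear delegation */
    }

That ordering is also why the error cases in nfs4_handle_delegation_recall_error() no longer need to re-set NFS_DELEGATED_STATE: on failure the flag was never cleared in the first place.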
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 46bbc949c20a..7a30524a80ee 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+ void kvm_vgic_load(struct kvm_vcpu *vcpu);
+ void kvm_vgic_put(struct kvm_vcpu *vcpu);
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
+ #define vgic_initialized(k) ((k)->arch.vgic.initialized)
+diff --git a/include/linux/ccp.h b/include/linux/ccp.h
+index 55cb455cfcb0..a5dfbaf2470d 100644
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -170,6 +170,8 @@ struct ccp_aes_engine {
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+
++ u32 authsize;
++
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index d1ad38a3f048..7546cbf3dfb0 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -871,6 +871,7 @@ void kvm_arch_check_processor_compat(void *rtn);
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+
+ #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+ /*
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index c5188ff724d1..bc88d6f964da 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+ if (snd_BUG_ON(!stream))
+ return;
+
+- if (stream->direction == SND_COMPRESS_PLAYBACK)
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+- else
+- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+
+ wake_up(&stream->runtime->sleep);
+ }
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index 6f09d1500960..70da1c6cdd07 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2844,7 +2844,7 @@ enum nl80211_attrs {
+ #define NL80211_HT_CAPABILITY_LEN 26
+ #define NL80211_VHT_CAPABILITY_LEN 12
+ #define NL80211_HE_MIN_CAPABILITY_LEN 16
+-#define NL80211_HE_MAX_CAPABILITY_LEN 51
++#define NL80211_HE_MAX_CAPABILITY_LEN 54
+ #define NL80211_MAX_NR_CIPHER_SUITES 5
+ #define NL80211_MAX_NR_AKM_SUITES 2
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f851934d55d4..4bc15cff1026 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11266,7 +11266,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_unlock;
+ }
+
+- perf_install_in_context(ctx, event, cpu);
++ perf_install_in_context(ctx, event, event->cpu);
+ perf_unpin_context(ctx);
+ mutex_unlock(&ctx->mutex);
+
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index f18cd5aa33e8..656c14333613 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -253,11 +253,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+ * Determine the number of vectors which need interrupt affinities
+ * assigned. If the pre/post request exhausts the available vectors
+ * then nothing to do here except for invoking the calc_sets()
+- * callback so the device driver can adjust to the situation. If there
+- * is only a single vector, then managing the queue is pointless as
+- * well.
++ * callback so the device driver can adjust to the situation.
+ */
+- if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
++ if (nvecs > affd->pre_vectors + affd->post_vectors)
+ affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+ else
+ affvecs = 0;
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 83ea6c4e623c..6ca97a63b3d6 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
+ return -ENOMEM;
+
+ rc = __test_firmware_config_init();
+- if (rc)
++ if (rc) {
++ kfree(test_fw_config);
++ pr_err("could not init firmware test config: %d\n", rc);
+ return rc;
++ }
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 0f76cca32a1c..080d30408ce3 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1213,6 +1213,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+ if (unlikely(valist == NULL))
+ return false;
+
++ /*
++ * First make sure the mappings are removed from all page-tables
++ * before they are freed.
++ */
++ vmalloc_sync_all();
++
+ /*
+ * TODO: to calculate a flush range without looping.
+ * The list can be up to lazy_max_pages() elements.
+@@ -3001,6 +3007,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
+ /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
++ *
++ * The purpose of this function is to make sure the vmalloc area
++ * mappings are identical in all page-tables in the system.
+ */
+ void __weak vmalloc_sync_all(void)
+ {
+diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
+index 59031670b16a..cc23f1ce239c 100644
+--- a/net/ipv4/netfilter/ipt_rpfilter.c
++++ b/net/ipv4/netfilter/ipt_rpfilter.c
+@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+ flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_scope = RT_SCOPE_UNIVERSE;
++ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
+
+ return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
+ }
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index 6bcaf7357183..d800801a5dd2 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ if (rpfilter_addr_linklocal(&iph->saddr)) {
+ lookup_flags |= RT6_LOOKUP_F_IFACE;
+ fl6.flowi6_oif = dev->ifindex;
+- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
++		/* Set flowi6_oif for vrf devices to look up routes in the l3mdev domain. */
++ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
++ (flags & XT_RPFILTER_LOOSE) == 0)
+ fl6.flowi6_oif = dev->ifindex;
+
+ rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
+@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ goto out;
+ }
+
+- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
++ if (rt->rt6i_idev->dev == dev ||
++ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
++ (flags & XT_RPFILTER_LOOSE))
+ ret = true;
+ out:
+ ip6_rt_put(rt);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a1973a26c7fc..b8288125e05d 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -935,8 +935,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+
+ err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+ params->probe_resp_len, csa);
+- if (err < 0)
++ if (err < 0) {
++ kfree(new);
+ return err;
++ }
+ if (err == 0)
+ changed |= BSS_CHANGED_AP_PROBE_RESP;
+
+@@ -948,8 +950,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ params->civicloc,
+ params->civicloc_len);
+
+- if (err < 0)
++ if (err < 0) {
++ kfree(new);
+ return err;
++ }
+
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+ }
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index acd4afb4944b..c9a8a2433e8a 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+- if (WARN_ONCE(params->cw_min == 0 ||
+- params->cw_min > params->cw_max,
+- "%s: invalid CW_min/CW_max: %d/%d\n",
+- sdata->name, params->cw_min, params->cw_max))
++ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
++ /*
++ * If we can't configure hardware anyway, don't warn. We may
++ * never have initialized the CW parameters.
++ */
++ WARN_ONCE(local->ops->conf_tx,
++ "%s: invalid CW_min/CW_max: %d/%d\n",
++ sdata->name, params->cw_min, params->cw_max);
+ return -EINVAL;
++ }
+
+ trace_drv_conf_tx(local, sdata, ac, params);
+ if (local->ops->conf_tx)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 379d2ab6d327..36b60f39930b 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2041,6 +2041,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ ieee80211_regulatory_limit_wmm_params(sdata, ¶ms[ac], ac);
+ }
+
++ /* WMM specification requires all 4 ACIs. */
++ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++ if (params[ac].cw_min == 0) {
++ sdata_info(sdata,
++ "AP has invalid WMM params (missing AC %d), using defaults\n",
++ ac);
++ return false;
++ }
++ }
++
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ mlme_dbg(sdata,
+ "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 1e2cc83ff5da..ae1f8c6b3a97 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+ const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+ __u32 seq, ack, sack, end, win, swin;
++ u16 win_raw;
+ s32 receiver_offset;
+ bool res, in_recv_win;
+
+@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ */
+ seq = ntohl(tcph->seq);
+ ack = sack = ntohl(tcph->ack_seq);
+- win = ntohs(tcph->window);
++ win_raw = ntohs(tcph->window);
++ win = win_raw;
+ end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
+
+ if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
+@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ && state->last_seq == seq
+ && state->last_ack == ack
+ && state->last_end == end
+- && state->last_win == win)
++ && state->last_win == win_raw)
+ state->retrans++;
+ else {
+ state->last_dir = dir;
+ state->last_seq = seq;
+ state->last_ack = ack;
+ state->last_end = end;
+- state->last_win = win;
++ state->last_win = win_raw;
+ state->retrans = 0;
+ }
+ }
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 92077d459109..4abbb452cf6c 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
+ ss = nfnetlink_get_subsys(type << 8);
+ rcu_read_unlock();
+ if (!ss)
+- request_module("nfnetlink-subsys-%d", type);
++ request_module_nowait("nfnetlink-subsys-%d", type);
+ return 0;
+ }
+ #endif
+diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
+index 2f89bde3c61c..ff9ac8ae0031 100644
+--- a/net/netfilter/nft_chain_nat.c
++++ b/net/netfilter/nft_chain_nat.c
+@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
+ #ifdef CONFIG_NF_TABLES_IPV6
+ MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
+ #endif
++#ifdef CONFIG_NF_TABLES_INET
++MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
++#endif
+diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
+index fe93e731dc7f..b836d550b919 100644
+--- a/net/netfilter/nft_hash.c
++++ b/net/netfilter/nft_hash.c
+@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+- if (priv->modulus <= 1)
++ if (priv->modulus < 1)
+ return -ERANGE;
+
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 8487eeff5c0e..43eeb1f609f1 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
+-MODULE_ALIAS_NFT_EXPR("nat");
++MODULE_ALIAS_NFT_EXPR("redir");
+diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py
+index 7915823b92a5..c458696ef3a7 100755
+--- a/scripts/gen_compile_commands.py
++++ b/scripts/gen_compile_commands.py
+@@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
+ _VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+
+ # A kernel build generally has over 2000 entries in its compile_commands.json
+-# database. If this code finds 500 or fewer, then warn the user that they might
++# database. If this code finds 300 or fewer, then warn the user that they might
+ # not have all the .cmd files, and they might need to compile the kernel.
+-_LOW_COUNT_THRESHOLD = 500
++_LOW_COUNT_THRESHOLD = 300
+
+
+ def parse_arguments():
+diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
+index 9be208db88d3..1f9f0a334c24 100755
+--- a/scripts/sphinx-pre-install
++++ b/scripts/sphinx-pre-install
+@@ -77,6 +77,17 @@ sub check_missing(%)
+ foreach my $prog (sort keys %missing) {
+ my $is_optional = $missing{$prog};
+
++ # At least on some LTS distros like CentOS 7, texlive doesn't
++ # provide all packages we need. When such distros are
++ # detected, we have to disable PDF output.
++ #
++ # So, we need to ignore the packages that distros would
++		# need for LaTeX to work.
++ if ($is_optional == 2 && !$pdf) {
++ $optional--;
++ next;
++ }
++
+ if ($is_optional) {
+ print "Warning: better to also install \"$prog\".\n";
+ } else {
+@@ -326,10 +337,10 @@ sub give_debian_hints()
+
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+- "fonts-dejavu", 1);
++ "fonts-dejavu", 2);
+ }
+
+- check_program("dvipng", 1) if ($pdf);
++ check_program("dvipng", 2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -364,22 +375,40 @@ sub give_redhat_hints()
+ #
+ # Checks valid for RHEL/CentOS version 7.x.
+ #
+- if (! $system_release =~ /Fedora/) {
++ my $old = 0;
++ my $rel;
++ $rel = $1 if ($system_release =~ /release\s+(\d+)/);
++
++ if (!($system_release =~ /Fedora/)) {
+ $map{"virtualenv"} = "python-virtualenv";
+- }
+
+- my $release;
++ if ($rel && $rel < 8) {
++ $old = 1;
++ $pdf = 0;
+
+- $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/);
++ printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
++ printf("If you want to build PDF, please read:\n");
++ printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
++ }
++ } else {
++ if ($rel && $rel < 26) {
++ $old = 1;
++ }
++ }
++ if (!$rel) {
++ printf("Couldn't identify release number\n");
++ $old = 1;
++ $pdf = 0;
++ }
+
+- check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26);
+- check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf);
+- check_missing_tex(1) if ($pdf);
++ check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
++ check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+
+- if ($release >= 18) {
++ if (!$old) {
+ # dnf, for Fedora 18+
+ printf("You should run:\n\n\tsudo dnf install -y $install\n");
+ } else {
+@@ -418,8 +447,10 @@ sub give_opensuse_hints()
+ "texlive-zapfding",
+ );
+
+- check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf);
+- check_missing_tex(1) if ($pdf);
++ $map{"latexmk"} = "texlive-latexmk-bin";
++
++ check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -443,7 +474,9 @@ sub give_mageia_hints()
+ "texlive-fontsextra",
+ );
+
+- check_rpm_missing(\@tex_pkgs, 1) if ($pdf);
++ $map{"latexmk"} = "texlive-collection-basic";
++
++ check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -466,7 +499,8 @@ sub give_arch_linux_hints()
+ "texlive-latexextra",
+ "ttf-dejavu",
+ );
+- check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf);
++ check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
++
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -485,7 +519,7 @@ sub give_gentoo_hints()
+ );
+
+ check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
+- "media-fonts/dejavu", 1) if ($pdf);
++ "media-fonts/dejavu", 2) if ($pdf);
+
+ check_missing(\%map);
+
+@@ -553,7 +587,7 @@ sub check_distros()
+ my %map = (
+ "sphinx-build" => "sphinx"
+ );
+- check_missing_tex(1) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+ print "I don't know distro $system_release.\n";
+ print "So, I can't provide you a hint with the install procedure.\n";
+@@ -591,11 +625,13 @@ sub check_needs()
+ check_program("make", 0);
+ check_program("gcc", 0);
+ check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv);
+- check_program("xelatex", 1) if ($pdf);
+ check_program("dot", 1);
+ check_program("convert", 1);
+- check_program("rsvg-convert", 1) if ($pdf);
+- check_program("latexmk", 1) if ($pdf);
++
++ # Extra PDF files - should use 2 for is_optional
++ check_program("xelatex", 2) if ($pdf);
++ check_program("rsvg-convert", 2) if ($pdf);
++ check_program("latexmk", 2) if ($pdf);
+
+ check_distros();
+
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index 99b882158705..41905afada63 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
+ stream->metadata_set = false;
+ stream->next_track = false;
+
+- if (stream->direction == SND_COMPRESS_PLAYBACK)
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+- else
+- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ } else {
+ return -EPERM;
+ }
+@@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_SETUP:
++ if (stream->direction != SND_COMPRESS_CAPTURE)
++ return -EPERM;
++ break;
++ case SNDRV_PCM_STATE_PREPARED:
++ break;
++ default:
+ return -EPERM;
++ }
++
+ retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
+ if (!retval)
+ stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+@@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
+ return -EPERM;
++ default:
++ break;
++ }
++
+ retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+ if (!retval) {
+ snd_compr_drain_notify(stream);
+@@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
++ case SNDRV_PCM_STATE_PAUSED:
+ return -EPERM;
++ case SNDRV_PCM_STATE_XRUN:
++ return -EPIPE;
++ default:
++ break;
++ }
+
+ retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+ if (retval) {
+@@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
+ if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+ return -EPERM;
+
++ /* next track doesn't have any meaning for capture streams */
++ if (stream->direction == SND_COMPRESS_CAPTURE)
++ return -EPERM;
++
+ /* you can signal next track if this is intended to be a gapless stream
+ * and current track metadata is set
+ */
+@@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
+ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
+ {
+ int retval;
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
++ case SNDRV_PCM_STATE_PAUSED:
++ return -EPERM;
++ case SNDRV_PCM_STATE_XRUN:
++ return -EPIPE;
++ default:
++ break;
++ }
++
++ /* partial drain doesn't have any meaning for capture streams */
++ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
++
+ /* stream can be drained only when next track has been signalled */
+ if (stream->next_track == false)
+ return -EPERM;
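The compress_offload hunks above converge on one pattern: each trigger validates runtime->state with a switch before calling into the driver, returning -EPERM for states in which the operation is meaningless and -EPIPE after an XRUN. A compact standalone illustration (reduced state set, hypothetical names):

    #include <errno.h>

    enum cstate { OPEN, SETUP, PREPARED, RUNNING, PAUSED, XRUN };
    enum cdir   { PLAYBACK, CAPTURE };

    /* like snd_compr_start(): capture may start straight from SETUP,
     * playback must have been prepared first */
    static int can_start(enum cstate s, enum cdir d)
    {
            switch (s) {
            case SETUP:
                    return d == CAPTURE ? 0 : -EPERM;
            case PREPARED:
                    return 0;
            default:
                    return -EPERM;
            }
    }

    /* like snd_compr_drain(): nothing to drain before data flows,
     * and a pipe error if the stream already underran */
    static int can_drain(enum cstate s)
    {
            switch (s) {
            case OPEN: case SETUP: case PREPARED: case PAUSED:
                    return -EPERM;
            case XRUN:
                    return -EPIPE;
            default:
                    return 0;
            }
    }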
+diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
+index 0d35359d25cd..0ecafd0c6722 100644
+--- a/sound/firewire/packets-buffer.c
++++ b/sound/firewire/packets-buffer.c
+@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
+ packets_per_page = PAGE_SIZE / packet_size;
+ if (WARN_ON(!packets_per_page)) {
+ err = -EINVAL;
+- goto error;
++ goto err_packets;
+ }
+ pages = DIV_ROUND_UP(count, packets_per_page);
+
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 232a1926758a..dd96def48a3a 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -598,11 +598,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ }
+ runtime->private_data = azx_dev;
+
+- if (chip->gts_present)
+- azx_pcm_hw.info = azx_pcm_hw.info |
+- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+-
+ runtime->hw = azx_pcm_hw;
++ if (chip->gts_present)
++ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+ runtime->hw.channels_min = hinfo->channels_min;
+ runtime->hw.channels_max = hinfo->channels_max;
+ runtime->hw.formats = hinfo->formats;
+@@ -615,6 +613,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ 20,
+ 178000000);
+
++	/* For some reason, the playback stream stalls on PulseAudio with
++	 * tsched=1 when a capture stream triggers. Until we figure out the
++	 * real cause, disable tsched mode by setting the PCM info BATCH flag.
++ */
++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
++ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
++
+ if (chip->align_buffer_size)
+ /* constrain buffer sizes to be multiple of 128
+ bytes. This is more efficient in terms of memory
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 3aa5c957ffbf..863695d025af 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -31,7 +31,7 @@
+ /* 14 unused */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
+-/* 17 unused */
++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
+ #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
+ #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
+ #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index d438c450f04d..fb8f452a1c78 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -64,6 +64,7 @@ enum {
+ POS_FIX_VIACOMBO,
+ POS_FIX_COMBO,
+ POS_FIX_SKL,
++ POS_FIX_FIFO,
+ };
+
+ /* Defines for ATI HD Audio support in SB450 south bridge */
+@@ -135,7 +136,7 @@ module_param_array(model, charp, NULL, 0444);
+ MODULE_PARM_DESC(model, "Use the given board model.");
+ module_param_array(position_fix, int, NULL, 0444);
+ MODULE_PARM_DESC(position_fix, "DMA pointer read method."
+- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
+ module_param_array(bdl_pos_adj, int, NULL, 0644);
+ MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
+ module_param_array(probe_mask, int, NULL, 0444);
+@@ -332,6 +333,11 @@ enum {
+ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+
++/* quirks for AMD SB */
++#define AZX_DCAPS_PRESET_AMD_SB \
++ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
++
+ /* quirks for Nvidia */
+ #define AZX_DCAPS_PRESET_NVIDIA \
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+@@ -841,6 +847,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
+ return bound_pos + mod_dma_pos;
+ }
+
++#define AMD_FIFO_SIZE 32
++
++/* get the current DMA position with FIFO size correction */
++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
++{
++ struct snd_pcm_substream *substream = azx_dev->core.substream;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned int pos, delay;
++
++ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
++ if (!runtime)
++ return pos;
++
++ runtime->delay = AMD_FIFO_SIZE;
++ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
++ if (azx_dev->insufficient) {
++ if (pos < delay) {
++ delay = pos;
++ runtime->delay = bytes_to_frames(runtime, pos);
++ } else {
++ azx_dev->insufficient = 0;
++ }
++ }
++
++ /* correct the DMA position for capture stream */
++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
++ if (pos < delay)
++ pos += azx_dev->core.bufsize;
++ pos -= delay;
++ }
++
++ return pos;
++}
++
++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
++ unsigned int pos)
++{
++ struct snd_pcm_substream *substream = azx_dev->core.substream;
++
++	/* just read back the value calculated above */
++ return substream->runtime->delay;
++}
++
+ static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
+ struct azx_dev *azx_dev)
+ {
+@@ -1417,6 +1466,7 @@ static int check_position_fix(struct azx *chip, int fix)
+ case POS_FIX_VIACOMBO:
+ case POS_FIX_COMBO:
+ case POS_FIX_SKL:
++ case POS_FIX_FIFO:
+ return fix;
+ }
+
+@@ -1433,6 +1483,10 @@ static int check_position_fix(struct azx *chip, int fix)
+ dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
+ return POS_FIX_VIACOMBO;
+ }
++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
++ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
++ return POS_FIX_FIFO;
++ }
+ if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+ dev_dbg(chip->card->dev, "Using LPIB position fix\n");
+ return POS_FIX_LPIB;
+@@ -1453,6 +1507,7 @@ static void assign_position_fix(struct azx *chip, int fix)
+ [POS_FIX_VIACOMBO] = azx_via_get_position,
+ [POS_FIX_COMBO] = azx_get_pos_lpib,
+ [POS_FIX_SKL] = azx_get_pos_skl,
++ [POS_FIX_FIFO] = azx_get_pos_fifo,
+ };
+
+ chip->get_position[0] = chip->get_position[1] = callbacks[fix];
+@@ -1467,6 +1522,9 @@ static void assign_position_fix(struct azx *chip, int fix)
+ azx_get_delay_from_lpib;
+ }
+
++ if (fix == POS_FIX_FIFO)
++ chip->get_delay[0] = chip->get_delay[1] =
++ azx_get_delay_from_fifo;
+ }
+
+ /*
+@@ -2444,6 +2502,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* AMD Hudson */
+ { PCI_DEVICE(0x1022, 0x780d),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
++ /* AMD, X370 & co */
++ { PCI_DEVICE(0x1022, 0x1457),
++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD Stoney */
+ { PCI_DEVICE(0x1022, 0x157a),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
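azx_get_pos_fifo() above reads the LPIB counter and then compensates for a 32-sample FIFO; for capture streams the reported position is pulled back by the FIFO delay, wrapping around the ring buffer. The wrap-around step in isolation (byte units, illustrative values):

    #include <stdio.h>

    /* same arithmetic as the SNDRV_PCM_STREAM_CAPTURE branch in
     * azx_get_pos_fifo(): step 'pos' back by 'delay' bytes modulo
     * the ring buffer size */
    static unsigned int capture_pos(unsigned int pos, unsigned int delay,
                                    unsigned int bufsize)
    {
            if (pos < delay)
                    pos += bufsize;
            return pos - delay;
    }

    int main(void)
    {
            /* pos 16 with a 128-byte delay in a 4096-byte ring
             * wraps to 4096 + 16 - 128 = 3984 */
            printf("%u\n", capture_pos(16, 128, 4096));
            return 0;
    }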
+diff --git a/sound/sound_core.c b/sound/sound_core.c
+index b730d97c4de6..90d118cd9164 100644
+--- a/sound/sound_core.c
++++ b/sound/sound_core.c
+@@ -275,7 +275,8 @@ retry:
+ goto retry;
+ }
+ spin_unlock(&sound_loader_lock);
+- return -EBUSY;
++ r = -EBUSY;
++ goto fail;
+ }
+ }
+
+diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
+index 14fc1e1d5d13..c406497c5919 100644
+--- a/sound/usb/hiface/pcm.c
++++ b/sound/usb/hiface/pcm.c
+@@ -600,14 +600,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
+ ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
+ hiface_pcm_out_urb_handler);
+ if (ret < 0)
+- return ret;
++ goto error;
+ }
+
+ ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
+ if (ret < 0) {
+- kfree(rt);
+ dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
+- return ret;
++ goto error;
+ }
+
+ pcm->private_data = rt;
+@@ -620,4 +619,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
+
+ chip->pcm = rt;
+ return 0;
++
++error:
++ for (i = 0; i < PCM_N_URBS; i++)
++ kfree(rt->out_urbs[i].buffer);
++ kfree(rt);
++ return ret;
+ }
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 7ee9d17d0143..e852c7fd6109 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1043,6 +1043,7 @@ found_clock:
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
++ kfree(fp->chmap);
+ kfree(fp->rate_table);
+ kfree(fp);
+ return NULL;
+diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
+index a19690a17291..c8c86a0c9b79 100644
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -6,8 +6,9 @@
+ #include "machine.h"
+ #include "api/fs/fs.h"
+ #include "debug.h"
++#include "symbol.h"
+
+-int arch__fix_module_text_start(u64 *start, const char *name)
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
+ {
+ u64 m_start = *start;
+ char path[PATH_MAX];
+@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
+ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+ pr_debug2("Using module %s start:%#lx\n", path, m_start);
+ *start = m_start;
++ } else {
++ /* Successful read of the modules segment text start address.
++		/* Successful read of the module's text segment start address.
++		 * Calculate the difference between the module start address
++		 * in memory and the module text segment start address.
++ * (from /proc/modules) and module text segment start
++ * address is 0x3ff8011b870 (from file above).
++ *
++ * Adjust the module size and subtract the GOT table
++ * size located at the beginning of the module.
++ */
++ *size -= (*start - m_start);
+ }
+
+ return 0;
+ }
++
++/* On s390, the kernel text segment starts at very low memory addresses,
++ * for example 0x10000, while modules are located at very high memory
++ * addresses, for example 0x3ff xxxx xxxx. The gap between the end of the
++ * kernel text segment and the beginning of the first module's text
++ * segment is very big. Therefore do not fill this gap and do not assign
++ * it to the kernel dso map.
++ */
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
++ /* Last kernel symbol mapped to end of page */
++ p->end = roundup(p->end, page_size);
++ else
++ p->end = c->start;
++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
++}
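The s390 change above shrinks the module's mapped size by the distance between the load address from /proc/modules and the text segment start read from sysfs, so the GOT at the front of the module is not counted as text. Using the addresses quoted in the comment (the 0x2000 size is made up for the example):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t m_start = 0x3ff8011b000ULL; /* /proc/modules */
            uint64_t start   = 0x3ff8011b870ULL; /* sysfs .text start */
            uint64_t size    = 0x2000;           /* hypothetical */

            size -= start - m_start;             /* 0x2000 - 0x870 */
            printf("text start %#llx size %#llx\n",
                   (unsigned long long)start,
                   (unsigned long long)size);    /* size 0x1790 */
            return 0;
    }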
+diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
+index 8bb124e55c6d..2c376f5b2120 100644
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv)
+
+ ret = perf_add_probe_events(params.events, params.nevents);
+ if (ret < 0) {
++
++ /*
++ * When perf_add_probe_events() fails it calls
++ * cleanup_perf_probe_events(pevs, npevs), i.e.
++ * cleanup_perf_probe_events(params.events, params.nevents), which
++		 * will call clear_perf_probe_event(); set nevents to zero so
++		 * that cleanup_params() does not call clear_perf_probe_event()
++		 * again on the same pevs.
++ */
++ params.nevents = 0;
+ pr_err_with_code(" Error: Failed to add events.", ret);
+ return ret;
+ }
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index d089eb706d18..4380474c8c35 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1057,7 +1057,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
+
+ printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
+ if (ip == end) {
+- printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
++ printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp,
+ &total_cycles);
+ if (PRINT_FIELD(SRCCODE))
+ printed += print_srccode(thread, x.cpumode, ip);
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index e28002d90573..c6c550dbb947 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -607,7 +607,13 @@ try_again:
+ * group leaders.
+ */
+ read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
+- perf_evlist__close(evsel_list);
++
++ /*
++	 * We need to keep evsel_list alive, because it's processed
++	 * later; it will be closed afterwards.
++ */
++ if (!STAT_RECORD)
++ perf_evlist__close(evsel_list);
+
+ return WEXITSTATUS(status);
+ }
+@@ -1922,6 +1928,7 @@ int cmd_stat(int argc, const char **argv)
+ perf_session__write_header(perf_stat.session, evsel_list, fd, true);
+ }
+
++ perf_evlist__close(evsel_list);
+ perf_session__delete(perf_stat.session);
+ }
+
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 2c46f9aa416c..b854541604df 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1282,6 +1282,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
++ evsel->ids = 0;
+ }
+
+ static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
+@@ -2074,6 +2075,7 @@ void perf_evsel__close(struct perf_evsel *evsel)
+
+ perf_evsel__close_fd(evsel);
+ perf_evsel__free_fd(evsel);
++ perf_evsel__free_id(evsel);
+ }
+
+ int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index b82d4577d969..e84b70be3fc1 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3666,7 +3666,7 @@ int perf_event__process_feature(struct perf_session *session,
+ return 0;
+
+ ff.buf = (void *)fe->data;
+- ff.size = event->header.size - sizeof(event->header);
++ ff.size = event->header.size - sizeof(*fe);
+ ff.ph = &session->header;
+
+ if (feat_ops[feat].process(&ff, NULL))
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index dc7aafe45a2b..081fe4bdebaa 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1365,6 +1365,7 @@ static int machine__set_modules_path(struct machine *machine)
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ }
+ int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
++ u64 *size __maybe_unused,
+ const char *name __maybe_unused)
+ {
+ return 0;
+@@ -1376,7 +1377,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
+ struct machine *machine = arg;
+ struct map *map;
+
+- if (arch__fix_module_text_start(&start, name) < 0)
++ if (arch__fix_module_text_start(&start, &size, name) < 0)
+ return -1;
+
+ map = machine__findnew_module_map(machine, start, name);
+diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
+index f70ab98a7bde..7aa38da26427 100644
+--- a/tools/perf/util/machine.h
++++ b/tools/perf/util/machine.h
+@@ -222,7 +222,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
+
+ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+ const char *filename);
+-int arch__fix_module_text_start(u64 *start, const char *name);
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
+
+ int machine__load_kallsyms(struct machine *machine, const char *filename);
+
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 2e61dd6a3574..d78984096044 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ void *src;
+ size_t decomp_size, src_size;
+ u64 decomp_last_rem = 0;
+- size_t decomp_len = session->header.env.comp_mmap_len;
++ size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
+ struct decomp *decomp, *decomp_last = session->decomp_last;
+
+- decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
++ if (decomp_last) {
++ decomp_last_rem = decomp_last->size - decomp_last->head;
++ decomp_len += decomp_last_rem;
++ }
++
++ mmap_len = sizeof(struct decomp) + decomp_len;
++ decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (decomp == MAP_FAILED) {
+ pr_err("Couldn't allocate memory for decompression\n");
+@@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ }
+
+ decomp->file_pos = file_offset;
++ decomp->mmap_len = mmap_len;
+ decomp->head = 0;
+
+- if (decomp_last) {
+- decomp_last_rem = decomp_last->size - decomp_last->head;
++ if (decomp_last_rem) {
+ memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
+ decomp->size = decomp_last_rem;
+ }
+@@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
+ &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
+ if (!decomp_size) {
+- munmap(decomp, sizeof(struct decomp) + decomp_len);
++ munmap(decomp, mmap_len);
+ pr_err("Couldn't decompress data\n");
+ return -1;
+ }
+@@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session)
+ static void perf_session__release_decomp_events(struct perf_session *session)
+ {
+ struct decomp *next, *decomp;
+- size_t decomp_len;
++ size_t mmap_len;
+ next = session->decomp;
+- decomp_len = session->header.env.comp_mmap_len;
+ do {
+ decomp = next;
+ if (decomp == NULL)
+ break;
+ next = decomp->next;
+- munmap(decomp, decomp_len + sizeof(struct decomp));
++ mmap_len = decomp->mmap_len;
++ munmap(decomp, mmap_len);
+ } while (1);
+ }
+
+diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
+index dd8920b745bc..863dbad87849 100644
+--- a/tools/perf/util/session.h
++++ b/tools/perf/util/session.h
+@@ -46,6 +46,7 @@ struct perf_session {
+ struct decomp {
+ struct decomp *next;
+ u64 file_pos;
++ size_t mmap_len;
+ u64 head;
+ size_t size;
+ char data[];
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 5cbad55cd99d..3b49eb4e3ed9 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -91,6 +91,11 @@ static int prefix_underscores_count(const char *str)
+ return tail - str;
+ }
+
++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ p->end = c->start;
++}
++
+ const char * __weak arch__normalize_symbol_name(const char *name)
+ {
+ return name;
+@@ -217,7 +222,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start && prev->end != curr->start)
+- prev->end = curr->start;
++ arch__symbols__fixup_end(prev, curr);
+ }
+
+ /* Last entry */
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index 9a8fe012910a..f30ab608ea54 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -277,6 +277,7 @@ const char *arch__normalize_symbol_name(const char *name);
+ #define SYMBOL_A 0
+ #define SYMBOL_B 1
+
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
+ int arch__compare_symbol_names(const char *namea, const char *nameb);
+ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
+ unsigned int n);
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index b413ba5b9835..4a9f88d9b7ab 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -197,14 +197,24 @@ struct comm *thread__comm(const struct thread *thread)
+
+ struct comm *thread__exec_comm(const struct thread *thread)
+ {
+- struct comm *comm, *last = NULL;
++ struct comm *comm, *last = NULL, *second_last = NULL;
+
+ list_for_each_entry(comm, &thread->comm_list, list) {
+ if (comm->exec)
+ return comm;
++ second_last = last;
+ last = comm;
+ }
+
++ /*
++ * 'last' with no start time might be the parent's comm of a synthesized
++ * thread (created by processing a synthesized fork event). For a main
++ * thread, that is very probably wrong. Prefer a later comm to avoid
++ * that case.
++ */
++ if (second_last && !last->start && thread->pid_ == thread->tid)
++ return second_last;
++
+ return last;
+ }
+
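
The heuristic above can be exercised outside perf: walk the comm list tracking the last two entries, and fall back to the newer one when the tail entry (the one a synthesized fork event would have copied from the parent) carries no start time. A self-contained sketch with a hand-rolled list in place of list_for_each_entry; all names are illustrative:

#include <stdio.h>

struct comm {
	struct comm *next;	/* newest first, oldest at the tail */
	const char *str;
	unsigned long start;	/* 0 stands in for "no start time" */
	int exec;
};

static struct comm *pick_comm(struct comm *head, int is_main_thread)
{
	struct comm *c, *last = NULL, *second_last = NULL;

	for (c = head; c; c = c->next) {
		if (c->exec)
			return c;
		second_last = last;
		last = c;
	}
	/* a tail with no start time was probably inherited from the parent */
	if (second_last && !last->start && is_main_thread)
		return second_last;
	return last;
}

int main(void)
{
	struct comm inherited = { NULL, "parent-comm", 0, 0 };
	struct comm newer = { &inherited, "real-comm", 100, 0 };

	printf("%s\n", pick_comm(&newer, 1)->str);	/* prints real-comm */
	return 0;
}
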
+diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
+index 23bdb9884576..d2202392ffdb 100644
+--- a/tools/perf/util/zstd.c
++++ b/tools/perf/util/zstd.c
+@@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
+ while (input.pos < input.size) {
+ ret = ZSTD_decompressStream(data->dstream, &output, &input);
+ if (ZSTD_isError(ret)) {
+- pr_err("failed to decompress (B): %ld -> %ld : %s\n",
+- src_size, output.size, ZSTD_getErrorName(ret));
++ pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
++ src_size, output.size, dst_size, ZSTD_getErrorName(ret));
+ break;
+ }
+ output.dst = dst + output.pos;
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index bd5c55916d0d..9b778c51af1b 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * If we're about to block (most likely because we've just hit a
++ * WFI), we need to sync back the state of the GIC CPU interface
++	 * so that we have the latest PMR and group enables. This ensures
++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
++ * whether we have pending interrupts.
++ */
++ preempt_disable();
++ kvm_vgic_vmcr_sync(vcpu);
++ preempt_enable();
++
+ kvm_vgic_v4_enable_doorbell(vcpu);
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 6dd5ad706c92..96aab77d0471 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
+ kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
+
+-void vgic_v2_put(struct kvm_vcpu *vcpu)
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
++}
++
++void vgic_v2_put(struct kvm_vcpu *vcpu)
++{
++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
++
++ vgic_v2_vmcr_sync(vcpu);
+ cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
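
The shape of this change is a classic split: the state-capture half of the put path becomes a function of its own, so the blocking path can call it without doing a full put. A tiny sketch of the same refactor, with plain fields standing in for the GICH_VMCR/GICH_APR register reads; all names are illustrative:

#include <stdio.h>

struct cpu_if {
	unsigned int vmcr_hw, apr_hw;		/* "hardware" registers */
	unsigned int vmcr_cached, apr_cached;	/* in-memory copies */
};

/* the reusable half: capture only VMCR */
static void vmcr_sync(struct cpu_if *c)
{
	c->vmcr_cached = c->vmcr_hw;
}

/* the full put path reuses the sync step, then saves the rest */
static void vgic_put(struct cpu_if *c)
{
	vmcr_sync(c);
	c->apr_cached = c->apr_hw;
}

int main(void)
{
	struct cpu_if c = { .vmcr_hw = 0xa5, .apr_hw = 0x5a };

	vmcr_sync(&c);				/* e.g. before blocking on WFI */
	printf("vmcr=%#x\n", c.vmcr_cached);
	vgic_put(&c);
	printf("vmcr=%#x apr=%#x\n", c.vmcr_cached, c.apr_cached);
	return 0;
}
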
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index c2c9ce009f63..0c653a1e5215 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
+ __vgic_v3_activate_traps(vcpu);
+ }
+
+-void vgic_v3_put(struct kvm_vcpu *vcpu)
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (likely(cpu_if->vgic_sre))
+ cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
++}
++
++void vgic_v3_put(struct kvm_vcpu *vcpu)
++{
++ vgic_v3_vmcr_sync(vcpu);
+
+ kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 04786c8ec77e..13d4b38a94ec 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
+ vgic_v3_put(vcpu);
+ }
+
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
++{
++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
++ return;
++
++ if (kvm_vgic_global_state.type == VGIC_V2)
++ vgic_v2_vmcr_sync(vcpu);
++ else
++ vgic_v3_vmcr_sync(vcpu);
++}
++
+ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
+index 57205beaa981..11adbdac1d56 100644
+--- a/virt/kvm/arm/vgic/vgic.h
++++ b/virt/kvm/arm/vgic/vgic.h
+@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ void vgic_v2_save_state(struct kvm_vcpu *vcpu);
+ void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+ void vgic_v3_put(struct kvm_vcpu *vcpu);
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ bool vgic_has_its(struct kvm *kvm);
+ int kvm_vgic_register_its_device(void);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e629766f0ec8..7e0d18ac62bf 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2475,6 +2475,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+ #endif
+ }
+
++/*
++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
++ * a vcpu_load/vcpu_put pair. However, for most architectures
++ * kvm_arch_vcpu_runnable does not require vcpu_load.
++ */
++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (kvm_arch_dy_runnable(vcpu))
++ return true;
++
++#ifdef CONFIG_KVM_ASYNC_PF
++ if (!list_empty_careful(&vcpu->async_pf.done))
++ return true;
++#endif
++
++ return false;
++}
++
+ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ struct kvm *kvm = me->kvm;
+@@ -2504,7 +2527,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ continue;
+ if (vcpu == me)
+ continue;
+- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
+ continue;
+ if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+ continue;
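
kvm_arch_dy_runnable is given a __weak generic body, the usual kernel idiom for a default that an architecture may override with a strong definition at link time. The mechanism is plain GCC/Clang and can be shown in two userspace translation units; compile the commented-out second file as well to see the override take effect (file names are illustrative):

/* main.c */
#include <stdio.h>
#include <stdbool.h>

/* default; any strong arch_dy_runnable() elsewhere wins at link time */
__attribute__((weak)) bool arch_dy_runnable(void)
{
	return false;
}

static bool dy_runnable(void)
{
	if (arch_dy_runnable())
		return true;
	/* generic checks (e.g. completed async work) would follow here */
	return false;
}

int main(void)
{
	printf("runnable: %d\n", dy_runnable());
	return 0;
}

/* arch.c - optional strong override:
#include <stdbool.h>
bool arch_dy_runnable(void) { return true; }
*/
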
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-09 17:47 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-08-09 17:47 UTC (permalink / raw
To: gentoo-commits
commit: 387de4a9273f6e5a4bc69bbc9bcf5d4f4bdce3ea
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Aug 9 17:47:28 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Aug 9 17:47:28 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=387de4a9
Linux patch 5.2.8
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1007_linux-5.2.8.patch | 2838 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2842 insertions(+)
diff --git a/0000_README b/0000_README
index 139084e..6e8d29d 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1006_linux-5.2.7.patch
From: https://www.kernel.org
Desc: Linux 5.2.7
+Patch: 1007_linux-5.2.8.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.8
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1007_linux-5.2.8.patch b/1007_linux-5.2.8.patch
new file mode 100644
index 0000000..25fd638
--- /dev/null
+++ b/1007_linux-5.2.8.patch
@@ -0,0 +1,2838 @@
+diff --git a/Makefile b/Makefile
+index 359a6b49e576..bad87c4c8117 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index 302cf0ba1600..8c7a996d1f16 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -63,6 +63,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/vmalloc.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include "iphase.h"
+ #include "suni.h"
+ #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
+@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+ }
+ if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
+ board = ia_cmds.status;
+- if ((board < 0) || (board > iadev_count))
+- board = 0;
++
++ if ((board < 0) || (board > iadev_count))
++ board = 0;
++ board = array_index_nospec(board, iadev_count + 1);
++
+ iadev = ia_dev[board];
+ switch (ia_cmds.cmd) {
+ case MEMDUMP:
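
The added array_index_nospec() call closes a Spectre-v1 window: after the bounds check, the index is additionally clamped with a branchless mask, so a mispredicted check cannot drive an out-of-bounds load speculatively. The generic kernel mask can be approximated in userspace like this (assumes an arithmetic right shift on signed long, as GCC and Clang provide):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))

/* all ones if index < size, zero otherwise - no branch involved */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - index - 1)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	unsigned long size = 8, idx;

	for (idx = 0; idx < 12; idx += 3)
		printf("idx=%2lu -> %lu\n", idx, idx & index_mask(idx, size));
	return 0;
}
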
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 1dc8d03ff127..ee6fa75d65a2 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -762,7 +762,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+ }
+
+ if (bdb->version >= 226) {
+- u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
++ u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
+
+ wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ switch (wakeup_time) {
+diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
+index fdbbb9a53804..796c070bbe6f 100644
+--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
++++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
+@@ -772,13 +772,13 @@ struct psr_table {
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+-
+- /* PSR2 TP2/TP3 wakeup time for 16 panels */
+- u32 psr2_tp2_tp3_wakeup_time;
+ } __packed;
+
+ struct bdb_psr {
+ struct psr_table psr_table[16];
++
++ /* PSR2 TP2/TP3 wakeup time for 16 panels */
++ u32 psr2_tp2_tp3_wakeup_time;
+ } __packed;
+
+ /*
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index bfc584ada4eb..34a812025b94 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -568,6 +568,7 @@
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+
+ #define USB_VENDOR_ID_HUION 0x256c
+ #define USB_DEVICE_ID_HUION_TABLET 0x006e
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 1549c7a2f04c..5b669f7d653f 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -91,6 +91,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 489436503e49..926c597f5f46 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -533,14 +533,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
+ */
+ buttons = (data[4] << 1) | (data[3] & 0x01);
+ } else if (features->type == CINTIQ_COMPANION_2) {
+- /* d-pad right -> data[4] & 0x10
+- * d-pad up -> data[4] & 0x20
+- * d-pad left -> data[4] & 0x40
+- * d-pad down -> data[4] & 0x80
+- * d-pad center -> data[3] & 0x01
++ /* d-pad right -> data[2] & 0x10
++ * d-pad up -> data[2] & 0x20
++ * d-pad left -> data[2] & 0x40
++ * d-pad down -> data[2] & 0x80
++ * d-pad center -> data[1] & 0x01
+ */
+ buttons = ((data[2] >> 4) << 7) |
+- ((data[1] & 0x04) << 6) |
++ ((data[1] & 0x04) << 4) |
+ ((data[2] & 0x0F) << 2) |
+ (data[1] & 0x03);
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index bf39fc83d577..4039a9599d79 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1934,8 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ }
+
+ /* select a non-FCoE queue */
+- return netdev_pick_tx(dev, skb, NULL) %
+- (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
++ return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
+ }
+
+ void bnx2x_set_num_queues(struct bnx2x *bp)
+diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
+index ee7857298361..aca878a3f81f 100644
+--- a/drivers/net/ethernet/marvell/mvmdio.c
++++ b/drivers/net/ethernet/marvell/mvmdio.c
+@@ -319,15 +319,31 @@ static int orion_mdio_probe(struct platform_device *pdev)
+
+ init_waitqueue_head(&dev->smi_busy_wait);
+
+- for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+- dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+- if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
++ if (pdev->dev.of_node) {
++ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
++ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
++ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
++ ret = -EPROBE_DEFER;
++ goto out_clk;
++ }
++ if (IS_ERR(dev->clk[i]))
++ break;
++ clk_prepare_enable(dev->clk[i]);
++ }
++
++ if (!IS_ERR(of_clk_get(pdev->dev.of_node,
++ ARRAY_SIZE(dev->clk))))
++ dev_warn(&pdev->dev,
++ "unsupported number of clocks, limiting to the first "
++ __stringify(ARRAY_SIZE(dev->clk)) "\n");
++ } else {
++ dev->clk[0] = clk_get(&pdev->dev, NULL);
++ if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+- if (IS_ERR(dev->clk[i]))
+- break;
+- clk_prepare_enable(dev->clk[i]);
++ if (!IS_ERR(dev->clk[0]))
++ clk_prepare_enable(dev->clk[0]);
+ }
+
+ dev->err_interrupt = platform_get_irq(pdev, 0);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+index d8e5241097a9..50ed1bdb632d 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -3609,6 +3609,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+ {
+ struct mvpp2_port *port = netdev_priv(dev);
++ bool running = netif_running(dev);
+ int err;
+
+ if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+@@ -3617,40 +3618,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+ mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ }
+
+- if (!netif_running(dev)) {
+- err = mvpp2_bm_update_mtu(dev, mtu);
+- if (!err) {
+- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+- return 0;
+- }
+-
+- /* Reconfigure BM to the original MTU */
+- err = mvpp2_bm_update_mtu(dev, dev->mtu);
+- if (err)
+- goto log_error;
+- }
+-
+- mvpp2_stop_dev(port);
++ if (running)
++ mvpp2_stop_dev(port);
+
+ err = mvpp2_bm_update_mtu(dev, mtu);
+- if (!err) {
++ if (err) {
++ netdev_err(dev, "failed to change MTU\n");
++ /* Reconfigure BM to the original MTU */
++ mvpp2_bm_update_mtu(dev, dev->mtu);
++ } else {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+- goto out_start;
+ }
+
+- /* Reconfigure BM to the original MTU */
+- err = mvpp2_bm_update_mtu(dev, dev->mtu);
+- if (err)
+- goto log_error;
+-
+-out_start:
+- mvpp2_start_dev(port);
+- mvpp2_egress_enable(port);
+- mvpp2_ingress_enable(port);
++ if (running) {
++ mvpp2_start_dev(port);
++ mvpp2_egress_enable(port);
++ mvpp2_ingress_enable(port);
++ }
+
+- return 0;
+-log_error:
+- netdev_err(dev, "failed to change MTU\n");
+ return err;
+ }
+
+@@ -5609,9 +5594,6 @@ static int mvpp2_remove(struct platform_device *pdev)
+
+ mvpp2_dbgfs_cleanup(priv);
+
+- flush_workqueue(priv->stats_queue);
+- destroy_workqueue(priv->stats_queue);
+-
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (priv->port_list[i]) {
+ mutex_destroy(&priv->port_list[i]->gather_stats_lock);
+@@ -5620,6 +5602,8 @@ static int mvpp2_remove(struct platform_device *pdev)
+ i++;
+ }
+
++ destroy_workqueue(priv->stats_queue);
++
+ for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+ struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index f6b1da99e6c2..ba5f46da1c5c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+- list_for_each_entry(intf, &intf_list, list)
++ list_for_each_entry_reverse(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&mlx5_intf_mutex);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+index d5e5afbdca6d..f777994f3005 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ };
+
+ static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
+- const u32 **arr, u32 *size)
++ const u32 **arr, u32 *size,
++ bool force_legacy)
+ {
+- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
++ bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+
+ *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
+ ARRAY_SIZE(mlx5e_link_speed);
+@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ }
+
+-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
++u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
++ bool force_legacy)
+ {
+ unsigned long temp = eth_proto_oper;
+ const u32 *table;
+@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
+ u32 max_size;
+ int i;
+
+- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
++ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ i = find_first_bit(&temp, max_size);
+ if (i < max_size)
+ speed = table[i];
+@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
+ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+ {
+ struct mlx5e_port_eth_proto eproto;
++ bool force_legacy = false;
+ bool ext;
+ int err;
+
+@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
+ if (err)
+ goto out;
+-
+- *speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
++ if (ext && !eproto.admin) {
++ force_legacy = true;
++ err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
++ if (err)
++ goto out;
++ }
++ *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
+ if (!(*speed))
+ err = -EINVAL;
+
+@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+ if (err)
+ return err;
+
+- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
++ mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
+ for (i = 0; i < max_size; ++i)
+ if (eproto.cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, table[i]);
+@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+ return 0;
+ }
+
+-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
++u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
++ bool force_legacy)
+ {
+ u32 link_modes = 0;
+ const u32 *table;
+ u32 max_size;
+ int i;
+
+- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
++ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
+ for (i = 0; i < max_size; ++i) {
+ if (table[i] == speed)
+ link_modes |= MLX5E_PROT_MASK(i);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+index 70f536ec51c4..4a7f4497692b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+@@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin);
+ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, bool ext);
+-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
++u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
++ bool force_legacy);
+ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
++u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
++ bool force_legacy);
+
+ int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
+ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index dd764e0471f2..f637d81f08bc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -764,7 +764,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
+ }
+
+ static void get_speed_duplex(struct net_device *netdev,
+- u32 eth_proto_oper,
++ u32 eth_proto_oper, bool force_legacy,
+ struct ethtool_link_ksettings *link_ksettings)
+ {
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+@@ -774,7 +774,7 @@ static void get_speed_duplex(struct net_device *netdev,
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+- speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
++ speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
+ if (!speed) {
+ speed = SPEED_UNKNOWN;
+ goto out;
+@@ -893,8 +893,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ /* Fields: eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive. Hence try reading legacy advertising
+ * when extended advertising is zero.
+- * admin_ext indicates how eth_proto_admin should be
+- * interpreted
++ * admin_ext indicates which proto_admin (ext vs. legacy)
++ * should be read and interpreted
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+@@ -903,7 +903,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ admin_ext = false;
+ }
+
+- eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
++ eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
+ eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+@@ -918,7 +918,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ get_supported(mdev, eth_proto_cap, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
+- get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
++ get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
++ link_ksettings);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+@@ -995,45 +996,69 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
+ return ptys_modes;
+ }
+
++static bool ext_link_mode_requested(const unsigned long *adver)
++{
++#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
++ int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
++
++ bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
++ return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
++}
++
++static bool ext_speed_requested(u32 speed)
++{
++#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
++ return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
++}
++
++static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
++{
++ bool ext_link_mode = ext_link_mode_requested(adver);
++ bool ext_speed = ext_speed_requested(speed);
++
++ return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
++}
++
+ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
+ {
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_port_eth_proto eproto;
++ const unsigned long *adver;
+ bool an_changes = false;
+ u8 an_disable_admin;
+ bool ext_supported;
+- bool ext_requested;
+ u8 an_disable_cap;
+ bool an_disable;
+ u32 link_modes;
+ u8 an_status;
++ u8 autoneg;
+ u32 speed;
++ bool ext;
+ int err;
+
+ u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
+
+-#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
++ adver = link_ksettings->link_modes.advertising;
++ autoneg = link_ksettings->base.autoneg;
++ speed = link_ksettings->base.speed;
+
+- ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+- MLX5E_PTYS_EXT ||
+- link_ksettings->link_modes.advertising[1]);
++	ext = ext_requested(autoneg, adver, speed);
+ ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+- ext_requested &= ext_supported;
++ if (!ext_supported && ext)
++ return -EOPNOTSUPP;
+
+- speed = link_ksettings->base.speed;
+- ethtool2ptys_adver_func = ext_requested ?
+- mlx5e_ethtool2ptys_ext_adver_link :
++ ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
+ mlx5e_ethtool2ptys_adver_link;
+- err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
++ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
+ if (err) {
+ netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+- link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+- ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) :
+- mlx5e_port_speed2linkmodes(mdev, speed);
++ link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
++ mlx5e_port_speed2linkmodes(mdev, speed, !ext);
+
+ link_modes = link_modes & eproto.cap;
+ if (!link_modes) {
+@@ -1046,14 +1071,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
+ &an_disable_admin);
+
+- an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
++ an_disable = autoneg == AUTONEG_DISABLE;
+ an_changes = ((!an_disable && an_disable_admin) ||
+ (an_disable && !an_disable_admin));
+
+ if (!an_changes && link_modes == eproto.admin)
+ goto out;
+
+- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
++ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ mlx5_toggle_port_link(mdev);
+
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index a44c24280128..882d26b8095d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -340,12 +340,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+
+ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
+ {
+- struct mlx5e_wqe_frag_info next_frag, *prev;
++ struct mlx5e_wqe_frag_info next_frag = {};
++ struct mlx5e_wqe_frag_info *prev = NULL;
+ int i;
+
+ next_frag.di = &rq->wqe.di[0];
+- next_frag.offset = 0;
+- prev = NULL;
+
+ for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
+ struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index e40c60d1631f..ee95f96ead4e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -1210,13 +1210,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ {
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+- u64 bytes, packets, lastuse = 0;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_encap_entry *e;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
++ u64 lastuse;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+@@ -1236,7 +1236,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ encaps[efi->index]);
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5e_tc_get_counter(flow);
+- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
++ lastuse = mlx5_fc_query_lastuse(counter);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+index a08c3d09a50f..2664a05eee00 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+@@ -68,7 +68,7 @@ enum fs_flow_table_type {
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+- FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
++ FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
+ };
+
+ enum fs_flow_table_op_mod {
+@@ -274,7 +274,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+ (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
+ (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
+- (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
++ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
++ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
+ )
+
+ #endif
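
The updated macro keeps its self-check: the final BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE) arm forces a compile error the next time a table type is appended without extending the dispatch chain. The trick is a negative-width bitfield; a minimal reproduction in GNU C, using the same BUILD_BUG_ON_ZERO definition as the kernel (enum names here are illustrative):

#include <stdio.h>

#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))

enum ft_type { FT_A, FT_B, FT_MAX_TYPE = FT_B };

/* extend the enum without extending this chain -> negative bitfield
 * width -> compile error, exactly like the FS_FT_MAX_TYPE guard */
#define FT_CAP(type) \
	((type) == FT_A ? 1 : \
	 (type) == FT_B ? 2 : \
	 BUILD_BUG_ON_ZERO(FT_B != FT_MAX_TYPE))

int main(void)
{
	printf("%d\n", FT_CAP(FT_B));
	return 0;
}
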
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+index c6c28f56aa29..add9db67028f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+@@ -367,6 +367,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+ }
+ EXPORT_SYMBOL(mlx5_fc_query);
+
++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
++{
++ return counter->cache.lastuse;
++}
++
+ void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+ {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 23204356ad88..d51442e63aba 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -5989,7 +5989,7 @@ static int __init mlxsw_sp_module_init(void)
+ return 0;
+
+ err_sp2_pci_driver_register:
+- mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
++ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
+ err_sp2_core_driver_register:
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+index 1537f70bc26d..888ba4300bcc 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
+ };
+
+-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752
+-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752
++#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
++#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
+ #define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
+
+ /* Order according to mlxsw_sp2_sb_pool_dess */
+diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
+index 02ad11e0b0d8..58e76e7cb0d6 100644
+--- a/drivers/net/ethernet/mscc/ocelot.c
++++ b/drivers/net/ethernet/mscc/ocelot.c
+@@ -1797,6 +1797,7 @@ EXPORT_SYMBOL(ocelot_init);
+
+ void ocelot_deinit(struct ocelot *ocelot)
+ {
++ cancel_delayed_work(&ocelot->stats_work);
+ destroy_workqueue(ocelot->stats_queue);
+ mutex_destroy(&ocelot->stats_lock);
+ }
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+index 4bf20d0651c4..90ad5694e2af 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+@@ -51,7 +51,7 @@ struct rmnet_map_dl_csum_trailer {
+ struct rmnet_map_ul_csum_header {
+ __be16 csum_start_offset;
+ u16 csum_insert_offset:14;
+- u16 udp_ip4_ind:1;
++ u16 udp_ind:1;
+ u16 csum_enabled:1;
+ } __aligned(1);
+
+diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+index 60189923737a..21d38167f961 100644
+--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+@@ -206,9 +206,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+- ul_header->udp_ip4_ind = 1;
++ ul_header->udp_ind = 1;
+ else
+- ul_header->udp_ip4_ind = 0;
++ ul_header->udp_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+@@ -239,6 +239,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+ {
++ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ __be16 *hdr = (__be16 *)ul_header, offset;
+
+ offset = htons((__force u16)(skb_transport_header(skb) -
+@@ -246,7 +247,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ ul_header->csum_start_offset = offset;
+ ul_header->csum_insert_offset = skb->csum_offset;
+ ul_header->csum_enabled = 1;
+- ul_header->udp_ip4_ind = 0;
++
++ if (ip6h->nexthdr == IPPROTO_UDP)
++ ul_header->udp_ind = 1;
++ else
++ ul_header->udp_ind = 0;
+
+ /* Changing remaining fields to network order */
+ hdr++;
+@@ -419,7 +424,7 @@ sw_csum:
+ ul_header->csum_start_offset = 0;
+ ul_header->csum_insert_offset = 0;
+ ul_header->csum_enabled = 0;
+- ul_header->udp_ip4_ind = 0;
++ ul_header->udp_ind = 0;
+
+ priv->stats.csum_sw++;
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 96637fcbe65d..36261b2959b4 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -7050,13 +7050,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
+ {
+ unsigned int flags;
+
+- if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
++ switch (tp->mac_version) {
++ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
+ rtl_unlock_config_regs(tp);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
+ rtl_lock_config_regs(tp);
++ /* fall through */
++ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
+ flags = PCI_IRQ_LEGACY;
+- } else {
++ break;
++ default:
+ flags = PCI_IRQ_ALL_TYPES;
++ break;
+ }
+
+ return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
+diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
+index 3e5bc1fc3c46..c245a0f15066 100644
+--- a/drivers/net/ethernet/rocker/rocker_main.c
++++ b/drivers/net/ethernet/rocker/rocker_main.c
+@@ -2208,6 +2208,7 @@ static int rocker_router_fib_event(struct notifier_block *nb,
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
++ kfree(fib_work);
+ return notifier_from_errno(-EINVAL);
+ }
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 932e54e25b71..b14f46a57154 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -4374,8 +4374,9 @@ int stmmac_dvr_probe(struct device *device,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+- netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
+- NAPI_POLL_WEIGHT);
++ netif_tx_napi_add(ndev, &ch->tx_napi,
++ stmmac_napi_poll_tx,
++ NAPI_POLL_WEIGHT);
+ }
+ }
+
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+index 3ffe46df249e..7c5265fd2b94 100644
+--- a/drivers/net/phy/fixed_phy.c
++++ b/drivers/net/phy/fixed_phy.c
+@@ -216,8 +216,10 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
+ if (IS_ERR(gpiod)) {
+ if (PTR_ERR(gpiod) == -EPROBE_DEFER)
+ return gpiod;
+- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+- fixed_link_node);
++
++ if (PTR_ERR(gpiod) != -ENOENT)
++ pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
++ fixed_link_node);
+ gpiod = NULL;
+ }
+
+diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
+index 28676af97b42..645d354ffb48 100644
+--- a/drivers/net/phy/mscc.c
++++ b/drivers/net/phy/mscc.c
+@@ -2226,8 +2226,8 @@ static int vsc8514_probe(struct phy_device *phydev)
+ vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+- sizeof(u64), GFP_KERNEL);
++ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
++ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+@@ -2251,8 +2251,8 @@ static int vsc8574_probe(struct phy_device *phydev)
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+- sizeof(u64), GFP_KERNEL);
++ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
++ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+@@ -2281,8 +2281,8 @@ static int vsc8584_probe(struct phy_device *phydev)
+ vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc8584_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
+- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+- sizeof(u64), GFP_KERNEL);
++ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
++ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+@@ -2311,8 +2311,8 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+- sizeof(u64), GFP_KERNEL);
++ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
++ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index a3f8740c6163..ffa402732aea 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1730,6 +1730,12 @@ done:
+ phydev->link = status & BMSR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
+
++	/* Consider the case where autoneg was started and the "aneg complete"
++	 * bit has been cleared, but the "link up" bit has not been cleared yet.
++ */
++ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
++ phydev->link = 0;
++
+ return 0;
+ }
+ EXPORT_SYMBOL(genphy_update_link);
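
The new check reads naturally as a truth table over two BMSR bits: a latched "link up" is only believed once "autoneg complete" is set again. A small sketch using the standard MII bit values (0x0004 and 0x0020, as in linux/mii.h):

#include <stdio.h>

#define BMSR_LSTATUS		0x0004
#define BMSR_ANEGCOMPLETE	0x0020

/* ignore a leftover link bit while autoneg is still in progress */
static int link_is_up(unsigned int bmsr, int autoneg_enabled)
{
	int link = !!(bmsr & BMSR_LSTATUS);

	if (autoneg_enabled && !(bmsr & BMSR_ANEGCOMPLETE))
		link = 0;
	return link;
}

int main(void)
{
	/* autoneg restarted: complete bit cleared, link bit still latched */
	printf("%d\n", link_is_up(BMSR_LSTATUS, 1));			/* 0 */
	printf("%d\n", link_is_up(BMSR_LSTATUS | BMSR_ANEGCOMPLETE, 1));	/* 1 */
	return 0;
}
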
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 4c0616ba314d..c45ee6e3fe01 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -195,6 +195,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
+ pl->supported, true);
+ linkmode_zero(pl->supported);
+ phylink_set(pl->supported, MII);
++ phylink_set(pl->supported, Pause);
++ phylink_set(pl->supported, Asym_Pause);
+ if (s) {
+ __set_bit(s->bit, pl->supported);
+ } else {
+@@ -912,10 +914,10 @@ void phylink_start(struct phylink *pl)
+
+ if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
+ mod_timer(&pl->link_poll, jiffies + HZ);
+- if (pl->sfp_bus)
+- sfp_upstream_start(pl->sfp_bus);
+ if (pl->phydev)
+ phy_start(pl->phydev);
++ if (pl->sfp_bus)
++ sfp_upstream_start(pl->sfp_bus);
+ }
+ EXPORT_SYMBOL_GPL(phylink_start);
+
+@@ -932,10 +934,10 @@ void phylink_stop(struct phylink *pl)
+ {
+ ASSERT_RTNL();
+
+- if (pl->phydev)
+- phy_stop(pl->phydev);
+ if (pl->sfp_bus)
+ sfp_upstream_stop(pl->sfp_bus);
++ if (pl->phydev)
++ phy_stop(pl->phydev);
+ if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
+ del_timer_sync(&pl->link_poll);
+
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 1d902ecb4aa8..a44dd3c8af63 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -1115,6 +1115,9 @@ static const struct proto_ops pppoe_ops = {
+ .recvmsg = pppoe_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppoe_proto = {
+diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
+index 5ef422a43d70..08364f10a43f 100644
+--- a/drivers/net/ppp/pppox.c
++++ b/drivers/net/ppp/pppox.c
+@@ -17,6 +17,7 @@
+ #include <linux/string.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/netdevice.h>
+ #include <linux/net.h>
+@@ -98,6 +99,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+
+ EXPORT_SYMBOL(pppox_ioctl);
+
++#ifdef CONFIG_COMPAT
++int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
++{
++ if (cmd == PPPOEIOCSFWD32)
++ cmd = PPPOEIOCSFWD;
++
++ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
++}
++
++EXPORT_SYMBOL(pppox_compat_ioctl);
++#endif
++
+ static int pppox_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+ {
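
PPPOEIOCSFWD encodes sizeof(size_t) in its ioctl number, so a 32-bit process produces a different command value than a 64-bit kernel expects; the compat handler simply rewrites the 32-bit number before dispatching to the native path. An illustrative recomputation, with unsigned int / unsigned long standing in for 32- and 64-bit size_t (only meaningful on an LP64 build):

#include <stdio.h>
#include <sys/ioctl.h>

#define PPPOEIOCSFWD32 _IOW(0xB1, 0, unsigned int)	/* 32-bit size_t */
#define PPPOEIOCSFWD64 _IOW(0xB1, 0, unsigned long)	/* 64-bit size_t */

/* translate the 32-bit command to the native one, pass others through */
static unsigned int compat_fixup(unsigned int cmd)
{
	return cmd == PPPOEIOCSFWD32 ? PPPOEIOCSFWD64 : cmd;
}

int main(void)
{
	printf("%#x -> %#x\n", PPPOEIOCSFWD32, compat_fixup(PPPOEIOCSFWD32));
	return 0;
}
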
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index a8e52c8e4128..734de7de03f7 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -623,6 +623,9 @@ static const struct proto_ops pptp_ops = {
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppox_pptp_proto = {
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index d7c55e0fa8f4..192ac47fd055 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1600,7 +1600,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
+ return true;
+ }
+
+-static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
++static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
++ struct page_frag *alloc_frag, char *buf,
+ int buflen, int len, int pad)
+ {
+ struct sk_buff *skb = build_skb(buf, buflen);
+@@ -1610,6 +1611,7 @@ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
++ skb_set_owner_w(skb, tfile->socket.sk);
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += buflen;
+@@ -1687,7 +1689,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ */
+ if (hdr->gso_type || !xdp_prog) {
+ *skb_xdp = 1;
+- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
++ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
++ pad);
+ }
+
+ *skb_xdp = 0;
+@@ -1724,7 +1727,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ rcu_read_unlock();
+ local_bh_enable();
+
+- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
++ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+
+ err_xdp:
+ put_page(alloc_frag->page);
+diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
+index e65d027b91fa..529be35ac178 100644
+--- a/drivers/nfc/nfcmrvl/main.c
++++ b/drivers/nfc/nfcmrvl/main.c
+@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+ /* Reset possible fault of previous session */
+ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
+
+- if (priv->config.reset_n_io) {
++ if (gpio_is_valid(priv->config.reset_n_io)) {
+ nfc_info(priv->dev, "reset the chip\n");
+ gpio_set_value(priv->config.reset_n_io, 0);
+ usleep_range(5000, 10000);
+@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+
+ void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
+ {
+- if (priv->config.reset_n_io)
++ if (gpio_is_valid(priv->config.reset_n_io))
+ gpio_set_value(priv->config.reset_n_io, 0);
+ }
+
+diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
+index 9a22056e8d9e..e5a622ce4b95 100644
+--- a/drivers/nfc/nfcmrvl/uart.c
++++ b/drivers/nfc/nfcmrvl/uart.c
+@@ -26,7 +26,7 @@
+ static unsigned int hci_muxed;
+ static unsigned int flow_control;
+ static unsigned int break_control;
+-static unsigned int reset_n_io;
++static int reset_n_io = -EINVAL;
+
+ /*
+ ** NFCMRVL NCI OPS
+@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
+ module_param(hci_muxed, uint, 0);
+ MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
+
+-module_param(reset_n_io, uint, 0);
++module_param(reset_n_io, int, 0);
+ MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
+diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
+index 945cc903d8f1..888e298f610b 100644
+--- a/drivers/nfc/nfcmrvl/usb.c
++++ b/drivers/nfc/nfcmrvl/usb.c
+@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
+
+ /* No configuration for USB */
+ memset(&config, 0, sizeof(config));
++ config.reset_n_io = -EINVAL;
+
+ nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
+
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index dfb93228d6a7..df41f3571dc9 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -887,10 +887,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
+ do {
+ if (nvdimm_bus->probe_active == 0)
+ break;
+- nvdimm_bus_unlock(&nvdimm_bus->dev);
++ nvdimm_bus_unlock(dev);
++ device_unlock(dev);
+ wait_event(nvdimm_bus->wait,
+ nvdimm_bus->probe_active == 0);
+- nvdimm_bus_lock(&nvdimm_bus->dev);
++ device_lock(dev);
++ nvdimm_bus_lock(dev);
+ } while (true);
+ }
+
+@@ -973,20 +975,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ int read_only, unsigned int ioctl_cmd, unsigned long arg)
+ {
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
+- static char out_env[ND_CMD_MAX_ENVELOPE];
+- static char in_env[ND_CMD_MAX_ENVELOPE];
+ const struct nd_cmd_desc *desc = NULL;
+ unsigned int cmd = _IOC_NR(ioctl_cmd);
+ struct device *dev = &nvdimm_bus->dev;
+ void __user *p = (void __user *) arg;
++ char *out_env = NULL, *in_env = NULL;
+ const char *cmd_name, *dimm_name;
+ u32 in_len = 0, out_len = 0;
+ unsigned int func = cmd;
+ unsigned long cmd_mask;
+ struct nd_cmd_pkg pkg;
+ int rc, i, cmd_rc;
++ void *buf = NULL;
+ u64 buf_len = 0;
+- void *buf;
+
+ if (nvdimm) {
+ desc = nd_cmd_dimm_desc(cmd);
+@@ -1017,7 +1018,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ case ND_CMD_ARS_START:
+ case ND_CMD_CLEAR_ERROR:
+ case ND_CMD_CALL:
+- dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
++ dev_dbg(dev, "'%s' command while read-only.\n",
+ nvdimm ? nvdimm_cmd_name(cmd)
+ : nvdimm_bus_cmd_name(cmd));
+ return -EPERM;
+@@ -1026,6 +1027,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ }
+
+ /* process an input envelope */
++ in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
++ if (!in_env)
++ return -ENOMEM;
+ for (i = 0; i < desc->in_num; i++) {
+ u32 in_size, copy;
+
+@@ -1033,14 +1037,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ if (in_size == UINT_MAX) {
+ dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
+ __func__, dimm_name, cmd_name, i);
+- return -ENXIO;
++ rc = -ENXIO;
++ goto out;
+ }
+- if (in_len < sizeof(in_env))
+- copy = min_t(u32, sizeof(in_env) - in_len, in_size);
++ if (in_len < ND_CMD_MAX_ENVELOPE)
++ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
+ else
+ copy = 0;
+- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
+- return -EFAULT;
++ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
++ rc = -EFAULT;
++ goto out;
++ }
+ in_len += in_size;
+ }
+
+@@ -1052,6 +1059,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ }
+
+ /* process an output envelope */
++ out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
++ if (!out_env) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
+ for (i = 0; i < desc->out_num; i++) {
+ u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
+ (u32 *) in_env, (u32 *) out_env, 0);
+@@ -1060,15 +1073,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ if (out_size == UINT_MAX) {
+ dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
+ dimm_name, cmd_name, i);
+- return -EFAULT;
++ rc = -EFAULT;
++ goto out;
+ }
+- if (out_len < sizeof(out_env))
+- copy = min_t(u32, sizeof(out_env) - out_len, out_size);
++ if (out_len < ND_CMD_MAX_ENVELOPE)
++ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
+ else
+ copy = 0;
+ if (copy && copy_from_user(&out_env[out_len],
+- p + in_len + out_len, copy))
+- return -EFAULT;
++ p + in_len + out_len, copy)) {
++ rc = -EFAULT;
++ goto out;
++ }
+ out_len += out_size;
+ }
+
+@@ -1076,19 +1092,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ if (buf_len > ND_IOCTL_MAX_BUFLEN) {
+ dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
+ cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
+- return -EINVAL;
++ rc = -EINVAL;
++ goto out;
+ }
+
+ buf = vmalloc(buf_len);
+- if (!buf)
+- return -ENOMEM;
++ if (!buf) {
++ rc = -ENOMEM;
++ goto out;
++ }
+
+ if (copy_from_user(buf, p, buf_len)) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+- nvdimm_bus_lock(&nvdimm_bus->dev);
++ device_lock(dev);
++ nvdimm_bus_lock(dev);
+ rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
+ if (rc)
+ goto out_unlock;
+@@ -1103,17 +1123,16 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
+ clear_err->cleared);
+ }
+- nvdimm_bus_unlock(&nvdimm_bus->dev);
+
+ if (copy_to_user(p, buf, buf_len))
+ rc = -EFAULT;
+
+- vfree(buf);
+- return rc;
+-
+- out_unlock:
+- nvdimm_bus_unlock(&nvdimm_bus->dev);
+- out:
++out_unlock:
++ nvdimm_bus_unlock(dev);
++ device_unlock(dev);
++out:
++ kfree(in_env);
++ kfree(out_env);
+ vfree(buf);
+ return rc;
+ }
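
Besides moving the envelopes off static storage (which was shared across concurrent ioctl callers) and onto the heap, the rework funnels every failure through a single unwind label. The shape of that error path in standalone form, with calloc standing in for kzalloc; names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ENVELOPE_MAX 256

static int process(const char *in, size_t in_len)
{
	char *in_env, *out_env = NULL;
	int rc = 0;

	in_env = calloc(1, ENVELOPE_MAX);
	if (!in_env)
		return -1;
	if (in_len > ENVELOPE_MAX) {
		rc = -2;
		goto out;		/* every later failure unwinds here */
	}
	memcpy(in_env, in, in_len);

	out_env = calloc(1, ENVELOPE_MAX);
	if (!out_env) {
		rc = -1;
		goto out;
	}
	/* ... real work on in_env/out_env ... */
out:
	free(in_env);			/* free(NULL) is a no-op, so this */
	free(out_env);			/* is safe for partial setups */
	return rc;
}

int main(void)
{
	printf("%d\n", process("hdr", 3));
	return 0;
}
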
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index 4fed9ce9c2fe..a15276cdec7d 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -422,10 +422,12 @@ static ssize_t available_size_show(struct device *dev,
+ * memory nvdimm_bus_lock() is dropped, but that's userspace's
+ * problem to not race itself.
+ */
++ device_lock(dev);
+ nvdimm_bus_lock(dev);
+ wait_nvdimm_bus_probe_idle(dev);
+ available = nd_region_available_dpa(nd_region);
+ nvdimm_bus_unlock(dev);
++ device_unlock(dev);
+
+ return sprintf(buf, "%llu\n", available);
+ }
+@@ -437,10 +439,12 @@ static ssize_t max_available_extent_show(struct device *dev,
+ struct nd_region *nd_region = to_nd_region(dev);
+ unsigned long long available = 0;
+
++ device_lock(dev);
+ nvdimm_bus_lock(dev);
+ wait_nvdimm_bus_probe_idle(dev);
+ available = nd_region_allocatable_dpa(nd_region);
+ nvdimm_bus_unlock(dev);
++ device_unlock(dev);
+
+ return sprintf(buf, "%llu\n", available);
+ }
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 590ec8009f52..9e444b1846ce 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
+ */
+ static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
+ {
+- return (struct fcoe_rport *)(rdata + 1);
++ return container_of(rdata, struct fcoe_rport, rdata);
+ }
+
+ /**
+@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
+ */
+ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+- struct fc_rport_priv *rdata)
++ struct fcoe_rport *frport)
+ {
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ struct fip_wwn_desc *wwn = NULL;
+ struct fip_vn_desc *vn = NULL;
+ struct fip_size_desc *size = NULL;
+- struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+- frport = fcoe_ctlr_rport(rdata);
+-
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
++ frport->rdata.ids.node_name =
++ get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ case FIP_DT_VN_ID:
+ if (dlen != sizeof(struct fip_vn_desc))
+ goto len_err;
+ vn = (struct fip_vn_desc *)desc;
+ memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
+- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
+- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
++ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
++ frport->rdata.ids.port_name =
++ get_unaligned_be64(&vn->fd_wwpn);
+ break;
+ case FIP_DT_FC4F:
+ if (dlen != sizeof(struct fip_fc4_feat))
+@@ -2738,10 +2736,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ {
+ struct fip_header *fiph;
+ enum fip_vn2vn_subcode sub;
+- struct {
+- struct fc_rport_priv rdata;
+- struct fcoe_rport frport;
+- } buf;
++ struct fcoe_rport frport = { };
+ int rc, vlan_id = 0;
+
+ fiph = (struct fip_header *)skb->data;
+@@ -2757,7 +2752,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ goto drop;
+ }
+
+- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
++ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
+ goto drop;
+@@ -2766,19 +2761,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ mutex_lock(&fip->ctlr_mutex);
+ switch (sub) {
+ case FIP_SC_VN_PROBE_REQ:
+- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
++ fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_PROBE_REP:
+- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
++ fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_NOTIFY:
+- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
++ fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_REP:
+- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
++ fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_BEACON:
+- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
++ fcoe_ctlr_vn_beacon(fip, &frport.rdata);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
+@@ -2802,22 +2797,18 @@ drop:
+ */
+ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+- struct fc_rport_priv *rdata)
++ struct fcoe_rport *frport)
+ {
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+ struct fip_mac_desc *macd = NULL;
+ struct fip_wwn_desc *wwn = NULL;
+- struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+- frport = fcoe_ctlr_rport(rdata);
+-
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+@@ -2871,7 +2862,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
++ frport->rdata.ids.node_name =
++ get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+@@ -2982,22 +2974,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ {
+ struct fip_header *fiph;
+ enum fip_vlan_subcode sub;
+- struct {
+- struct fc_rport_priv rdata;
+- struct fcoe_rport frport;
+- } buf;
++ struct fcoe_rport frport = { };
+ int rc;
+
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
++ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
+ goto drop;
+ }
+ mutex_lock(&fip->ctlr_mutex);
+ if (sub == FIP_SC_VL_REQ)
+- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
++ fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ drop:
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index e0f3852fdad1..da6e97d8dc3b 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
+ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+ {
+ struct fc_rport_priv *rdata;
++ size_t rport_priv_size = sizeof(*rdata);
+
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
+@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+ if (rdata)
+ return rdata;
+
+- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
++ if (lport->rport_priv_size > 0)
++ rport_priv_size = lport->rport_priv_size;
++ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
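The libfcoe/libfc hunks above replace the old trick of locating the FCoE private data directly behind the fc_rport_priv allocation with an explicit struct fc_rport_priv embedded as the first member of struct fcoe_rport: fcoe_ctlr_rport() becomes a type-safe container_of(), and fc_rport_create() can size the whole object from lport->rport_priv_size in a single kzalloc(). A minimal, runnable userspace sketch of the same embedding pattern (the struct and field names below are illustrative stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rport {                  /* plays the role of fc_rport_priv */
        unsigned int port_id;
};

struct fcoe_rport_demo {        /* plays the role of fcoe_rport */
        struct rport rdata;     /* first member, so one allocation of the
                                 * outer type can be handed out as the inner */
        unsigned short flags;
};

int main(void)
{
        struct fcoe_rport_demo frport = { .rdata.port_id = 42, .flags = 7 };
        struct rport *rdata = &frport.rdata;

        /* Recover the outer structure from the embedded member. */
        struct fcoe_rport_demo *back =
                container_of(rdata, struct fcoe_rport_demo, rdata);
        printf("flags=%u port_id=%u\n", back->flags, back->rdata.port_id);
        return 0;
}

Compared with the old (struct fcoe_rport *)(rdata + 1) arithmetic, the embedded-member form does not depend on the two objects being adjacent and gives the compiler a real type to check.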
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 402c1efcd762..6435b8652159 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -764,7 +764,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+
+ /* handle all the 3-wire mode */
+- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
++ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
++ tfr->rx_buf != master->dummy_rx)
+ cs |= BCM2835_SPI_CS_REN;
+ else
+ cs &= ~BCM2835_SPI_CS_REN;
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 6e30949d9f77..a7ec2d3dff92 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -638,9 +638,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
+ COMPATIBLE_IOCTL(PPPIOCATTCHAN)
+ COMPATIBLE_IOCTL(PPPIOCGCHAN)
+ COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
+-/* PPPOX */
+-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
+-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
+ /* Big A */
+ /* sparc only */
+ /* Big Q for sound/OSS */
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index 8b728750a625..69e813bcb947 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
+ extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
+ extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++
++#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
+
+ /* PPPoX socket states */
+ enum {
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index e690ba0f965c..70185079f83e 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -211,6 +211,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+
+ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
+ void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 7e42efa143a0..29b55f8cd7b3 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -5865,10 +5865,12 @@ struct mlx5_ifc_modify_cq_in_bits {
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+- u8 reserved_at_280[0x40];
++ u8 reserved_at_280[0x60];
+
+ u8 cq_umem_valid[0x1];
+- u8 reserved_at_2c1[0x5bf];
++ u8 reserved_at_2e1[0x1f];
++
++ u8 reserved_at_300[0x580];
+
+ u8 pas[0][0x40];
+ };
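The mlx5_ifc hunk above renumbers the reserved fields around cq_umem_valid so the bit offsets encoded in the field names tile contiguously again. A quick runnable check of that arithmetic (nothing here is driver code, just the offsets added up):

#include <assert.h>

int main(void)
{
        unsigned int off = 0x280;
        off += 0x60;            /* reserved_at_280[0x60]  -> 0x2e0 */
        off += 0x1;             /* cq_umem_valid[0x1]     -> 0x2e1 */
        off += 0x1f;            /* reserved_at_2e1[0x1f]  -> 0x300 */
        off += 0x580;           /* reserved_at_300[0x580] -> 0x880 */
        assert(off == 0x880);   /* pas[] starts at bit 0x880 */
        return 0;
}

The end offset matches the old layout (0x2c1 + 0x5bf = 0x880), so only the internal field boundaries move; the overall structure size is unchanged.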
+diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
+index c50fb297e265..e89a922ee849 100644
+--- a/include/scsi/libfcoe.h
++++ b/include/scsi/libfcoe.h
+@@ -229,6 +229,7 @@ struct fcoe_fcf {
+ * @vn_mac: VN_Node assigned MAC address for data
+ */
+ struct fcoe_rport {
++ struct fc_rport_priv rdata;
+ unsigned long time;
+ u16 fcoe_len;
+ u16 flags;
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index d164f63a4345..8a8f9e5f264f 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -37,12 +37,15 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
+ int err;
+
+ if (dev->priv_flags & IFF_EBRIDGE) {
++ err = br_vlan_bridge_event(dev, event, ptr);
++ if (err)
++ return notifier_from_errno(err);
++
+ if (event == NETDEV_REGISTER) {
+ /* register of bridge completed, add sysfs entries */
+ br_sysfs_addbr(dev);
+ return NOTIFY_DONE;
+ }
+- br_vlan_bridge_event(dev, event, ptr);
+ }
+
+ /* not a port of a bridge */
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 3d8deac2353d..f8cac3702712 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1388,6 +1388,9 @@ br_multicast_leave_group(struct net_bridge *br,
+ if (!br_port_group_equal(p, port, src))
+ continue;
+
++ if (p->flags & MDB_PG_FLAGS_PERMANENT)
++ break;
++
+ rcu_assign_pointer(*pp, p->next);
+ hlist_del_init(&p->mglist);
+ del_timer(&p->timer);
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 159a0e2cb0f6..9564a953bdf9 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -893,8 +893,8 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
+ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+ struct br_vlan_stats *stats);
+ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
+-void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+- void *ptr);
++int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
++ void *ptr);
+
+ static inline struct net_bridge_vlan_group *br_vlan_group(
+ const struct net_bridge *br)
+@@ -1084,9 +1084,10 @@ static inline void br_vlan_port_event(struct net_bridge_port *p,
+ {
+ }
+
+-static inline void br_vlan_bridge_event(struct net_device *dev,
+- unsigned long event, void *ptr)
++static inline int br_vlan_bridge_event(struct net_device *dev,
++ unsigned long event, void *ptr)
+ {
++ return 0;
+ }
+ #endif
+
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index f47f526b4f19..6b2c48b07e04 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -1043,7 +1043,6 @@ int br_vlan_init(struct net_bridge *br)
+ {
+ struct net_bridge_vlan_group *vg;
+ int ret = -ENOMEM;
+- bool changed;
+
+ vg = kzalloc(sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+@@ -1058,17 +1057,10 @@ int br_vlan_init(struct net_bridge *br)
+ br->vlan_proto = htons(ETH_P_8021Q);
+ br->default_pvid = 1;
+ rcu_assign_pointer(br->vlgrp, vg);
+- ret = br_vlan_add(br, 1,
+- BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
+- BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
+- if (ret)
+- goto err_vlan_add;
+
+ out:
+ return ret;
+
+-err_vlan_add:
+- vlan_tunnel_deinit(vg);
+ err_tunnel_init:
+ rhashtable_destroy(&vg->vlan_hash);
+ err_rhtbl:
+@@ -1443,13 +1435,23 @@ static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
+ }
+
+ /* Must be protected by RTNL. */
+-void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+- void *ptr)
++int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
+ {
+ struct netdev_notifier_changeupper_info *info;
+- struct net_bridge *br;
++ struct net_bridge *br = netdev_priv(dev);
++ bool changed;
++ int ret = 0;
+
+ switch (event) {
++ case NETDEV_REGISTER:
++ ret = br_vlan_add(br, br->default_pvid,
++ BRIDGE_VLAN_INFO_PVID |
++ BRIDGE_VLAN_INFO_UNTAGGED |
++ BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
++ break;
++ case NETDEV_UNREGISTER:
++ br_vlan_delete(br, br->default_pvid);
++ break;
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+ br_vlan_upper_change(dev, info->upper_dev, info->linking);
+@@ -1457,12 +1459,13 @@ void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+
+ case NETDEV_CHANGE:
+ case NETDEV_UP:
+- br = netdev_priv(dev);
+ if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
+- return;
++ break;
+ br_vlan_link_state_change(dev, br);
+ break;
+ }
++
++ return ret;
+ }
+
+ /* Must be protected by RTNL. */
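With this change br_device_event() can fail the bridge NETDEV_REGISTER path: br_vlan_bridge_event() now returns an errno, which the caller wraps with notifier_from_errno(). A runnable sketch of how that encoding round-trips; the constants and helper bodies mirror include/linux/notifier.h, while the demo itself is userspace-only illustration:

#include <stdio.h>

#define NOTIFY_DONE       0x0000
#define NOTIFY_OK         0x0001
#define NOTIFY_STOP_MASK  0x8000

static int notifier_from_errno(int err)
{
        if (err)
                return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
        return NOTIFY_OK;
}

static int notifier_to_errno(int ret)
{
        ret &= ~NOTIFY_STOP_MASK;
        return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
        int err = -12;  /* e.g. -ENOMEM from br_vlan_add() */
        int ret = notifier_from_errno(err);

        /* The STOP bit halts the notifier chain; the errno survives. */
        printf("chain ret=0x%x, decoded errno=%d\n", ret, notifier_to_errno(ret));
        return 0;
}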
+diff --git a/net/core/dev.c b/net/core/dev.c
+index d6edd218babd..29fcff2c3d51 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4382,12 +4382,17 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
++ /* check if bpf_xdp_adjust_head was used */
+ off = xdp->data - orig_data;
+- if (off > 0)
+- __skb_pull(skb, off);
+- else if (off < 0)
+- __skb_push(skb, -off);
+- skb->mac_header += off;
++ if (off) {
++ if (off > 0)
++ __skb_pull(skb, off);
++ else if (off < 0)
++ __skb_push(skb, -off);
++
++ skb->mac_header += off;
++ skb_reset_network_header(skb);
++ }
+
+ /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
+ * pckt.
+@@ -9711,6 +9716,8 @@ static void __net_exit default_device_exit(struct net *net)
+
+ /* Push remaining network devices to init_net */
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
++ if (__dev_get_by_name(&init_net, fb_name))
++ snprintf(fb_name, IFNAMSIZ, "dev%%d");
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ pr_emerg("%s: failed to move %s to init_net: %d\n",
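The net/core/dev.c hunk above makes generic XDP reset the skb network header whenever the program moved xdp->data, since bpf_xdp_adjust_head() shifts where headers begin inside the linear buffer. For context, a minimal XDP program that exercises exactly that path by growing headroom; this is a sketch assuming libbpf's bpf_helpers.h and the usual SEC()/license conventions:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_grow_head(struct xdp_md *ctx)
{
        /* A negative delta grows headroom, moving ctx->data backwards;
         * in generic XDP the kernel must then fix the skb header offsets. */
        if (bpf_xdp_adjust_head(ctx, -4))
                return XDP_ABORTED;

        void *data = (void *)(long)ctx->data;
        void *data_end = (void *)(long)ctx->data_end;

        if (data + 4 > data_end)        /* verifier-required bounds check */
                return XDP_ABORTED;
        __builtin_memset(data, 0, 4);   /* fill the new 4-byte prefix */
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";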
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index 43adfc1641ba..2f01cf6fa0de 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -275,6 +275,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
+ const struct iphdr *tiph = &tunnel->parms.iph;
+ u8 ipproto;
+
++ if (!pskb_inet_may_pull(skb))
++ goto tx_error;
++
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ipproto = IPPROTO_IPIP;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index c2049c72f3e5..dd2d0b963260 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -660,12 +660,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
+ struct flowi6 *fl6, __u8 *dsfield,
+ int *encap_limit)
+ {
+- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
++ struct ipv6hdr *ipv6h;
+ struct ip6_tnl *t = netdev_priv(dev);
+ __u16 offset;
+
+ offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
++ ipv6h = ipv6_hdr(skb);
+
+ if (offset > 0) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
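The ip6_gre hunk defers the ipv6_hdr(skb) load until after ip6_tnl_parse_tlv_enc_lim(), which may reallocate skb->head and leave any previously cached header pointer dangling. The same rule in miniature, as a runnable userspace sketch around realloc() (purely illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(16);
        if (!buf)
                return 1;
        strcpy(buf, "hdr");
        char *hdr = buf;                /* cached pointer into the buffer */

        char *tmp = realloc(buf, 1 << 20);  /* may move the allocation */
        if (!tmp)
                return 1;
        buf = tmp;
        hdr = buf;                      /* re-derive, as the fix does */

        printf("%s\n", hdr);
        free(buf);
        return 0;
}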
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index b80fde1bc005..d10a9e40729f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1278,12 +1278,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
++ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+-
+ skb_set_inner_ipproto(skb, IPPROTO_IPIP);
+
+ err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+@@ -1367,12 +1366,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
++ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+-
+ skb_set_inner_ipproto(skb, IPPROTO_IPV6);
+
+ err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 1d0e5904dedf..c54cb59593ef 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1681,6 +1681,9 @@ static const struct proto_ops pppol2tp_ops = {
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppol2tp_proto = {
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 06aac0aaae64..8dc6580e1787 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1222,7 +1222,6 @@ static void ieee80211_if_setup(struct net_device *dev)
+ static void ieee80211_if_setup_no_queue(struct net_device *dev)
+ {
+ ieee80211_if_setup(dev);
+- dev->features |= NETIF_F_LLTX;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ }
+
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index 8126b26f125e..fd1f7e799e23 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -285,6 +285,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ struct tcf_bpf *prog;
+ bool is_bpf, is_ebpf;
+ int ret, res = 0;
++ u32 index;
+
+ if (!nla)
+ return -EINVAL;
+@@ -298,13 +299,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
+-
+- ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
++ index = parm->index;
++ ret = tcf_idr_check_alloc(tn, &index, act, bind);
+ if (!ret) {
+- ret = tcf_idr_create(tn, parm->index, est, act,
++ ret = tcf_idr_create(tn, index, est, act,
+ &act_bpf_ops, bind, true);
+ if (ret < 0) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
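Every act_*.c hunk below applies the same one-line idea: tcf_idr_check_alloc() can write a newly allocated id back through its index pointer, and parm points straight into the netlink attribute payload, so writing there mutated the received message (a problem if the init path retries and re-parses it). Copying parm->index into a local keeps the attribute data read-only. A runnable userspace sketch of that idiom (structure and helper names are invented for illustration):

#include <stdio.h>

struct parm_demo { unsigned int index; };   /* stands in for tc_gact etc. */

/* Mimics tcf_idr_check_alloc(): may write the chosen id back via *index. */
static int idr_check_alloc_demo(unsigned int *index)
{
        if (*index == 0)
                *index = 123;   /* "kernel" picked a fresh id */
        return 0;
}

int main(void)
{
        struct parm_demo msg = { .index = 0 };  /* the "netlink" buffer */
        unsigned int index = msg.index;         /* the fix: a local copy */

        idr_check_alloc_demo(&index);
        /* The message stays untouched; only the local sees the new id. */
        printf("msg.index=%u local index=%u\n", msg.index, index);
        return 0;
}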
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index ce36b0f7e1dc..32ac04d77a45 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -103,6 +103,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ struct tcf_connmark_info *ci;
+ struct tc_connmark *parm;
+ int ret = 0, err;
++ u32 index;
+
+ if (!nla)
+ return -EINVAL;
+@@ -116,13 +117,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_CONNMARK_PARMS]);
+-
+- ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ ret = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (!ret) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_connmark_ops, bind, false);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
+index 621fb22ce2a9..9b9288267a54 100644
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -52,6 +52,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
+ struct tc_csum *parm;
+ struct tcf_csum *p;
+ int ret = 0, err;
++ u32 index;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -64,13 +65,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
+ if (tb[TCA_CSUM_PARMS] == NULL)
+ return -EINVAL;
+ parm = nla_data(tb[TCA_CSUM_PARMS]);
+-
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (!err) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_csum_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
+index b2380c5284e6..8f0140c6ca58 100644
+--- a/net/sched/act_gact.c
++++ b/net/sched/act_gact.c
+@@ -61,6 +61,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
+ struct tc_gact *parm;
+ struct tcf_gact *gact;
+ int ret = 0;
++ u32 index;
+ int err;
+ #ifdef CONFIG_GACT_PROB
+ struct tc_gact_p *p_parm = NULL;
+@@ -77,6 +78,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
+ if (tb[TCA_GACT_PARMS] == NULL)
+ return -EINVAL;
+ parm = nla_data(tb[TCA_GACT_PARMS]);
++ index = parm->index;
+
+ #ifndef CONFIG_GACT_PROB
+ if (tb[TCA_GACT_PROB] != NULL)
+@@ -94,12 +96,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
+ }
+ #endif
+
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (!err) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_gact_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 41d5398dd2f2..92ee853d43e6 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -479,8 +479,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ u8 *saddr = NULL;
+ bool exists = false;
+ int ret = 0;
++ u32 index;
+ int err;
+
++ if (!nla) {
++ NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
++ return -EINVAL;
++ }
++
+ err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
+ NULL);
+ if (err < 0)
+@@ -502,7 +508,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (!p)
+ return -ENOMEM;
+
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0) {
+ kfree(p);
+ return err;
+@@ -514,10 +521,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
++ ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
+ bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ kfree(p);
+ return ret;
+ }
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 58e7573dded4..d10dca7a13e1 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -101,6 +101,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ struct net_device *dev;
+ bool exists = false;
+ int ret, err;
++ u32 index;
+
+ if (!nla) {
+ NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
+@@ -115,8 +116,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+ }
+ parm = nla_data(tb[TCA_MIRRED_PARMS]);
+-
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -133,21 +134,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
+ return -EINVAL;
+ }
+
+ if (!exists) {
+ if (!parm->ifindex) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
+ return -EINVAL;
+ }
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_mirred_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
+index 45923ebb7a4f..7b858c11b1b5 100644
+--- a/net/sched/act_nat.c
++++ b/net/sched/act_nat.c
+@@ -44,6 +44,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
+ struct tc_nat *parm;
+ int ret = 0, err;
+ struct tcf_nat *p;
++ u32 index;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
+ if (tb[TCA_NAT_PARMS] == NULL)
+ return -EINVAL;
+ parm = nla_data(tb[TCA_NAT_PARMS]);
+-
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (!err) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_nat_ops, bind, false);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
+index 45e9d6bfddb3..17360c6faeaa 100644
+--- a/net/sched/act_pedit.c
++++ b/net/sched/act_pedit.c
+@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ struct tcf_pedit *p;
+ int ret = 0, err;
+ int ksize;
++ u32 index;
+
+ if (!nla) {
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
+@@ -179,18 +180,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ if (IS_ERR(keys_ex))
+ return PTR_ERR(keys_ex);
+
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (!err) {
+ if (!parm->nkeys) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+ ret = -EINVAL;
+ goto out_free;
+ }
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_pedit_ops, bind, false);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ goto out_free;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_police.c b/net/sched/act_police.c
+index a065f62fa79c..49cec3e64a4d 100644
+--- a/net/sched/act_police.c
++++ b/net/sched/act_police.c
+@@ -57,6 +57,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
+ struct tc_action_net *tn = net_generic(net, police_net_id);
+ struct tcf_police_params *new;
+ bool exists = false;
++ u32 index;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -73,7 +74,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_POLICE_TBF]);
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -81,10 +83,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
+ return 0;
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, NULL, a,
++ ret = tcf_idr_create(tn, index, NULL, a,
+ &act_police_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
+index 274d7a0c0e25..595308d60133 100644
+--- a/net/sched/act_sample.c
++++ b/net/sched/act_sample.c
+@@ -41,8 +41,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+ struct nlattr *tb[TCA_SAMPLE_MAX + 1];
+ struct psample_group *psample_group;
++ u32 psample_group_num, rate, index;
+ struct tcf_chain *goto_ch = NULL;
+- u32 psample_group_num, rate;
+ struct tc_sample *parm;
+ struct tcf_sample *s;
+ bool exists = false;
+@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_SAMPLE_PARMS]);
+-
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ return 0;
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_sample_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
+index f28ddbabff76..33aefa25b545 100644
+--- a/net/sched/act_simple.c
++++ b/net/sched/act_simple.c
+@@ -95,6 +95,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ struct tcf_defact *d;
+ bool exists = false;
+ int ret = 0, err;
++ u32 index;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -108,7 +109,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_DEF_PARMS]);
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -119,15 +121,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_simp_ops, bind, false);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
+index 215a06705cef..b100870f02a6 100644
+--- a/net/sched/act_skbedit.c
++++ b/net/sched/act_skbedit.c
+@@ -99,6 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ u16 *queue_mapping = NULL, *ptype = NULL;
+ bool exists = false;
+ int ret = 0, err;
++ u32 index;
+
+ if (nla == NULL)
+ return -EINVAL;
+@@ -146,8 +147,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ }
+
+ parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
+-
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -158,15 +159,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_skbedit_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
+index 4f07706eff07..7da3518e18ef 100644
+--- a/net/sched/act_skbmod.c
++++ b/net/sched/act_skbmod.c
+@@ -87,12 +87,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ struct tcf_skbmod_params *p, *p_old;
+ struct tcf_chain *goto_ch = NULL;
+ struct tc_skbmod *parm;
++ u32 lflags = 0, index;
+ struct tcf_skbmod *d;
+ bool exists = false;
+ u8 *daddr = NULL;
+ u8 *saddr = NULL;
+ u16 eth_type = 0;
+- u32 lflags = 0;
+ int ret = 0, err;
+
+ if (!nla)
+@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ }
+
+ parm = nla_data(tb[TCA_SKBMOD_PARMS]);
++ index = parm->index;
+ if (parm->flags & SKBMOD_F_SWAPMAC)
+ lflags = SKBMOD_F_SWAPMAC;
+
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_skbmod_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 10dffda1d5cc..6d0debdc9b97 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -225,6 +225,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ __be16 flags = 0;
+ u8 tos, ttl;
+ int ret = 0;
++ u32 index;
+ int err;
+
+ if (!nla) {
+@@ -245,7 +246,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ }
+
+ parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -345,7 +347,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_tunnel_key_ops, bind, true);
+ if (ret) {
+ NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
+@@ -403,7 +405,7 @@ err_out:
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
+index 9269d350fb8a..a3c9eea1ee8a 100644
+--- a/net/sched/act_vlan.c
++++ b/net/sched/act_vlan.c
+@@ -116,6 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ u8 push_prio = 0;
+ bool exists = false;
+ int ret = 0, err;
++ u32 index;
+
+ if (!nla)
+ return -EINVAL;
+@@ -128,7 +129,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ if (!tb[TCA_VLAN_PARMS])
+ return -EINVAL;
+ parm = nla_data(tb[TCA_VLAN_PARMS]);
+- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
++ index = parm->index;
++ err = tcf_idr_check_alloc(tn, &index, a, bind);
+ if (err < 0)
+ return err;
+ exists = err;
+@@ -144,7 +146,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+ push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
+@@ -152,7 +154,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -ERANGE;
+ }
+
+@@ -166,7 +168,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EPROTONOSUPPORT;
+ }
+ } else {
+@@ -180,16 +182,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+ action = parm->v_action;
+
+ if (!exists) {
+- ret = tcf_idr_create(tn, parm->index, est, a,
++ ret = tcf_idr_create(tn, index, est, a,
+ &act_vlan_ops, bind, true);
+ if (ret) {
+- tcf_idr_cleanup(tn, parm->index);
++ tcf_idr_cleanup(tn, index);
+ return ret;
+ }
+
+@@ -306,6 +308,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
+ return tcf_idr_search(tn, a, index);
+ }
+
++static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
++{
++ return nla_total_size(sizeof(struct tc_vlan))
++ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
++ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
++ + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
++}
++
+ static struct tc_action_ops act_vlan_ops = {
+ .kind = "vlan",
+ .id = TCA_ID_VLAN,
+@@ -315,6 +325,7 @@ static struct tc_action_ops act_vlan_ops = {
+ .init = tcf_vlan_init,
+ .cleanup = tcf_vlan_cleanup,
+ .walk = tcf_vlan_walker,
++ .get_fill_size = tcf_vlan_get_fill_size,
+ .lookup = tcf_vlan_search,
+ .size = sizeof(struct tcf_vlan),
+ };
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 25ef172c23df..30169b3adbbb 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
+ struct Qdisc *sch = ctx;
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+- if (skb)
++ if (skb) {
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+-
+- prefetch(&skb->end); /* we'll need skb_shinfo() */
++ prefetch(&skb->end); /* we'll need skb_shinfo() */
++ }
+ return skb;
+ }
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 7621ec2f539c..a3cc879d2589 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -253,7 +253,7 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+
+ /* Check if socket is already active */
+ rc = -EINVAL;
+- if (sk->sk_state != SMC_INIT)
++ if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
+ goto out_rel;
+
+ smc->clcsock->sk->sk_reuse = sk->sk_reuse;
+@@ -1399,7 +1399,8 @@ static int smc_listen(struct socket *sock, int backlog)
+ lock_sock(sk);
+
+ rc = -EINVAL;
+- if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
++ if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
++ smc->connect_nonblock)
+ goto out;
+
+ rc = 0;
+@@ -1527,7 +1528,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out;
+
+ if (msg->msg_flags & MSG_FASTOPEN) {
+- if (sk->sk_state == SMC_INIT) {
++ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
+ smc_switch_to_fallback(smc);
+ smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
+ } else {
+@@ -1741,14 +1742,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
+ }
+ break;
+ case TCP_NODELAY:
+- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
++ if (sk->sk_state != SMC_INIT &&
++ sk->sk_state != SMC_LISTEN &&
++ sk->sk_state != SMC_CLOSED) {
+ if (val && !smc->use_fallback)
+ mod_delayed_work(system_wq, &smc->conn.tx_work,
+ 0);
+ }
+ break;
+ case TCP_CORK:
+- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
++ if (sk->sk_state != SMC_INIT &&
++ sk->sk_state != SMC_LISTEN &&
++ sk->sk_state != SMC_CLOSED) {
+ if (!val && !smc->use_fallback)
+ mod_delayed_work(system_wq, &smc->conn.tx_work,
+ 0);
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index cf155061c472..acd8a72169c1 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
+ int rep_type;
+ int rep_size;
+ int req_type;
++ int req_size;
+ struct net *net;
+ struct sk_buff *rep;
+ struct tlv_desc *req;
+@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ int err;
+ struct sk_buff *arg;
+
+- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
++ if (msg->req_type && (!msg->req_size ||
++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ msg->rep = tipc_tlv_alloc(msg->rep_size);
+@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ int err;
+
+- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
++ if (msg->req_type && (!msg->req_size ||
++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ err = __tipc_nl_compat_doit(cmd, msg);
+@@ -1288,8 +1291,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+ goto send;
+ }
+
+- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+- if (!len || !TLV_OK(msg.req, len)) {
++ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
++ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index dd8537f988c4..83ae41d7e554 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -485,9 +485,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
+ tsk_set_unreturnable(tsk, true);
+ if (sock->type == SOCK_DGRAM)
+ tsk_set_unreliable(tsk, true);
+- __skb_queue_head_init(&tsk->mc_method.deferredq);
+ }
+-
++ __skb_queue_head_init(&tsk->mc_method.deferredq);
+ trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
+ return 0;
+ }
+diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
+index 62dcdf082349..6c81a911fc02 100644
+--- a/net/vmw_vsock/hyperv_transport.c
++++ b/net/vmw_vsock/hyperv_transport.c
+@@ -311,6 +311,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
+ lock_sock(sk);
+ hvs_do_close_lock_held(vsock_sk(sk), true);
+ release_sock(sk);
++
++ /* Release the refcnt for the channel that's opened in
++ * hvs_open_connection().
++ */
++ sock_put(sk);
+ }
+
+ static void hvs_open_connection(struct vmbus_channel *chan)
+@@ -378,6 +383,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
+ }
+
+ set_per_channel_state(chan, conn_from_host ? new : sk);
++
++ /* This reference will be dropped by hvs_close_connection(). */
++ sock_hold(conn_from_host ? new : sk);
+ vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
+
+ /* Set the pending send size to max packet size to always get
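The hyperv vsock hunks pair a sock_hold() taken when the rescind callback is registered with a sock_put() in hvs_close_connection(), so the socket cannot be freed while the channel can still call back into it. A small runnable sketch of that ownership rule using a plain counter (the callback machinery here is simulated, not Hyper-V's):

#include <stdio.h>
#include <stdlib.h>

struct sock_demo { int refs; };

static void hold(struct sock_demo *sk) { sk->refs++; }
static void put(struct sock_demo *sk)
{
        if (--sk->refs == 0) {
                printf("freeing socket\n");
                free(sk);
        }
}

/* The channel keeps its own reference for the callback's lifetime. */
static void close_connection(struct sock_demo *sk)
{
        printf("rescind callback ran, refs=%d\n", sk->refs);
        put(sk);        /* drop the reference taken at registration */
}

int main(void)
{
        struct sock_demo *sk = malloc(sizeof(*sk));
        if (!sk)
                return 1;
        sk->refs = 1;           /* creator's reference */

        hold(sk);               /* registration: sock_hold() analogue */
        put(sk);                /* creator drops its reference early */
        close_connection(sk);   /* socket is still alive here */
        return 0;
}

Without the hold at registration time, the creator's early put would have freed the socket before the callback ran.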
+diff --git a/sound/usb/helper.c b/sound/usb/helper.c
+index 84aa265dd802..4c12cc5b53fd 100644
+--- a/sound/usb/helper.c
++++ b/sound/usb/helper.c
+@@ -63,6 +63,20 @@ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype
+ return NULL;
+ }
+
++/* check the validity of pipe and EP types */
++int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
++{
++ static const int pipetypes[4] = {
++ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
++ };
++ struct usb_host_endpoint *ep;
++
++ ep = usb_pipe_endpoint(dev, pipe);
++ if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
++ return -EINVAL;
++ return 0;
++}
++
+ /*
+ * Wrapper for usb_control_msg().
+ * Allocates a temp buffer to prevent dmaing from/to the stack.
+@@ -75,6 +89,9 @@ int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+ void *buf = NULL;
+ int timeout;
+
++ if (snd_usb_pipe_sanity_check(dev, pipe))
++ return -EINVAL;
++
+ if (size > 0) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+diff --git a/sound/usb/helper.h b/sound/usb/helper.h
+index d338bd0e0ca6..6afb70156ec4 100644
+--- a/sound/usb/helper.h
++++ b/sound/usb/helper.h
+@@ -7,6 +7,7 @@ unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size);
+ void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype);
+ void *snd_usb_find_csint_desc(void *descstart, int desclen, void *after, u8 dsubtype);
+
++int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe);
+ int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size);
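snd_usb_pipe_sanity_check() rejects URB submissions whose pipe type disagrees with the endpoint's descriptor type, and the quirk handlers below now call it before usb_control_msg()/usb_interrupt_msg(). The core of the check is a table lookup; a runnable sketch of that logic with the USB core stubbed out (the enum values mirror linux/usb.h and ch9.h, everything else is a stand-in):

#include <stdio.h>

enum { PIPE_ISOCHRONOUS, PIPE_INTERRUPT, PIPE_CONTROL, PIPE_BULK };
enum { EP_XFER_CONTROL, EP_XFER_ISOC, EP_XFER_BULK, EP_XFER_INT };

/* Expected pipe type for each endpoint transfer type, as in helper.c. */
static const int pipetypes[4] = {
        PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static int pipe_sanity_check(int pipe_type, int ep_xfer_type)
{
        /* The real code also fails when no endpoint matches the pipe. */
        return pipe_type == pipetypes[ep_xfer_type] ? 0 : -22 /* -EINVAL */;
}

int main(void)
{
        printf("int pipe on int ep:   %d\n",
               pipe_sanity_check(PIPE_INTERRUPT, EP_XFER_INT));
        printf("ctrl pipe on bulk ep: %d\n",
               pipe_sanity_check(PIPE_CONTROL, EP_XFER_BULK));
        return 0;
}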
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index cf5cff10c08e..78858918cbc1 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -828,11 +828,13 @@ static int snd_usb_novation_boot_quirk(struct usb_device *dev)
+ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
+ {
+ int err, actual_length;
+-
+ /* "midi send" enable */
+ static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
++ void *buf;
+
+- void *buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
++ if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x05)))
++ return -EINVAL;
++ buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x05), buf,
+@@ -857,7 +859,11 @@ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
+
+ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
+ {
+- int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
++ int ret;
++
++ if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
++ return -EINVAL;
++ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 1, 0, NULL, 0, 1000);
+
+@@ -964,6 +970,8 @@ static int snd_usb_axefx3_boot_quirk(struct usb_device *dev)
+
+ dev_dbg(&dev->dev, "Waiting for Axe-Fx III to boot up...\n");
+
++ if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
++ return -EINVAL;
+ /* If the Axe-Fx III has not fully booted, it will timeout when trying
+ * to enable the audio streaming interface. A more generous timeout is
+ * used here to detect when the Axe-Fx III has finished booting as the
+@@ -996,6 +1004,8 @@ static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
+ {
+ int err, actual_length;
+
++ if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x01)))
++ return -EINVAL;
+ err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x01), buf, *length,
+ &actual_length, 1000);
+ if (err < 0)
+@@ -1006,6 +1016,8 @@ static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
+
+ memset(buf, 0, buf_size);
+
++ if (snd_usb_pipe_sanity_check(dev, usb_rcvintpipe(dev, 0x82)))
++ return -EINVAL;
+ err = usb_interrupt_msg(dev, usb_rcvintpipe(dev, 0x82), buf, buf_size,
+ &actual_length, 1000);
+ if (err < 0)
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index f1573a11d3e4..b9e88ccc289b 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -51,7 +51,8 @@ TEST_PROGS := test_kmod.sh \
+ test_lirc_mode2.sh \
+ test_skb_cgroup_id.sh \
+ test_flow_dissector.sh \
+- test_xdp_vlan.sh \
++ test_xdp_vlan_mode_generic.sh \
++ test_xdp_vlan_mode_native.sh \
+ test_lwt_ip_encap.sh \
+ test_tcp_check_syncookie.sh \
+ test_tc_tunnel.sh \
+diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+index 51a3a31d1aac..bb8b0da91686 100755
+--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
++++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
+@@ -1,6 +1,14 @@
+ #!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++# Author: Jesper Dangaard Brouer <hawk@kernel.org>
+
+-TESTNAME=xdp_vlan
++# Allow wrapper scripts to name test
++if [ -z "$TESTNAME" ]; then
++ TESTNAME=xdp_vlan
++fi
++
++# Default XDP mode
++XDP_MODE=xdpgeneric
+
+ usage() {
+ echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME"
+@@ -9,9 +17,23 @@ usage() {
+ echo " -v | --verbose : Verbose"
+ echo " --flush : Flush before starting (e.g. after --interactive)"
+ echo " --interactive : Keep netns setup running after test-run"
++ echo " --mode=XXX : Choose XDP mode (xdp | xdpgeneric | xdpdrv)"
+ echo ""
+ }
+
++valid_xdp_mode()
++{
++ local mode=$1
++
++ case "$mode" in
++ xdpgeneric | xdpdrv | xdp)
++ return 0
++ ;;
++ *)
++ return 1
++ esac
++}
++
+ cleanup()
+ {
+ local status=$?
+@@ -37,7 +59,7 @@ cleanup()
+
+ # Using external program "getopt" to get --long-options
+ OPTIONS=$(getopt -o hvfi: \
+- --long verbose,flush,help,interactive,debug -- "$@")
++ --long verbose,flush,help,interactive,debug,mode: -- "$@")
+ if (( $? != 0 )); then
+ usage
+ echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?"
+@@ -60,6 +82,11 @@ while true; do
+ cleanup
+ shift
+ ;;
++ --mode )
++ shift
++ XDP_MODE=$1
++ shift
++ ;;
+ -- )
+ shift
+ break
+@@ -81,8 +108,14 @@ if [ "$EUID" -ne 0 ]; then
+ exit 1
+ fi
+
+-ip link set dev lo xdp off 2>/dev/null > /dev/null
+-if [ $? -ne 0 ];then
++valid_xdp_mode $XDP_MODE
++if [ $? -ne 0 ]; then
++ echo "selftests: $TESTNAME [FAILED] unknown XDP mode ($XDP_MODE)"
++ exit 1
++fi
++
++ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
++if [ $? -ne 0 ]; then
+ echo "selftests: $TESTNAME [SKIP] need ip xdp support"
+ exit 0
+ fi
+@@ -155,7 +188,7 @@ ip netns exec ns2 ip link set lo up
+ # At this point, the hosts cannot reach each-other,
+ # because ns2 are using VLAN tags on the packets.
+
+-ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Okay ping fails"'
++ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First ping must fail"'
+
+
+ # Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags
+@@ -166,7 +199,7 @@ export FILE=test_xdp_vlan.o
+
+ # First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
+ export XDP_PROG=xdp_vlan_change
+-ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
++ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
+
+ # In ns1: egress use TC to add back VLAN tag 4011
+ # (del cmd)
+@@ -177,8 +210,8 @@ ip netns exec ns1 tc filter add dev $DEVNS1 egress \
+ prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
+
+ # Now the namespaces can reach each-other, test with ping:
+-ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
+-ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
++ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
++ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
+
+ # Second test: Replace xdp prog, that fully remove vlan header
+ #
+@@ -187,9 +220,9 @@ ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
+ # ETH_P_8021Q indication, and this cause overwriting of our changes.
+ #
+ export XDP_PROG=xdp_vlan_remove_outer2
+-ip netns exec ns1 ip link set $DEVNS1 xdp off
+-ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
++ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE off
++ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
+
+ # Now the namespaces should still be able reach each-other, test with ping:
+-ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
+-ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
++ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
++ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
+diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
+new file mode 100644
+index 000000000000..c515326d6d59
+--- /dev/null
++++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
+@@ -0,0 +1,9 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# Exit on failure
++set -e
++
++# Wrapper script to test generic-XDP
++export TESTNAME=xdp_vlan_mode_generic
++./test_xdp_vlan.sh --mode=xdpgeneric
+diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
+new file mode 100644
+index 000000000000..5cf7ce1f16c1
+--- /dev/null
++++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
+@@ -0,0 +1,9 @@
++#!/bin/bash
++# SPDX-License-Identifier: GPL-2.0
++
++# Exit on failure
++set -e
++
++# Wrapper script to test native-XDP
++export TESTNAME=xdp_vlan_mode_native
++./test_xdp_vlan.sh --mode=xdpdrv

* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-06 19:20 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-08-06 19:20 UTC (permalink / raw
To: gentoo-commits
commit: eef2f4486d276562e6a72b7255a2f82eb5e85521
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Aug 6 19:20:00 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Aug 6 19:20:00 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=eef2f448
Linux patch 5.2.7
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1006_linux-5.2.7.patch | 4991 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 4995 insertions(+)
diff --git a/0000_README b/0000_README
index 3a50bfb..139084e 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch: 1005_linux-5.2.6.patch
From: https://www.kernel.org
Desc: Linux 5.2.6
+Patch: 1006_linux-5.2.7.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.7
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1006_linux-5.2.7.patch b/1006_linux-5.2.7.patch
new file mode 100644
index 0000000..cd78fb8
--- /dev/null
+++ b/1006_linux-5.2.7.patch
@@ -0,0 +1,4991 @@
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+index 25f3b2532198..e05e581af5cf 100644
+--- a/Documentation/admin-guide/hw-vuln/spectre.rst
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -41,10 +41,11 @@ Related CVEs
+
+ The following CVE entries describe Spectre variants:
+
+- ============= ======================= =================
++ ============= ======================= ==========================
+ CVE-2017-5753 Bounds check bypass Spectre variant 1
+ CVE-2017-5715 Branch target injection Spectre variant 2
+- ============= ======================= =================
++ CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs)
++ ============= ======================= ==========================
+
+ Problem
+ -------
+@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
+ over the network, see :ref:`[12] <spec_ref12>`. However such attacks
+ are difficult, low bandwidth, fragile, and are considered low risk.
+
++Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
++only about user-controlled array bounds checks. It can affect any
++conditional checks. The kernel entry code interrupt, exception, and NMI
++handlers all have conditional swapgs checks. Those may be problematic
++in the context of Spectre v1, as kernel code can speculatively run with
++a user GS.
++
+ Spectre variant 2 (Branch Target Injection)
+ -------------------------------------------
+
+@@ -132,6 +140,9 @@ not cover all possible attack vectors.
+ 1. A user process attacking the kernel
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
+ The attacker passes a parameter to the kernel via a register or
+ via a known address in memory during a syscall. Such parameter may
+ be used later by the kernel as an index to an array or to derive
+@@ -144,7 +155,40 @@ not cover all possible attack vectors.
+ potentially be influenced for Spectre attacks, new "nospec" accessor
+ macros are used to prevent speculative loading of data.
+
+- Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
++Spectre variant 1 (swapgs)
++~~~~~~~~~~~~~~~~~~~~~~~~~~
++
++ An attacker can train the branch predictor to speculatively skip the
++ swapgs path for an interrupt or exception. If they initialize
++ the GS register to a user-space value, if the swapgs is speculatively
++ skipped, subsequent GS-related percpu accesses in the speculation
++ window will be done with the attacker-controlled GS value. This
++ could cause privileged memory to be accessed and leaked.
++
++ For example:
++
++ ::
++
++ if (coming from user space)
++ swapgs
++ mov %gs:<percpu_offset>, %reg
++ mov (%reg), %reg1
++
++ When coming from user space, the CPU can speculatively skip the
++ swapgs, and then do a speculative percpu load using the user GS
++ value. So the user can speculatively force a read of any kernel
++ value. If a gadget exists which uses the percpu value as an address
++ in another load/store, then the contents of the kernel value may
++ become visible via an L1 side channel attack.
++
++ A similar attack exists when coming from kernel space. The CPU can
++ speculatively do the swapgs, causing the user GS to get used for the
++ rest of the speculative window.
++
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
++ A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+ target buffer (BTB) before issuing syscall to launch an attack.
+ After entering the kernel, the kernel could use the poisoned branch
+ target buffer on indirect jump and jump to gadget code in speculative
+@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:
+
+ The possible values in this file are:
+
+- ======================================= =================================
+- 'Mitigation: __user pointer sanitation' Protection in kernel on a case by
+- case base with explicit pointer
+- sanitation.
+- ======================================= =================================
++ .. list-table::
++
++ * - 'Not affected'
++ - The processor is not vulnerable.
++ * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
++ - The swapgs protections are disabled; otherwise it has
++ protection in the kernel on a case by case base with explicit
++ pointer sanitation and usercopy LFENCE barriers.
++ * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
++ - Protection in the kernel on a case by case base with explicit
++ pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
++ barriers.
+
+ However, the protections are put in place on a case by case basis,
+ and there is no guarantee that all possible attack vectors for Spectre
+@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
+ 1. Kernel mitigation
+ ^^^^^^^^^^^^^^^^^^^^
+
++Spectre variant 1
++~~~~~~~~~~~~~~~~~
++
+ For the Spectre variant 1, vulnerable kernel code (as determined
+ by code audit or scanning tools) is annotated on a case by case
+ basis to use nospec accessor macros for bounds clipping :ref:`[2]
+ <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
+ not cover all attack vectors for Spectre variant 1.
+
++ Copy-from-user code has an LFENCE barrier to prevent the access_ok()
++ check from being mis-speculated. The barrier is done by the
++ barrier_nospec() macro.
++
++ For the swapgs variant of Spectre variant 1, LFENCE barriers are
++ added to interrupt, exception and NMI entry where needed. These
++ barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
++ FENCE_SWAPGS_USER_ENTRY macros.
++
++Spectre variant 2
++~~~~~~~~~~~~~~~~~
++
+ For Spectre variant 2 mitigation, the compiler turns indirect calls or
+ jumps in the kernel into equivalent return trampolines (retpolines)
+ :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
+@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
+ Spectre variant 2 mitigation can be disabled or force enabled at the
+ kernel command line.
+
++ nospectre_v1
++
++ [X86,PPC] Disable mitigations for Spectre Variant 1
++ (bounds check bypass). With this option data leaks are
++ possible in the system.
++
+ nospectre_v2
+
+ [X86] Disable all mitigations for the Spectre variant 2
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 0082d1e56999..0d40729d080f 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2587,7 +2587,7 @@
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
+ kpti=0 [ARM64]
+- nospectre_v1 [PPC]
++ nospectre_v1 [X86,PPC]
+ nobp=0 [S390]
+ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+@@ -2936,9 +2936,9 @@
+ nosmt=force: Force disable SMT, cannot be undone
+ via the sysfs control file.
+
+- nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
+- check bypass). With this option data leaks are possible
+- in the system.
++ nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
++ (bounds check bypass). With this option data leaks are
++ possible in the system.
+
+ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+ the Spectre variant 2 (indirect branch prediction)
+diff --git a/Makefile b/Makefile
+index 3cd40f1a8f75..359a6b49e576 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+@@ -467,6 +467,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE
+ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
+ KBUILD_LDFLAGS :=
+ GCC_PLUGINS_CFLAGS :=
++CLANG_FLAGS :=
+
+ export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
+ export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
+@@ -519,7 +520,7 @@ endif
+
+ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
+ ifneq ($(CROSS_COMPILE),)
+-CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
++CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
+ GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+index e852594417b5..b13f87792e9f 100644
+--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
++++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+@@ -128,10 +128,6 @@
+ };
+ };
+
+-&emmc {
+- /delete-property/mmc-hs200-1_8v;
+-};
+-
+ &i2c2 {
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+index 468a1818545d..ce57881625ec 100644
+--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
++++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+@@ -90,10 +90,6 @@
+ pwm-off-delay-ms = <200>;
+ };
+
+-&emmc {
+- /delete-property/mmc-hs200-1_8v;
+-};
+-
+ &gpio_keys {
+ pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
+
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index aa017abf4f42..f7bc886a4b51 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -231,6 +231,7 @@
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clock-frequency = <24000000>;
++ arm,no-tick-in-suspend;
+ };
+
+ timer: timer@ff810000 {
+diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
+index 1c518b8ee520..21a59efd1a2c 100644
+--- a/arch/arm/mach-exynos/Kconfig
++++ b/arch/arm/mach-exynos/Kconfig
+@@ -106,7 +106,7 @@ config SOC_EXYNOS5420
+ bool "SAMSUNG EXYNOS5420"
+ default y
+ depends on ARCH_EXYNOS5
+- select MCPM if SMP
++ select EXYNOS_MCPM if SMP
+ select ARM_CCI400_PORT_CTRL
+ select ARM_CPU_SUSPEND
+
+@@ -115,6 +115,10 @@ config SOC_EXYNOS5800
+ default y
+ depends on SOC_EXYNOS5420
+
++config EXYNOS_MCPM
++ bool
++ select MCPM
++
+ config EXYNOS_CPU_SUSPEND
+ bool
+ select ARM_CPU_SUSPEND
+diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
+index 264dbaa89c3d..5abf3db23912 100644
+--- a/arch/arm/mach-exynos/Makefile
++++ b/arch/arm/mach-exynos/Makefile
+@@ -18,5 +18,5 @@ plus_sec := $(call as-instr,.arch_extension sec,+sec)
+ AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
+ AFLAGS_sleep.o :=-Wa,-march=armv7-a$(plus_sec)
+
+-obj-$(CONFIG_MCPM) += mcpm-exynos.o
++obj-$(CONFIG_EXYNOS_MCPM) += mcpm-exynos.o
+ CFLAGS_mcpm-exynos.o += -march=armv7-a
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index be122af0de8f..8b1e6ab8504f 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -268,7 +268,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
+- if (IS_ENABLED(CONFIG_MCPM)) {
++ if (IS_ENABLED(CONFIG_EXYNOS_MCPM)) {
+ mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
+ mcpm_cpu_suspend();
+ }
+@@ -351,7 +351,7 @@ static void exynos5420_pm_prepare(void)
+ exynos_pm_enter_sleep_mode();
+
+ /* ensure at least INFORM0 has the resume address */
+- if (IS_ENABLED(CONFIG_MCPM))
++ if (IS_ENABLED(CONFIG_EXYNOS_MCPM))
+ pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0);
+
+ tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+@@ -455,7 +455,7 @@ static void exynos5420_prepare_pm_resume(void)
+ mpidr = read_cpuid_mpidr();
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+- if (IS_ENABLED(CONFIG_MCPM))
++ if (IS_ENABLED(CONFIG_EXYNOS_MCPM))
+ WARN_ON(mcpm_cpu_powered_up());
+
+ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
+diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
+index 488d5c3b37f4..799e0b016b62 100644
+--- a/arch/arm/mach-rpc/dma.c
++++ b/arch/arm/mach-rpc/dma.c
+@@ -128,7 +128,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
+ } while (1);
+
+ idma->state = ~DMA_ST_AB;
+- disable_irq(irq);
++ disable_irq_nosync(irq);
+
+ return IRQ_HANDLED;
+ }
+@@ -177,6 +177,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+
++ idma->dma_addr = idma->dma.sg->dma_address;
++ idma->dma_len = idma->dma.sg->length;
++
+ iomd_writeb(DMA_CR_C, dma_base + CR);
+ idma->state = DMA_ST_AB;
+ }
+diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+index 329f8ceeebea..205071b45a32 100644
+--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+@@ -184,6 +184,8 @@
+ num-lanes = <4>;
+ num-viewport = <8>;
+ reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
++ ranges = <0x81000000 0x0 0xf9010000 0x0 0xf9010000 0x0 0x10000
++ 0x82000000 0x0 0xc0000000 0x0 0xc0000000 0x0 0x20000000>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+index 2c3127167e3c..d987d6741e40 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+@@ -118,7 +118,7 @@
+ };
+
+ vreg_l3_1p05: l3 {
+- regulator-min-microvolt = <1050000>;
++ regulator-min-microvolt = <1048000>;
+ regulator-max-microvolt = <1160000>;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+index ffedf9640af7..65a2cbeb28be 100644
+--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
++++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
+@@ -383,6 +383,7 @@
+ compatible = "qcom,gcc-qcs404";
+ reg = <0x01800000 0x80000>;
+ #clock-cells = <1>;
++ #reset-cells = <1>;
+
+ assigned-clocks = <&gcc GCC_APSS_AHB_CLK_SRC>;
+ assigned-clock-rates = <19200000>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+index 04623e52ac5d..1bc1579674e5 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+@@ -565,12 +565,11 @@
+ status = "okay";
+
+ u2phy0_otg: otg-port {
+- phy-supply = <&vcc5v0_typec0>;
+ status = "okay";
+ };
+
+ u2phy0_host: host-port {
+- phy-supply = <&vcc5v0_host>;
++ phy-supply = <&vcc5v0_typec0>;
+ status = "okay";
+ };
+ };
+@@ -620,7 +619,7 @@
+
+ &usbdrd_dwc3_0 {
+ status = "okay";
+- dr_mode = "otg";
++ dr_mode = "host";
+ };
+
+ &usbdrd3_1 {
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+index 196ac9b78076..89594a7276f4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+@@ -1706,11 +1706,11 @@
+ reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "isp0_mmu";
+- clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>;
++ clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
++ power-domains = <&power RK3399_PD_ISP0>;
+ rockchip,disable-mmu-reset;
+- status = "disabled";
+ };
+
+ isp1_mmu: iommu@ff924000 {
+@@ -1718,11 +1718,11 @@
+ reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupt-names = "isp1_mmu";
+- clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>;
++ clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
+ clock-names = "aclk", "iface";
+ #iommu-cells = <0>;
++ power-domains = <&power RK3399_PD_ISP1>;
+ rockchip,disable-mmu-reset;
+- status = "disabled";
+ };
+
+ hdmi_sound: hdmi-sound {
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 373799b7982f..0a61344ab243 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -35,9 +35,10 @@
+ */
+
+ enum ftr_type {
+- FTR_EXACT, /* Use a predefined safe value */
+- FTR_LOWER_SAFE, /* Smaller value is safe */
+- FTR_HIGHER_SAFE,/* Bigger value is safe */
++ FTR_EXACT, /* Use a predefined safe value */
++ FTR_LOWER_SAFE, /* Smaller value is safe */
++ FTR_HIGHER_SAFE, /* Bigger value is safe */
++ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
+ };
+
+ #define FTR_STRICT true /* SANITY check strict matching required */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index aabdabf52fdb..ae63eedea1c1 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+ /*
+ * Linux can handle differing I-cache policies. Userspace JITs will
+@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
+ case FTR_LOWER_SAFE:
+ ret = new < cur ? new : cur;
+ break;
++ case FTR_HIGHER_OR_ZERO_SAFE:
++ if (!cur || !new)
++ break;
++ /* Fallthrough */
+ case FTR_HIGHER_SAFE:
+ ret = new > cur ? new : cur;
+ break;
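The new FTR_HIGHER_OR_ZERO_SAFE handling above treats a raw field value of zero as "unknown, assume worst case" for the CWG/ERG fields, where normally the larger value is the safe one. A stand-alone C mirror of arm64_ftr_safe_value()'s switch (with a local copy of the enum) shows the effect:

#include <stdio.h>

enum ftr_type { FTR_LOWER_SAFE, FTR_HIGHER_SAFE, FTR_HIGHER_OR_ZERO_SAFE };

static long safe_value(enum ftr_type type, long new, long cur)
{
        switch (type) {
        case FTR_LOWER_SAFE:
                return new < cur ? new : cur;
        case FTR_HIGHER_OR_ZERO_SAFE:
                if (!cur || !new)
                        return 0;       /* zero means "unknown" and dominates */
                /* fall through */
        case FTR_HIGHER_SAFE:
                return new > cur ? new : cur;
        }
        return cur;
}

int main(void)
{
        printf("%ld\n", safe_value(FTR_HIGHER_OR_ZERO_SAFE, 0, 4)); /* 0 */
        printf("%ld\n", safe_value(FTR_HIGHER_OR_ZERO_SAFE, 3, 4)); /* 4 */
        return 0;
}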
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
+index dceb84520948..67b3bae50b92 100644
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -536,13 +536,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
+ /* Aligned */
+ break;
+ case 1:
+- /* Allow single byte watchpoint. */
+- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+- break;
+ case 2:
+ /* Allow halfword watchpoints and breakpoints. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ break;
++ case 3:
++ /* Allow single byte watchpoint. */
++ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
++ break;
+ default:
+ return -EINVAL;
+ }
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index cfd87e662fcf..9c95097557c7 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -154,8 +154,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+ if (edge)
+ irq_set_handler(d->hwirq, handle_edge_irq);
+
+- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
+- (val << (i * 4)), LTQ_EIU_EXIN_C);
++ ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
++ (~(7 << (i * 4)))) | (val << (i * 4)),
++ LTQ_EIU_EXIN_C);
+ }
+ }
+
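The lantiq fix above is a standard read-modify-write repair: the old code OR-ed the new trigger type over whatever was already in the EXIN_C field, while the fix clears the 3-bit field first. A minimal demonstration, with a plain variable standing in for the register:

#include <stdio.h>

int main(void)
{
        unsigned int reg = 0x00000060;  /* 3-bit field at bits 4..6 holds 6 */
        unsigned int i = 1, val = 1;    /* want field i set to 1 */

        unsigned int buggy = reg | (val << (i * 4));  /* 0x70: stale bits kept */
        unsigned int fixed = (reg & ~(7u << (i * 4))) | (val << (i * 4));  /* 0x10 */

        printf("buggy %#x fixed %#x\n", buggy, fixed);
        return 0;
}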
+diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
+index 899b2fb4b52f..7b5180d78e20 100644
+--- a/arch/nds32/include/asm/syscall.h
++++ b/arch/nds32/include/asm/syscall.h
+@@ -26,7 +26,8 @@ struct pt_regs;
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+-int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
++static inline int
++syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+ {
+ return regs->syscallno;
+ }
+@@ -47,7 +48,8 @@ int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+-void syscall_rollback(struct task_struct *task, struct pt_regs *regs)
++static inline void
++syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+ {
+ regs->uregs[0] = regs->orig_r0;
+ }
+@@ -62,7 +64,8 @@ void syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+-long syscall_get_error(struct task_struct *task, struct pt_regs *regs)
++static inline long
++syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+ {
+ unsigned long error = regs->uregs[0];
+ return IS_ERR_VALUE(error) ? error : 0;
+@@ -79,7 +82,8 @@ long syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+-long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
++static inline long
++syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+ {
+ return regs->uregs[0];
+ }
+@@ -99,8 +103,9 @@ long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+-void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+- int error, long val)
++static inline void
++syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
++ int error, long val)
+ {
+ regs->uregs[0] = (long)error ? error : val;
+ }
+@@ -118,8 +123,9 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+ #define SYSCALL_MAX_ARGS 6
+-void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+- unsigned long *args)
++static inline void
++syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *args)
+ {
+ args[0] = regs->orig_r0;
+ args++;
+@@ -138,8 +144,9 @@ void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+-void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+- const unsigned long *args)
++static inline void
++syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
++ const unsigned long *args)
+ {
+ regs->orig_r0 = args[0];
+ args++;
+diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
+index c19af26febe6..303ac6c4be64 100644
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -164,5 +164,8 @@ define archhelp
+ @echo ' zinstall - Install compressed vmlinuz kernel'
+ endef
+
++archclean:
++ $(Q)$(MAKE) $(clean)=$(boot)
++
+ archheaders:
+ $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
+diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
+index 2da8624e5cf6..1e5879c6a752 100644
+--- a/arch/parisc/boot/compressed/Makefile
++++ b/arch/parisc/boot/compressed/Makefile
+@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
+ targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+ targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+ targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
++targets += real2.S firmware.c
+
+ KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+@@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
+ CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
+ $(obj)/vmlinux.lds: $(obj)/sizes.h
+
+-$(obj)/vmlinux.bin: vmlinux
++OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
++$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+ vmlinux.bin.all-y := $(obj)/vmlinux.bin
+diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
+index bfd7872739a3..2ac3a643f2eb 100644
+--- a/arch/parisc/boot/compressed/vmlinux.lds.S
++++ b/arch/parisc/boot/compressed/vmlinux.lds.S
+@@ -48,8 +48,8 @@ SECTIONS
+ *(.rodata.compressed)
+ }
+
+- /* bootloader code and data starts behind area of extracted kernel */
+- . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
++ /* bootloader code and data starts at least behind area of extracted kernel */
++ . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
+
+ /* align on next page boundary */
+ . = ALIGN(4096);
+diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
+index 0d62be3cba47..74f4555a62ba 100644
+--- a/arch/powerpc/mm/kasan/kasan_init_32.c
++++ b/arch/powerpc/mm/kasan/kasan_init_32.c
+@@ -21,7 +21,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ }
+
+-static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
++static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+ {
+ pmd_t *pmd;
+ unsigned long k_cur, k_next;
+@@ -35,7 +35,10 @@ static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
+ if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+ continue;
+
+- new = pte_alloc_one_kernel(&init_mm);
++ if (slab_is_available())
++ new = pte_alloc_one_kernel(&init_mm);
++ else
++ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+
+ if (!new)
+ return -ENOMEM;
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 5a237e8dbf8d..0de54a1d25c0 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -17,6 +17,7 @@
+ #include "pgtable.h"
+ #include "../string.h"
+ #include "../voffset.h"
++#include <asm/bootparam_utils.h>
+
+ /*
+ * WARNING!!
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index d2f184165934..c8181392f70d 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -23,7 +23,6 @@
+ #include <asm/page.h>
+ #include <asm/boot.h>
+ #include <asm/bootparam.h>
+-#include <asm/bootparam_utils.h>
+
+ #define BOOT_CTYPE_H
+ #include <linux/acpi.h>
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index efb0d1b1f15f..d6f2e29be3e2 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -329,6 +329,23 @@ For 32-bit we have the following conventions - kernel is built with
+
+ #endif
+
++/*
++ * Mitigate Spectre v1 for conditional swapgs code paths.
++ *
++ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
++ * prevent a speculative swapgs when coming from kernel space.
++ *
++ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
++ * to prevent the swapgs from getting speculatively skipped when coming from
++ * user space.
++ */
++.macro FENCE_SWAPGS_USER_ENTRY
++ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
++.endm
++.macro FENCE_SWAPGS_KERNEL_ENTRY
++ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
++.endm
++
+ .macro STACKLEAK_ERASE_NOCLOBBER
+ #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+ PUSH_AND_CLEAR_REGS
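The comment block above explains where each fence is used; the macros themselves expand to nothing unless boot-time alternatives patching set the matching feature bit. A rough user-space analogue of that mechanism, purely illustrative and x86-only (the boolean stands in for X86_FEATURE_FENCE_SWAPGS_USER):

#include <stdbool.h>

static bool fence_swapgs_user;  /* set once at startup, like a patched alternative */

static inline void fence_swapgs_user_entry(void)
{
        if (fence_swapgs_user)
                __asm__ volatile("lfence" ::: "memory");  /* speculation barrier */
}

int main(void)
{
        fence_swapgs_user = true;
        fence_swapgs_user_entry();
        return 0;
}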
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 8dbca86c249b..69808aaa6851 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -519,7 +519,7 @@ ENTRY(interrupt_entry)
+ testb $3, CS-ORIG_RAX+8(%rsp)
+ jz 1f
+ SWAPGS
+-
++ FENCE_SWAPGS_USER_ENTRY
+ /*
+ * Switch to the thread stack. The IRET frame and orig_ax are
+ * on the stack, as well as the return address. RDI..R12 are
+@@ -549,8 +549,10 @@ ENTRY(interrupt_entry)
+ UNWIND_HINT_FUNC
+
+ movq (%rdi), %rdi
++ jmp 2f
+ 1:
+-
++ FENCE_SWAPGS_KERNEL_ENTRY
++2:
+ PUSH_AND_CLEAR_REGS save_ret=1
+ ENCODE_FRAME_POINTER 8
+
+@@ -1171,7 +1173,6 @@ idtentry stack_segment do_stack_segment has_error_code=1
+ #ifdef CONFIG_XEN_PV
+ idtentry xennmi do_nmi has_error_code=0
+ idtentry xendebug do_debug has_error_code=0
+-idtentry xenint3 do_int3 has_error_code=0
+ #endif
+
+ idtentry general_protection do_general_protection has_error_code=1
+@@ -1216,6 +1217,13 @@ ENTRY(paranoid_entry)
+ */
+ SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+
++ /*
++ * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
++ * unconditional CR3 write, even in the PTI case. So do an lfence
++ * to prevent GS speculation, regardless of whether PTI is enabled.
++ */
++ FENCE_SWAPGS_KERNEL_ENTRY
++
+ ret
+ END(paranoid_entry)
+
+@@ -1266,6 +1274,7 @@ ENTRY(error_entry)
+ * from user mode due to an IRET fault.
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ /* We have user CR3. Change to kernel CR3. */
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+
+@@ -1287,6 +1296,8 @@ ENTRY(error_entry)
+ CALL_enter_from_user_mode
+ ret
+
++.Lerror_entry_done_lfence:
++ FENCE_SWAPGS_KERNEL_ENTRY
+ .Lerror_entry_done:
+ TRACE_IRQS_OFF
+ ret
+@@ -1305,7 +1316,7 @@ ENTRY(error_entry)
+ cmpq %rax, RIP+8(%rsp)
+ je .Lbstep_iret
+ cmpq $.Lgs_change, RIP+8(%rsp)
+- jne .Lerror_entry_done
++ jne .Lerror_entry_done_lfence
+
+ /*
+ * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
+@@ -1313,6 +1324,7 @@ ENTRY(error_entry)
+ * .Lgs_change's error handler with kernel gsbase.
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+ jmp .Lerror_entry_done
+
+@@ -1327,6 +1339,7 @@ ENTRY(error_entry)
+ * gsbase and CR3. Switch to kernel gsbase and CR3:
+ */
+ SWAPGS
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+
+ /*
+@@ -1418,6 +1431,7 @@ ENTRY(nmi)
+
+ swapgs
+ cld
++ FENCE_SWAPGS_USER_ENTRY
+ SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
+ movq %rsp, %rdx
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 1340fa53b575..2e599384abd8 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -49,7 +49,7 @@ static inline void generic_apic_probe(void)
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 1d337c51f7e6..403f70c2e431 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -22,8 +22,8 @@ enum cpuid_leafs
+ CPUID_LNX_3,
+ CPUID_7_0_EBX,
+ CPUID_D_1_EAX,
+- CPUID_F_0_EDX,
+- CPUID_F_1_EDX,
++ CPUID_LNX_4,
++ CPUID_DUMMY,
+ CPUID_8000_0008_EBX,
+ CPUID_6_EAX,
+ CPUID_8000_000A_EDX,
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 1017b9c7dfe0..49a8c25eada4 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -271,13 +271,18 @@
+ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
+-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+-
+-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
+-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
+-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
++/*
++ * Extended auxiliary flags: Linux defined - for features scattered in various
++ * CPUID levels like 0xf, etc.
++ *
++ * Reuse free bits when adding new feature flags!
++ */
++#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
++#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
++#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
++#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
++#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
++#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+
+ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
+ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+@@ -387,5 +392,6 @@
+ #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+ #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+ #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
++#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 08f46951c430..8253925c5e8c 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1491,25 +1491,29 @@ enum {
+ #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
+ #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+
++asmlinkage void __noreturn kvm_spurious_fault(void);
++
+ /*
+ * Hardware virtualization extension instructions may fault if a
+ * reboot turns off virtualization while processes are running.
+- * Trap the fault and ignore the instruction if that happens.
++ * Usually, after catching the fault, we just panic; during reboot
++ * the instruction is ignored instead.
+ */
+-asmlinkage void kvm_spurious_fault(void);
+-
+-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
+- "666: " insn "\n\t" \
+- "668: \n\t" \
+- ".pushsection .fixup, \"ax\" \n" \
+- "667: \n\t" \
+- cleanup_insn "\n\t" \
+- "cmpb $0, kvm_rebooting \n\t" \
+- "jne 668b \n\t" \
+- __ASM_SIZE(push) " $666b \n\t" \
+- "jmp kvm_spurious_fault \n\t" \
+- ".popsection \n\t" \
+- _ASM_EXTABLE(666b, 667b)
++#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
++ "666: \n\t" \
++ insn "\n\t" \
++ "jmp 668f \n\t" \
++ "667: \n\t" \
++ "call kvm_spurious_fault \n\t" \
++ "668: \n\t" \
++ ".pushsection .fixup, \"ax\" \n\t" \
++ "700: \n\t" \
++ cleanup_insn "\n\t" \
++ "cmpb $0, kvm_rebooting\n\t" \
++ "je 667b \n\t" \
++ "jmp 668b \n\t" \
++ ".popsection \n\t" \
++ _ASM_EXTABLE(666b, 700b)
+
+ #define __kvm_handle_fault_on_reboot(insn) \
+ ____kvm_handle_fault_on_reboot(insn, "")
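The reworked macro above changes the fault path: instead of pushing a fake return address and retrying, the fixup now either calls kvm_spurious_fault() or, when kvm_rebooting is set, skips the faulting instruction and continues. A runnable C reduction of that control flow; try_vmx_insn() is a hypothetical stand-in for the patched instruction:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool kvm_rebooting;

static void kvm_spurious_fault(void)
{
        fprintf(stderr, "spurious VMX fault\n");
        exit(1);
}

static bool try_vmx_insn(void)  /* hypothetical: false means the insn faulted */
{
        return false;
}

int main(void)
{
        kvm_rebooting = true;   /* during reboot the fault is simply ignored */
        if (!try_vmx_insn() && !kvm_rebooting)
                kvm_spurious_fault();
        return 0;
}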
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index c25c38a05c1c..d6f5ae2c79ab 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -746,6 +746,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+ PV_RESTORE_ALL_CALLER_REGS \
+ FRAME_END \
+ "ret;" \
++ ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
+ ".popsection")
+
+ /* Get a reference to a callee-save function */
+diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
+index 7d6f3f3fad78..f2bd284abc16 100644
+--- a/arch/x86/include/asm/traps.h
++++ b/arch/x86/include/asm/traps.h
+@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
+ asmlinkage void xen_divide_error(void);
+ asmlinkage void xen_xennmi(void);
+ asmlinkage void xen_xendebug(void);
+-asmlinkage void xen_xenint3(void);
++asmlinkage void xen_int3(void);
+ asmlinkage void xen_overflow(void);
+ asmlinkage void xen_bounds(void);
+ asmlinkage void xen_invalid_op(void);
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 16c21ed97cb2..530cf1fd68a2 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+ /*
+ * Debug level, exported for io_apic.c
+ */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+
+ int pic_mode;
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 801ecd1c3fd5..c6fa3ef10b4e 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -34,6 +34,7 @@
+
+ #include "cpu.h"
+
++static void __init spectre_v1_select_mitigation(void);
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
+ static void __init l1tf_select_mitigation(void);
+@@ -98,17 +99,11 @@ void __init check_bugs(void)
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
+- /* Select the proper spectre mitigation before patching alternatives */
++ /* Select the proper CPU mitigations before patching alternatives: */
++ spectre_v1_select_mitigation();
+ spectre_v2_select_mitigation();
+-
+- /*
+- * Select proper mitigation for any exposure to the Speculative Store
+- * Bypass vulnerability.
+- */
+ ssb_select_mitigation();
+-
+ l1tf_select_mitigation();
+-
+ mds_select_mitigation();
+
+ arch_smt_update();
+@@ -273,6 +268,98 @@ static int __init mds_cmdline(char *str)
+ }
+ early_param("mds", mds_cmdline);
+
++#undef pr_fmt
++#define pr_fmt(fmt) "Spectre V1 : " fmt
++
++enum spectre_v1_mitigation {
++ SPECTRE_V1_MITIGATION_NONE,
++ SPECTRE_V1_MITIGATION_AUTO,
++};
++
++static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
++ SPECTRE_V1_MITIGATION_AUTO;
++
++static const char * const spectre_v1_strings[] = {
++ [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
++ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
++};
++
++/*
++ * Does SMAP provide full mitigation against speculative kernel access to
++ * userspace?
++ */
++static bool smap_works_speculatively(void)
++{
++ if (!boot_cpu_has(X86_FEATURE_SMAP))
++ return false;
++
++ /*
++ * On CPUs which are vulnerable to Meltdown, SMAP does not
++ * prevent speculative access to user data in the L1 cache.
++ * Consider SMAP to be non-functional as a mitigation on these
++ * CPUs.
++ */
++ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
++ return false;
++
++ return true;
++}
++
++static void __init spectre_v1_select_mitigation(void)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
++ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
++ return;
++ }
++
++ if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
++ /*
++ * With Spectre v1, a user can speculatively control either
++ * path of a conditional swapgs with a user-controlled GS
++ * value. The mitigation is to add lfences to both code paths.
++ *
++ * If FSGSBASE is enabled, the user can put a kernel address in
++ * GS, in which case SMAP provides no protection.
++ *
++ * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
++ * FSGSBASE enablement patches have been merged. ]
++ *
++ * If FSGSBASE is disabled, the user can only put a user space
++ * address in GS. That makes an attack harder, but still
++ * possible if there's no SMAP protection.
++ */
++ if (!smap_works_speculatively()) {
++ /*
++ * Mitigation can be provided from SWAPGS itself or
++ * PTI as the CR3 write in the Meltdown mitigation
++ * is serializing.
++ *
++ * If neither is there, mitigate with an LFENCE to
++ * stop speculation through swapgs.
++ */
++ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
++ !boot_cpu_has(X86_FEATURE_PTI))
++ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
++
++ /*
++ * Enable lfences in the kernel entry (non-swapgs)
++ * paths, to prevent user entry from speculatively
++ * skipping swapgs.
++ */
++ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
++ }
++ }
++
++ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
++}
++
++static int __init nospectre_v1_cmdline(char *str)
++{
++ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
++ return 0;
++}
++early_param("nospectre_v1", nospectre_v1_cmdline);
++
+ #undef pr_fmt
+ #define pr_fmt(fmt) "Spectre V2 : " fmt
+
+@@ -1290,7 +1377,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ break;
+
+ case X86_BUG_SPECTRE_V1:
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+
+ case X86_BUG_SPECTRE_V2:
+ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
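The string table and cpu_show_common() change above are what userspace ultimately sees. A quick check of the reported state; the sysfs path is standard, the rest is just an illustrative reader:

#include <stdio.h>

int main(void)
{
        char buf[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("spectre_v1: %s", buf);  /* e.g. "Mitigation: usercopy/swapgs barriers ..." */
        fclose(f);
        return 0;
}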
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 2c57fffebf9b..3ae218b51eed 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -801,6 +801,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void init_cqm(struct cpuinfo_x86 *c)
++{
++ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
++ c->x86_cache_max_rmid = -1;
++ c->x86_cache_occ_scale = -1;
++ return;
++ }
++
++ /* will be overridden if occupancy monitoring exists */
++ c->x86_cache_max_rmid = cpuid_ebx(0xf);
++
++ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
++ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
++ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
++ u32 eax, ebx, ecx, edx;
++
++ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
++ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
++
++ c->x86_cache_max_rmid = ecx;
++ c->x86_cache_occ_scale = ebx;
++ }
++}
++
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+ {
+ u32 eax, ebx, ecx, edx;
+@@ -832,33 +856,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+ c->x86_capability[CPUID_D_1_EAX] = eax;
+ }
+
+- /* Additional Intel-defined flags: level 0x0000000F */
+- if (c->cpuid_level >= 0x0000000F) {
+-
+- /* QoS sub-leaf, EAX=0Fh, ECX=0 */
+- cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_0_EDX] = edx;
+-
+- if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
+- /* will be overridden if occupancy monitoring exists */
+- c->x86_cache_max_rmid = ebx;
+-
+- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+- cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
+- c->x86_capability[CPUID_F_1_EDX] = edx;
+-
+- if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
+- ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
+- (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
+- c->x86_cache_max_rmid = ecx;
+- c->x86_cache_occ_scale = ebx;
+- }
+- } else {
+- c->x86_cache_max_rmid = -1;
+- c->x86_cache_occ_scale = -1;
+- }
+- }
+-
+ /* AMD-defined flags: level 0x80000001 */
+ eax = cpuid_eax(0x80000000);
+ c->extended_cpuid_level = eax;
+@@ -889,6 +886,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
+
+ init_scattered_cpuid_features(c);
+ init_speculation_control(c);
++ init_cqm(c);
+
+ /*
+ * Clear/Set all flags overridden by options, after probe.
+@@ -947,6 +945,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define NO_L1TF BIT(3)
+ #define NO_MDS BIT(4)
+ #define MSBDS_ONLY BIT(5)
++#define NO_SWAPGS BIT(6)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -973,30 +972,38 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
+- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
++
++ /*
++ * Technically, swapgs isn't serializing on AMD (despite it previously
++ * being documented as such in the APM). But according to AMD, %gs is
++ * updated non-speculatively, and the issuing of %gs-relative memory
++ * operands will be blocked until the %gs update completes, which is
++ * good enough for our purposes.
++ */
+
+ /* AMD Family 0xf - 0x12 */
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+ {}
+ };
+
+@@ -1033,6 +1040,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+ }
+
++ if (!cpu_matches(NO_SWAPGS))
++ setup_force_cpu_bug(X86_BUG_SWAPGS);
++
+ if (cpu_matches(NO_MELTDOWN))
+ return;
+
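init_cqm() above reads the same CPUID leaf that the deleted get_cpu_cap() block did: leaf 0xf sub-leaf 1 returns the maximum RMID in ECX and the occupancy scale in EBX. The enumeration can be reproduced from userspace with GCC/clang's <cpuid.h>, assuming the CPU advertises CQM at all:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* QoS sub-leaf, EAX=0Fh, ECX=1, as in the kernel's cpuid_count() call */
        if (!__get_cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx))
                return 1;
        printf("max RMID: %u, occupancy scale: %u bytes\n", ecx, ebx);
        return 0;
}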
+diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
+index 2c0bd38a44ab..fa07a224e7b9 100644
+--- a/arch/x86/kernel/cpu/cpuid-deps.c
++++ b/arch/x86/kernel/cpu/cpuid-deps.c
+@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
+ { X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F },
+ { X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F },
++ { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC },
++ { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
++ { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
+ {}
+ };
+
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+index 94aa1c72ca98..adf9b71386ef 100644
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -26,6 +26,10 @@ struct cpuid_bit {
+ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
++ { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
++ { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
++ { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
++ { X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 },
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 5169b8cc35bb..320b70acb211 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -817,6 +817,7 @@ asm(
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+ "setne %al;"
+ "ret;"
++".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
+ ".popsection");
+
+ #endif
+diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
+index 9a327d5b6d1f..d78a61408243 100644
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
+- [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
+- [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX},
+ [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+ [CPUID_6_EAX] = { 6, 0, CPUID_EAX},
+ [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 98f6e4f88b04..8d95c81b2c82 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4593,11 +4593,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+ */
+
+ /* Faults from writes to non-writable pages */
+- u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
++ u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
+ /* Faults from user mode accesses to supervisor pages */
+- u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
++ u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
+ /* Faults from fetches of non-executable pages */
+- u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
++ u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
+ /* Faults from kernel mode fetches of user pages */
+ u8 smepf = 0;
+ /* Faults from kernel mode accesses of user pages */
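The (u8) casts above do not change the stored values; assigning the ternary's int result to a u8 truncates either way. They make the narrowing explicit and silence integer-conversion warnings, because ~ first promotes its u8 operand to int:

#include <stdio.h>

typedef unsigned char u8;

int main(void)
{
        u8 w = 0x0f;
        unsigned int promoted = ~w;     /* 0xfffffff0: ~ promotes u8 to int */
        u8 truncated = (u8)~w;          /* 0xf0: the truncation made explicit */

        printf("promoted %#x truncated %#x\n", promoted, truncated);
        return 0;
}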
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index ef6575ab60ed..b96723294b2f 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4087,7 +4087,10 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ * mode, e.g. a 32-bit address size can yield a 64-bit virtual
+ * address when using FS/GS with a non-zero base.
+ */
+- *ret = s.base + off;
++ if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
++ *ret = s.base + off;
++ else
++ *ret = off;
+
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
+index a5a41ec58072..0c122226ca56 100644
+--- a/arch/x86/math-emu/fpu_emu.h
++++ b/arch/x86/math-emu/fpu_emu.h
+@@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
+ #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
+ ((y) + EXTENDED_Ebias) & 0x7fff; }
+ #define exponent16(x) (*(short *)&((x)->exp))
+-#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (y); }
++#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (u16)(y); }
+ #define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); }
+ #define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
+
+diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
+index 8dc9095bab22..742619e94bdf 100644
+--- a/arch/x86/math-emu/reg_constant.c
++++ b/arch/x86/math-emu/reg_constant.c
+@@ -18,7 +18,7 @@
+ #include "control_w.h"
+
+ #define MAKE_REG(s, e, l, h) { l, h, \
+- ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
++ (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+
+ FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
+ #if 0
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 4722ba2966ac..30c14cb343fc 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -596,12 +596,12 @@ struct trap_array_entry {
+
+ static struct trap_array_entry trap_array[] = {
+ { debug, xen_xendebug, true },
+- { int3, xen_xenint3, true },
+ { double_fault, xen_double_fault, true },
+ #ifdef CONFIG_X86_MCE
+ { machine_check, xen_machine_check, true },
+ #endif
+ { nmi, xen_xennmi, true },
++ { int3, xen_int3, false },
+ { overflow, xen_overflow, false },
+ #ifdef CONFIG_IA32_EMULATION
+ { entry_INT80_compat, xen_entry_INT80_compat, false },
+diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
+index 1e9ef0ba30a5..ebf610b49c06 100644
+--- a/arch/x86/xen/xen-asm_64.S
++++ b/arch/x86/xen/xen-asm_64.S
+@@ -32,7 +32,6 @@ xen_pv_trap divide_error
+ xen_pv_trap debug
+ xen_pv_trap xendebug
+ xen_pv_trap int3
+-xen_pv_trap xenint3
+ xen_pv_trap xennmi
+ xen_pv_trap overflow
+ xen_pv_trap bounds
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index ad2c565f5cbe..a86a770c9b79 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -17,7 +17,9 @@
+
+ #include "internal.h"
+
++#ifdef CONFIG_DMI
+ static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
++#endif
+
+ /*
+ * POLICY: If *anything* doesn't work, put it on the blacklist.
+@@ -61,7 +63,9 @@ int __init acpi_blacklisted(void)
+ }
+
+ (void)early_acpi_osi_init();
++#ifdef CONFIG_DMI
+ dmi_check_system(acpi_rev_dmi_table);
++#endif
+
+ return blacklisted;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index f11b7dc16e9d..430d31499ce9 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -932,6 +932,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ struct file *file;
+ struct inode *inode;
+ struct address_space *mapping;
++ struct block_device *claimed_bdev = NULL;
+ int lo_flags = 0;
+ int error;
+ loff_t size;
+@@ -950,10 +951,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ * here to avoid changing device under exclusive owner.
+ */
+ if (!(mode & FMODE_EXCL)) {
+- bdgrab(bdev);
+- error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
+- if (error)
++ claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
++ if (IS_ERR(claimed_bdev)) {
++ error = PTR_ERR(claimed_bdev);
+ goto out_putf;
++ }
+ }
+
+ error = mutex_lock_killable(&loop_ctl_mutex);
+@@ -1023,15 +1025,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ mutex_unlock(&loop_ctl_mutex);
+ if (partscan)
+ loop_reread_partitions(lo, bdev);
+- if (!(mode & FMODE_EXCL))
+- blkdev_put(bdev, mode | FMODE_EXCL);
++ if (claimed_bdev)
++ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ return 0;
+
+ out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
+ out_bdev:
+- if (!(mode & FMODE_EXCL))
+- blkdev_put(bdev, mode | FMODE_EXCL);
++ if (claimed_bdev)
++ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
+ out_putf:
+ fput(file);
+ out:
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 3a9bca3aa093..57aebc6e1c28 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -1229,7 +1229,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
+ struct block_device *bdev)
+ {
+ sock_shutdown(nbd);
+- kill_bdev(bdev);
++ __invalidate_device(bdev, true);
+ nbd_bdev_reset(bdev);
+ if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ &nbd->config->runtime_flags))
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index d47ad10a35fe..bf868260f435 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip)
+ return chip->ops->go_idle(chip);
+ }
+
++static void tpm_clk_enable(struct tpm_chip *chip)
++{
++ if (chip->ops->clk_enable)
++ chip->ops->clk_enable(chip, true);
++}
++
++static void tpm_clk_disable(struct tpm_chip *chip)
++{
++ if (chip->ops->clk_enable)
++ chip->ops->clk_enable(chip, false);
++}
++
+ /**
+ * tpm_chip_start() - power on the TPM
+ * @chip: a TPM chip to use
+@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip)
+ {
+ int ret;
+
+- if (chip->ops->clk_enable)
+- chip->ops->clk_enable(chip, true);
++ tpm_clk_enable(chip);
+
+ if (chip->locality == -1) {
+ ret = tpm_request_locality(chip);
+ if (ret) {
+- chip->ops->clk_enable(chip, false);
++ tpm_clk_disable(chip);
+ return ret;
+ }
+ }
+@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip)
+ ret = tpm_cmd_ready(chip);
+ if (ret) {
+ tpm_relinquish_locality(chip);
+- if (chip->ops->clk_enable)
+- chip->ops->clk_enable(chip, false);
++ tpm_clk_disable(chip);
+ return ret;
+ }
+
+@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip)
+ {
+ tpm_go_idle(chip);
+ tpm_relinquish_locality(chip);
+- if (chip->ops->clk_enable)
+- chip->ops->clk_enable(chip, false);
++ tpm_clk_disable(chip);
+ }
+ EXPORT_SYMBOL_GPL(tpm_chip_stop);
+
+diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
+index 9d8651033ae9..bc01611c7723 100644
+--- a/drivers/clk/mediatek/clk-mt8183.c
++++ b/drivers/clk/mediatek/clk-mt8183.c
+@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
+ };
+
++static const struct mtk_fixed_factor top_early_divs[] = {
++ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
++};
++
+ static const struct mtk_fixed_factor top_divs[] = {
+- FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
+- 2),
+ FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
+@@ -1167,37 +1169,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ }
+
++static struct clk_onecell_data *top_clk_data;
++
++static void clk_mt8183_top_init_early(struct device_node *node)
++{
++ int i;
++
++ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
++
++ for (i = 0; i < CLK_TOP_NR_CLK; i++)
++ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
++
++ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
++ top_clk_data);
++
++ of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
++}
++
++CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
++ clk_mt8183_top_init_early);
++
+ static int clk_mt8183_top_probe(struct platform_device *pdev)
+ {
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *base;
+- struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+-
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+- clk_data);
++ top_clk_data);
++
++ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
++ top_clk_data);
+
+- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
++ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+- node, &mt8183_clk_lock, clk_data);
++ node, &mt8183_clk_lock, top_clk_data);
+
+ mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+- base, &mt8183_clk_lock, clk_data);
++ base, &mt8183_clk_lock, top_clk_data);
+
+ mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+- base, &mt8183_clk_lock, clk_data);
++ base, &mt8183_clk_lock, top_clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+- clk_data);
++ top_clk_data);
+
+- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
++ return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ }
+
+ static int clk_mt8183_infra_probe(struct platform_device *pdev)
+diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
+index f76850d99e59..d3f42e086431 100644
+--- a/drivers/clk/meson/clk-mpll.c
++++ b/drivers/clk/meson/clk-mpll.c
+@@ -119,9 +119,12 @@ static int mpll_set_rate(struct clk_hw *hw,
+ meson_parm_write(clk->map, &mpll->sdm, sdm);
+ meson_parm_write(clk->map, &mpll->sdm_en, 1);
+
+- /* Set additional fractional part enable if required */
+- if (MESON_PARM_APPLICABLE(&mpll->ssen))
+- meson_parm_write(clk->map, &mpll->ssen, 1);
++ /* Set spread spectrum if possible */
++ if (MESON_PARM_APPLICABLE(&mpll->ssen)) {
++ unsigned int ss =
++ mpll->flags & CLK_MESON_MPLL_SPREAD_SPECTRUM ? 1 : 0;
++ meson_parm_write(clk->map, &mpll->ssen, ss);
++ }
+
+ /* Set the integer divider part */
+ meson_parm_write(clk->map, &mpll->n2, n2);
+diff --git a/drivers/clk/meson/clk-mpll.h b/drivers/clk/meson/clk-mpll.h
+index cf79340006dd..0f948430fed4 100644
+--- a/drivers/clk/meson/clk-mpll.h
++++ b/drivers/clk/meson/clk-mpll.h
+@@ -23,6 +23,7 @@ struct meson_clk_mpll_data {
+ };
+
+ #define CLK_MESON_MPLL_ROUND_CLOSEST BIT(0)
++#define CLK_MESON_MPLL_SPREAD_SPECTRUM BIT(1)
+
+ extern const struct clk_ops meson_clk_mpll_ro_ops;
+ extern const struct clk_ops meson_clk_mpll_ops;
+diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
+index 9980ab55271b..f76305b4bc8d 100644
+--- a/drivers/clk/sprd/sc9860-clk.c
++++ b/drivers/clk/sprd/sc9860-clk.c
+@@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+ const struct sprd_clk_desc *desc;
++ int ret;
+
+ match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
+ if (!match) {
+@@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev)
+ }
+
+ desc = match->data;
+- sprd_clk_regmap_init(pdev, desc);
++ ret = sprd_clk_regmap_init(pdev, desc);
++ if (ret)
++ return ret;
+
+ return sprd_clk_probe(&pdev->dev, desc->hw_clks);
+ }
+diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
+index ac1d27a8c650..e5470a6bbf55 100644
+--- a/drivers/clk/tegra/clk-tegra210.c
++++ b/drivers/clk/tegra/clk-tegra210.c
+@@ -2204,9 +2204,9 @@ static struct div_nmp pllu_nmp = {
+ };
+
+ static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+- { 12000000, 480000000, 40, 1, 0, 0 },
+- { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
+- { 38400000, 480000000, 25, 2, 0, 0 },
++ { 12000000, 480000000, 40, 1, 1, 0 },
++ { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
++ { 38400000, 480000000, 25, 2, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+ };
+
+@@ -3333,6 +3333,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
+ { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
+ { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
++ { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
+ { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
+ { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
+@@ -3357,7 +3358,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
+ { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
+ { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
+- { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
+ { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
+ { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index de5a8ca70d3d..6b17d179ef8a 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -24,10 +24,6 @@
+ #include "sp-dev.h"
+ #include "psp-dev.h"
+
+-#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min) \
+- ((psp_master->api_major) >= _maj && \
+- (psp_master->api_minor) >= _min)
+-
+ #define DEVICE_NAME "sev"
+ #define SEV_FW_FILE "amd/sev.fw"
+ #define SEV_FW_NAME_SIZE 64
+@@ -47,6 +43,15 @@ MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during
+ static bool psp_dead;
+ static int psp_timeout;
+
++static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
++{
++ if (psp_master->api_major > maj)
++ return true;
++ if (psp_master->api_major == maj && psp_master->api_minor >= min)
++ return true;
++ return false;
++}
++
+ static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+ {
+ struct device *dev = sp->dev;
+@@ -588,7 +593,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+- if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
++ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+@@ -651,7 +656,7 @@ static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
+ int ret;
+
+ /* SEV GET_ID available from SEV API v0.16 and up */
+- if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
++ if (!sev_version_greater_or_equal(0, 16))
+ return -ENOTSUPP;
+
+ /* SEV FW expects the buffer it fills with the ID to be
+@@ -1053,7 +1058,7 @@ void psp_pci_init(void)
+ psp_master->sev_state = SEV_STATE_UNINIT;
+ }
+
+- if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
++ if (sev_version_greater_or_equal(0, 15) &&
+ sev_update_firmware(psp_master->dev) == 0)
+ sev_get_api_version();
+
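The macro that sev_version_greater_or_equal() replaces compared major and minor numbers independently, so for example firmware API 1.0 would fail a >= 0.16 test because 0 < 16. A minimal reproduction of both checks:

#include <stdbool.h>
#include <stdio.h>

static bool old_check(int maj, int min, int want_maj, int want_min)
{
        return maj >= want_maj && min >= want_min;      /* buggy */
}

static bool new_check(int maj, int min, int want_maj, int want_min)
{
        if (maj > want_maj)
                return true;
        return maj == want_maj && min >= want_min;
}

int main(void)
{
        printf("old: %d new: %d\n",
               old_check(1, 0, 0, 16), new_check(1, 0, 0, 16));
        return 0;       /* prints "old: 0 new: 1" */
}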
+diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
+index a02318c6d28a..4c0131857133 100644
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -66,8 +66,11 @@ int dev_dax_kmem_probe(struct device *dev)
+ new_res->name = dev_name(dev);
+
+ rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+- if (rc)
++ if (rc) {
++ release_resource(new_res);
++ kfree(new_res);
+ return rc;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
+index 33ab1b607e2b..54de669c38b8 100644
+--- a/drivers/dma/sh/rcar-dmac.c
++++ b/drivers/dma/sh/rcar-dmac.c
+@@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ /* Someone calling slave DMA on a generic channel? */
+- if (rchan->mid_rid < 0 || !sg_len) {
++ if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, rchan->mid_rid);
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index ef317c90fbe1..79e9593815f1 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
+- if (flags & DMA_PREP_INTERRUPT)
++ if (flags & DMA_PREP_INTERRUPT) {
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
++ } else {
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+@@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
+- if (flags & DMA_PREP_INTERRUPT)
++ if (flags & DMA_PREP_INTERRUPT) {
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
++ } else {
++ WARN_ON_ONCE(1);
++ return NULL;
++ }
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
+index 08c85099d4d0..f3659443f8c2 100644
+--- a/drivers/firmware/psci/psci_checker.c
++++ b/drivers/firmware/psci/psci_checker.c
+@@ -359,16 +359,16 @@ static int suspend_test_thread(void *arg)
+ for (;;) {
+ /* Needs to be set first to avoid missing a wakeup. */
+ set_current_state(TASK_INTERRUPTIBLE);
+- if (kthread_should_stop()) {
+- __set_current_state(TASK_RUNNING);
++ if (kthread_should_park())
+ break;
+- }
+ schedule();
+ }
+
+ pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
+ cpu, nb_suspend, nb_shallow_sleep, nb_err);
+
++ kthread_parkme();
++
+ return nb_err;
+ }
+
+@@ -433,8 +433,10 @@ static int suspend_tests(void)
+
+
+ /* Stop and destroy all threads, get return status. */
+- for (i = 0; i < nb_threads; ++i)
++ for (i = 0; i < nb_threads; ++i) {
++ err += kthread_park(threads[i]);
+ err += kthread_stop(threads[i]);
++ }
+ out:
+ cpuidle_resume_and_unlock();
+ kfree(threads);
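
The psci_checker change switches the test threads from a bare kthread_stop() to a park-then-stop sequence, so each thread reaches a well-defined point (results printed, counters final) before it is reaped. Very roughly, in userspace terms, the ordering looks like this pthreads analogue (not the kernel API; build with gcc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool park_requested;  /* kthread_should_park() analogue */
static int nb_err;                  /* result the parent harvests     */

static void *suspend_test_thread(void *arg)
{
        (void)arg;
        /* Run test iterations until the parent requests a park. */
        while (!atomic_load(&park_requested))
                nb_err++;       /* stands in for one suspend/resume test */

        /* "Parked": results are final from here on (kthread_parkme()). */
        printf("worker parked, nb_err=%d\n", nb_err);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, suspend_test_thread, NULL);
        usleep(1000);

        atomic_store(&park_requested, 1);       /* kthread_park() */
        pthread_join(t, NULL);                  /* kthread_stop() */
        printf("parent sees final nb_err=%d\n", nb_err);
        return 0;
}
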
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index bb3104d2eb0c..4f333d6f2e23 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
+ }
+
+ if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
+- irqflags |= IRQF_TRIGGER_RISING;
++ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
++ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
+- irqflags |= IRQF_TRIGGER_FALLING;
++ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
++ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
+ INIT_KFIFO(le->events);
+@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
+ for (i = 0; i < chip->ngpio; i++) {
+ struct gpio_desc *desc = &gdev->descs[i];
+
+- if (chip->get_direction && gpiochip_line_is_valid(chip, i))
+- desc->flags = !chip->get_direction(chip, i) ?
+- (1 << FLAG_IS_OUT) : 0;
+- else
+- desc->flags = !chip->direction_input ?
+- (1 << FLAG_IS_OUT) : 0;
++ if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
++ if (!chip->get_direction(chip, i))
++ set_bit(FLAG_IS_OUT, &desc->flags);
++ else
++ clear_bit(FLAG_IS_OUT, &desc->flags);
++ } else {
++ if (!chip->direction_input)
++ set_bit(FLAG_IS_OUT, &desc->flags);
++ else
++ clear_bit(FLAG_IS_OUT, &desc->flags);
++ }
+ }
+
+ acpi_gpiochip_add(chip);
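
The lineevent change above encodes one rule: edge polarity in the character-device uAPI is logical, so on an active-low line a requested rising edge (inactive to active) is a falling edge on the wire, and vice versa. A small self-contained sketch of that mapping (the flag values are made up for illustration, not the real GPIOEVENT_*/IRQF_* constants):

#include <stdbool.h>
#include <stdio.h>

#define EV_RISING    0x1 /* logical rising edge requested  */
#define EV_FALLING   0x2 /* logical falling edge requested */
#define TRIG_RISING  0x1 /* physical rising-edge trigger   */
#define TRIG_FALLING 0x2 /* physical falling-edge trigger  */

/* For an active-low line, a logical rising edge (inactive -> active)
 * is a physical falling edge on the wire, and vice versa. */
static unsigned int edge_to_trigger(unsigned int eflags, bool active_low)
{
        unsigned int irqflags = 0;

        if (eflags & EV_RISING)
                irqflags |= active_low ? TRIG_FALLING : TRIG_RISING;
        if (eflags & EV_FALLING)
                irqflags |= active_low ? TRIG_RISING : TRIG_FALLING;
        return irqflags;
}

int main(void)
{
        printf("active-high rising -> %#x\n", edge_to_trigger(EV_RISING, false));
        printf("active-low  rising -> %#x\n", edge_to_trigger(EV_RISING, true));
        return 0;
}
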
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index eac7186e4f08..12142d13f22f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2034,6 +2034,9 @@ enum dc_status resource_map_pool_resources(
+ if (context->streams[i] == stream) {
+ context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
++ context->stream_status[i].audio_inst =
++ pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
++
+ return DC_OK;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+index 189bdab929a5..c20803b71fa5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
+@@ -42,6 +42,7 @@ struct dc_stream_status {
+ int primary_otg_inst;
+ int stream_enc_inst;
+ int plane_count;
++ int audio_inst;
+ struct timing_sync_info timing_sync_info;
+ struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+ };
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
+index a68addf95c23..4a7cf8646b0d 100644
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -1904,6 +1904,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
+
+ entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ if (!entry) {
++ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
++ if (ret)
++ goto err_unlock;
++
++ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
++ if (ret)
++ goto err_unmap;
++ } else if (entry->size != size) {
++ /* the same gfn with different size: unmap and re-map */
++ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
++ __gvt_cache_remove_entry(vgpu, entry);
++
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
+index dc4ce694c06a..235aedc62b4c 100644
+--- a/drivers/gpu/drm/i915/i915_perf.c
++++ b/drivers/gpu/drm/i915/i915_perf.c
+@@ -3457,9 +3457,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
+ dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
+
+- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+-
++ if (IS_GEN(dev_priv, 10)) {
++ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
++ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
++ } else {
++ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
++ dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
++ }
+ dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ }
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index 4b1650f51955..847b7866137d 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -775,7 +775,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
+ drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
+ connector->display_info.bpc * 3);
+
+- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
++ if (crtc_state->mode_changed) {
+ slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
+ mstc->port,
+ asyh->dp.pbn);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 4116ee62adaf..f69ff22beee0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -252,7 +252,7 @@ nouveau_conn_reset(struct drm_connector *connector)
+ return;
+
+ if (connector->state)
+- __drm_atomic_helper_connector_destroy_state(connector->state);
++ nouveau_conn_atomic_destroy_state(connector, connector->state);
+ __drm_atomic_helper_connector_reset(connector, &asyc->state);
+ asyc->dither.mode = DITHERING_MODE_AUTO;
+ asyc->dither.depth = DITHERING_DEPTH_AUTO;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+index 40c47d6a7d78..745e197a4775 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
+@@ -385,9 +385,10 @@ nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
+ ret = nouveau_dmem_chunk_alloc(drm);
+ if (ret) {
+ if (c)
+- break;
++ return 0;
+ return ret;
+ }
++ mutex_lock(&drm->dmem->mutex);
+ continue;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
+index 8d55cdd69ff4..435c7d7377a3 100644
+--- a/drivers/i2c/busses/i2c-at91-core.c
++++ b/drivers/i2c/busses/i2c-at91-core.c
+@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = {
+
+ static struct at91_twi_pdata sama5d2_config = {
+ .clk_max_div = 7,
+- .clk_offset = 4,
++ .clk_offset = 3,
+ .has_unre_flag = true,
+ .has_alt_cmd = true,
+ .has_hold_field = true,
+diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
+index e87232f2e708..a3fcc35ffd3b 100644
+--- a/drivers/i2c/busses/i2c-at91-master.c
++++ b/drivers/i2c/busses/i2c-at91-master.c
+@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
+ writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
+
+ /* send stop when last byte has been written */
+- if (--dev->buf_len == 0)
++ if (--dev->buf_len == 0) {
+ if (!dev->use_alt_cmd)
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
++ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
++ }
+
+ dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
+
+@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ } else {
+ at91_twi_write_next_byte(dev);
+ at91_twi_write(dev, AT91_TWI_IER,
+- AT91_TWI_TXCOMP |
+- AT91_TWI_NACK |
+- AT91_TWI_TXRDY);
++ AT91_TWI_TXCOMP | AT91_TWI_NACK |
++ (dev->buf_len ? AT91_TWI_TXRDY : 0));
+ }
+ }
+
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index a845b8decac8..ad1681872e39 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -403,16 +403,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
+ {
+ struct i2c_msg *msg = iproc_i2c->msg;
++ uint32_t val;
+
+ /* Read valid data from RX FIFO */
+ while (iproc_i2c->rx_bytes < msg->len) {
+- if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
+- & M_FIFO_RX_CNT_MASK))
++ val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
++
++ /* rx fifo empty */
++ if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
+ break;
+
+ msg->buf[iproc_i2c->rx_bytes] =
+- (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
+- M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
++ (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+ iproc_i2c->rx_bytes++;
+ }
+ }
+diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
+index 29f7b15c81d9..156d210de195 100644
+--- a/drivers/infiniband/core/device.c
++++ b/drivers/infiniband/core/device.c
+@@ -98,6 +98,12 @@ static LIST_HEAD(client_list);
+ static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
+ static DECLARE_RWSEM(clients_rwsem);
+
++static void ib_client_put(struct ib_client *client)
++{
++ if (refcount_dec_and_test(&client->uses))
++ complete(&client->uses_zero);
++}
++
+ /*
+ * If client_data is registered then the corresponding client must also still
+ * be registered.
+@@ -650,6 +656,14 @@ static int add_client_context(struct ib_device *device,
+ return 0;
+
+ down_write(&device->client_data_rwsem);
++ /*
++ * So long as the client is registered, hold both the client and device
++ * unregistration locks.
++ */
++ if (!refcount_inc_not_zero(&client->uses))
++ goto out_unlock;
++ refcount_inc(&device->refcount);
++
+ /*
+ * Another caller to add_client_context got here first and has already
+ * completely initialized context.
+@@ -673,6 +687,9 @@ static int add_client_context(struct ib_device *device,
+ return 0;
+
+ out:
++ ib_device_put(device);
++ ib_client_put(client);
++out_unlock:
+ up_write(&device->client_data_rwsem);
+ return ret;
+ }
+@@ -692,7 +709,7 @@ static void remove_client_context(struct ib_device *device,
+ client_data = xa_load(&device->client_data, client_id);
+ xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
+ client = xa_load(&clients, client_id);
+- downgrade_write(&device->client_data_rwsem);
++ up_write(&device->client_data_rwsem);
+
+ /*
+ * Notice we cannot be holding any exclusive locks when calling the
+@@ -702,17 +719,13 @@ static void remove_client_context(struct ib_device *device,
+ *
+ * For this reason clients and drivers should not call the
+ * unregistration functions while holding any locks.
+- *
+- * It tempting to drop the client_data_rwsem too, but this is required
+- * to ensure that unregister_client does not return until all clients
+- * are completely unregistered, which is required to avoid module
+- * unloading races.
+ */
+ if (client->remove)
+ client->remove(device, client_data);
+
+ xa_erase(&device->client_data, client_id);
+- up_read(&device->client_data_rwsem);
++ ib_device_put(device);
++ ib_client_put(client);
+ }
+
+ static int alloc_port_data(struct ib_device *device)
+@@ -1696,6 +1709,8 @@ int ib_register_client(struct ib_client *client)
+ unsigned long index;
+ int ret;
+
++ refcount_set(&client->uses, 1);
++ init_completion(&client->uses_zero);
+ ret = assign_client_id(client);
+ if (ret)
+ return ret;
+@@ -1731,16 +1746,29 @@ void ib_unregister_client(struct ib_client *client)
+ unsigned long index;
+
+ down_write(&clients_rwsem);
++ ib_client_put(client);
+ xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
+ up_write(&clients_rwsem);
++
++ /* We do not want to have locks while calling client->remove() */
++ rcu_read_lock();
++ xa_for_each (&devices, index, device) {
++ if (!ib_device_try_get(device))
++ continue;
++ rcu_read_unlock();
++
++ remove_client_context(device, client->client_id);
++
++ ib_device_put(device);
++ rcu_read_lock();
++ }
++ rcu_read_unlock();
++
+ /*
+- * Every device still known must be serialized to make sure we are
+- * done with the client callbacks before we return.
++ * remove_client_context() is not a fence; it can return even though a
++ * removal is ongoing. Wait until all removals are completed.
+ */
+- down_read(&devices_rwsem);
+- xa_for_each (&devices, index, device)
+- remove_client_context(device, client->client_id);
+- up_read(&devices_rwsem);
++ wait_for_completion(&client->uses_zero);
+
+ down_write(&clients_rwsem);
+ list_del(&client->list);
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 2c3685faa57a..a4a9f90f2482 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid *gid_to_del;
++ u16 vlan_id = 0xFFFF;
+
+ /* Delete the entry from the hardware */
+ ctx = *context;
+@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
+ if (sgid_tbl && sgid_tbl->active) {
+ if (ctx->idx >= sgid_tbl->max)
+ return -EINVAL;
+- gid_to_del = &sgid_tbl->tbl[ctx->idx];
++ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
++ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
+ /* DEL_GID is called in WQ context(netdevice_event_work_handler)
+ * or via the ib_unregister_device path. In the former case QP1
+ * may not be destroyed yet, in which case just return as FW
+@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
+ }
+ ctx->refcnt--;
+ if (!ctx->refcnt) {
+- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
++ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
++ vlan_id, true);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to remove GID: %#x", rc);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 37928b1111df..bdbde8e22420 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ u16 max)
+ {
+- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
++ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
+ if (!sgid_tbl->tbl)
+ return -ENOMEM;
+
+@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
++ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
++ sgid_tbl->tbl[i].vlan_id, true);
+ }
+- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
++ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
+ sgid_tbl->active = 0;
+@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
+ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct net_device *netdev)
+ {
+- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
++ u32 i;
++
++ for (i = 0; i < sgid_tbl->max; i++)
++ sgid_tbl->tbl[i].vlan_id = 0xffff;
++
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ }
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index 30c42c92fac7..fbda11a7ab1a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
+ };
+
+ struct bnxt_qplib_sgid_tbl {
+- struct bnxt_qplib_gid *tbl;
++ struct bnxt_qplib_gid_info *tbl;
+ u16 *hw_id;
+ u16 max;
+ u16 active;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 48793d3512ac..40296b97d21e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ index, sgid_tbl->max);
+ return -EINVAL;
+ }
+- memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
++ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
+ return 0;
+ }
+
+ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+- struct bnxt_qplib_gid *gid, bool update)
++ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
+ {
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ return -ENOMEM;
+ }
+ for (index = 0; index < sgid_tbl->max; index++) {
+- if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
++ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
++ vlan_id == sgid_tbl->tbl[index].vlan_id)
+ break;
+ }
+ if (index == sgid_tbl->max) {
+@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ if (rc)
+ return rc;
+ }
+- memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
++ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero));
++ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
+ sgid_tbl->vlan[index] = 0;
+ sgid_tbl->active--;
+ dev_dbg(&res->pdev->dev,
+@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ }
+ free_idx = sgid_tbl->max;
+ for (i = 0; i < sgid_tbl->max; i++) {
+- if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
++ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
++ sgid_tbl->tbl[i].vlan_id == vlan_id) {
+ dev_dbg(&res->pdev->dev,
+ "SGID entry already exist in entry %d!\n", i);
+ *index = i;
+@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ }
+ /* Add GID to the sgid_tbl */
+ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
++ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
+ sgid_tbl->active++;
+ if (vlan_id != 0xFFFF)
+ sgid_tbl->vlan[free_idx] = 1;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 0ec3b12b0bcd..13d9432d5ce2 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
+ u8 data[16];
+ };
+
++struct bnxt_qplib_gid_info {
++ struct bnxt_qplib_gid gid;
++ u16 vlan_id;
++};
++
+ struct bnxt_qplib_ah {
+ struct bnxt_qplib_gid dgid;
+ struct bnxt_qplib_pd *pd;
+@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+ struct bnxt_qplib_gid *gid);
+ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+- struct bnxt_qplib_gid *gid, bool update);
++ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
+ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
+ bool update, u32 *index);
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index d5b643a1d9fd..67052dc3100c 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
+ clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+ }
+
+-static void init_rxe(struct hfi1_devdata *dd)
++static int init_rxe(struct hfi1_devdata *dd)
+ {
+ struct rsm_map_table *rmt;
+ u64 val;
+@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
+ write_csr(dd, RCV_ERR_MASK, ~0ull);
+
+ rmt = alloc_rsm_map_table(dd);
++ if (!rmt)
++ return -ENOMEM;
++
+ /* set up QOS, including the QPN map table */
+ init_qos(dd, rmt);
+ init_fecn_handling(dd, rmt);
+@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
+ val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
+ RCV_BYPASS_HDR_SIZE_SHIFT);
+ write_csr(dd, RCV_BYPASS, val);
++ return 0;
+ }
+
+ static void init_other(struct hfi1_devdata *dd)
+@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
+ goto bail_cleanup;
+
+ /* set initial RXE CSRs */
+- init_rxe(dd);
++ ret = init_rxe(dd);
++ if (ret)
++ goto bail_cleanup;
++
+ /* set initial TXE CSRs */
+ init_txe(dd);
+ /* set initial non-RXE, non-TXE CSRs */
+diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
+index aa9c8d3ef87b..fe7e7097e00a 100644
+--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
+@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
+ flows[i].req = req;
+ flows[i].npagesets = 0;
+ flows[i].pagesets[0].mapped = 0;
++ flows[i].resync_npkts = 0;
+ }
+ req->flows = flows;
+ return 0;
+@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
+ return NULL;
+ }
+
+-static struct tid_rdma_flow *
+-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
+- u32 psn, u16 *fidx)
+-{
+- for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
+- tail = CIRC_NEXT(tail, MAX_FLOWS)) {
+- struct tid_rdma_flow *flow = &req->flows[tail];
+- u32 spsn, lpsn;
+-
+- spsn = full_flow_psn(flow, flow->flow_state.spsn);
+- lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
+-
+- if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
+- if (fidx)
+- *fidx = tail;
+- return flow;
+- }
+- }
+- return NULL;
+-}
+-
+-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
+- u32 psn, u16 *fidx)
+-{
+- return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
+- fidx);
+-}
+-
+ /* TID RDMA READ functions */
+ u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
+ struct ib_other_headers *ohdr, u32 *bth1,
+@@ -2790,19 +2763,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
+ * to prevent continuous Flow Sequence errors for any
+ * packets that could still be in the fabric.
+ */
+- flow = find_flow(req, psn, NULL);
+- if (!flow) {
+- /*
+- * We can't find the IB PSN matching the
+- * received KDETH PSN. The only thing we can
+- * do at this point is report the error to
+- * the QP.
+- */
+- hfi1_kern_read_tid_flow_free(qp);
+- spin_unlock(&qp->s_lock);
+- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
+- return ret;
+- }
++ flow = &req->flows[req->clear_tail];
+ if (priv->s_flags & HFI1_R_TID_SW_PSN) {
+ diff = cmp_psn(psn,
+ flow->flow_state.r_next_psn);
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index bad3229bad37..27f86b436b9e 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -54,6 +54,7 @@
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <rdma/opa_addr.h>
++#include <linux/nospec.h>
+
+ #include "hfi.h"
+ #include "common.h"
+@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
+ sl = rdma_ah_get_sl(ah_attr);
+ if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+ return -EINVAL;
++ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
+
+ sc5 = ibp->sl_to_sc[sl];
+ if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index 40eb8be482e4..f52b845f2f7b 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -480,6 +480,7 @@ struct mlx5_umr_wr {
+ u64 length;
+ int access_flags;
+ u32 mkey;
++ u8 ignore_free_state:1;
+ };
+
+ static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 5f09699fab98..e54bec2c2965 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -545,13 +545,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ return;
+
+ c = order2idx(dev, mr->order);
+- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
+- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+- return;
+- }
++ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
+
+- if (unreg_umr(dev, mr))
++ if (unreg_umr(dev, mr)) {
++ mr->allocated_from_cache = false;
++ destroy_mkey(dev, mr);
++ ent = &cache->ent[c];
++ if (ent->cur < ent->limit)
++ queue_work(cache->wq, &ent->work);
+ return;
++ }
+
+ ent = &cache->ent[c];
+ spin_lock_irq(&ent->lock);
+@@ -1373,9 +1376,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ return 0;
+
+ umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
+- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
++ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
+ umrwr.wr.opcode = MLX5_IB_WR_UMR;
++ umrwr.pd = dev->umrc.pd;
+ umrwr.mkey = mr->mmkey.key;
++ umrwr.ignore_free_state = 1;
+
+ return mlx5_ib_post_send_wait(dev, &umrwr);
+ }
+@@ -1578,10 +1583,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+ mr->sig = NULL;
+ }
+
+- mlx5_free_priv_descs(mr);
+-
+- if (!allocated_from_cache)
++ if (!allocated_from_cache) {
+ destroy_mkey(dev, mr);
++ mlx5_free_priv_descs(mr);
++ }
+ }
+
+ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index f6623c77443a..6dbca72a73b1 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -1718,7 +1718,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ }
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ memcpy(rss_key, ucmd.rx_hash_key, len);
+ break;
+ }
+@@ -4262,10 +4261,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
+
+ memset(umr, 0, sizeof(*umr));
+
+- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
+- else
+- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
++ if (!umrwr->ignore_free_state) {
++ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
++ /* fail if free */
++ umr->flags = MLX5_UMR_CHECK_FREE;
++ else
++ /* fail if not free */
++ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
++ }
+
+ umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+ if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 63aa541c9608..50f0f3c66934 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -719,7 +719,7 @@ static int at24_probe(struct i2c_client *client)
+ nvmem_config.name = dev_name(dev);
+ nvmem_config.dev = dev;
+ nvmem_config.read_only = !writable;
+- nvmem_config.root_only = true;
++ nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
+ nvmem_config.owner = THIS_MODULE;
+ nvmem_config.compat = true;
+ nvmem_config.base_dev = dev;
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index 3557d5c51141..245a6fd668c8 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -10,6 +10,7 @@
+ #include <linux/kthread.h>
+ #include <linux/scatterlist.h>
+ #include <linux/dma-mapping.h>
++#include <linux/backing-dev.h>
+
+ #include <linux/mmc/card.h>
+ #include <linux/mmc/host.h>
+@@ -430,6 +431,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
+ goto free_tag_set;
+ }
+
++ if (mmc_host_is_spi(host) && host->use_spi_crc)
++ mq->queue->backing_dev_info->capabilities |=
++ BDI_CAP_STABLE_WRITES;
++
+ mq->queue->queuedata = mq;
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index b53b6b7d4dd4..60c3a06e3469 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -2034,8 +2034,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
+ * delayed. Allowing the transfer to take place
+ * avoids races and keeps things simple.
+ */
+- if ((err != -ETIMEDOUT) &&
+- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
++ if (err != -ETIMEDOUT) {
+ state = STATE_SENDING_DATA;
+ continue;
+ }
+diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
+index 2d736e416775..ba9a63db73da 100644
+--- a/drivers/mmc/host/meson-mx-sdio.c
++++ b/drivers/mmc/host/meson-mx-sdio.c
+@@ -73,7 +73,7 @@
+ #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
+- #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
++ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
+ #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
+ #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
+ #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index 9a822e2e9f0b..06f84a4d79e0 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -405,6 +405,7 @@ err_cleanup_host:
+ sdhci_cleanup_host(host);
+
+ pm_runtime_disable:
++ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
+index 1622d3145587..8ca9fad6e6ad 100644
+--- a/drivers/mtd/nand/raw/nand_micron.c
++++ b/drivers/mtd/nand/raw/nand_micron.c
+@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
++ /*
++ * It seems that there are devices which do not support ECC officially.
++ * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
++ * enabling the ECC feature but don't reflect that in the READ_ID table.
++ * So we have to guarantee that we disable the ECC feature directly
++ * after the READ_ID table command. Later we can evaluate the
++ * ECC_ENABLE support.
++ */
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+- if (!(id[4] & MICRON_ID_ECC_ENABLED))
+- return MICRON_ON_DIE_UNSUPPORTED;
+-
+ ret = micron_nand_on_die_ecc_setup(chip, false);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
++ if (!(id[4] & MICRON_ID_ECC_ENABLED))
++ return MICRON_ON_DIE_UNSUPPORTED;
++
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 82015c8a5ed7..b7a246b33599 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -4697,8 +4697,12 @@ int be_update_queues(struct be_adapter *adapter)
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+- if (netif_running(netdev))
++ if (netif_running(netdev)) {
++ /* device cannot transmit now, avoid dev_watchdog timeouts */
++ netif_carrier_off(netdev);
++
+ be_close(netdev);
++ }
+
+ be_cancel_worker(adapter);
+
+diff --git a/drivers/pci/of.c b/drivers/pci/of.c
+index 73d5adec0a28..bc7b27a28795 100644
+--- a/drivers/pci/of.c
++++ b/drivers/pci/of.c
+@@ -22,12 +22,15 @@ void pci_set_of_node(struct pci_dev *dev)
+ return;
+ dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
+ dev->devfn);
++ if (dev->dev.of_node)
++ dev->dev.fwnode = &dev->dev.of_node->fwnode;
+ }
+
+ void pci_release_of_node(struct pci_dev *dev)
+ {
+ of_node_put(dev->dev.of_node);
+ dev->dev.of_node = NULL;
++ dev->dev.fwnode = NULL;
+ }
+
+ void pci_set_bus_of_node(struct pci_bus *bus)
+@@ -41,13 +44,18 @@ void pci_set_bus_of_node(struct pci_bus *bus)
+ if (node && of_property_read_bool(node, "external-facing"))
+ bus->self->untrusted = true;
+ }
++
+ bus->dev.of_node = node;
++
++ if (bus->dev.of_node)
++ bus->dev.fwnode = &bus->dev.of_node->fwnode;
+ }
+
+ void pci_release_bus_of_node(struct pci_bus *bus)
+ {
+ of_node_put(bus->dev.of_node);
+ bus->dev.of_node = NULL;
++ bus->dev.fwnode = NULL;
+ }
+
+ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
+diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
+index 2d06b8095a19..df352b334ea7 100644
+--- a/drivers/perf/arm_pmu.c
++++ b/drivers/perf/arm_pmu.c
+@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+ cpu_pm_pmu_setup(armpmu, cmd);
+ break;
+ case CPU_PM_EXIT:
+- cpu_pm_pmu_setup(armpmu, cmd);
+ case CPU_PM_ENTER_FAILED:
++ cpu_pm_pmu_setup(armpmu, cmd);
+ armpmu->start(armpmu);
+ break;
+ default:
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index ce7a90e68042..8155f59ece38 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -1686,6 +1686,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
+
+ if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+ return -EFAULT;
++ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+
+ rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
+ dev_info.comptag, dev_info.destid, dev_info.hopcount);
+@@ -1817,6 +1818,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
+
+ if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
+ return -EFAULT;
++ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
+
+ mport = priv->md->mport;
+
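
Both mport ioctls copy a fixed-size struct from userspace and then hand dev_info.name to "%s" format strings; nothing guarantees the user supplied a terminating NUL inside the array, so the added termination is load-bearing. The pattern in isolation (a 16-byte array stands in for the real field size):

#include <stdio.h>
#include <string.h>

struct dev_info {
        char name[16];
        /* ... other fields elided ... */
};

int main(void)
{
        struct dev_info dev_info;

        /* Simulate a copy_from_user() of attacker-controlled bytes
         * that fill the array with no terminating NUL. */
        memset(dev_info.name, 'A', sizeof(dev_info.name));

        /* The fix: force termination before the string is printed
         * or compared, so "%s" cannot run past the buffer. */
        dev_info.name[sizeof(dev_info.name) - 1] = '\0';

        printf("name:%s\n", dev_info.name);
        return 0;
}
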
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 8b5363223eaa..5031c6806908 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -512,6 +512,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+ /* Initialise vdev subdevice */
+ snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
+ rvdev->dev.parent = rproc->dev.parent;
++ rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
+ rvdev->dev.release = rproc_rvdev_release;
+ dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
+ dev_set_drvdata(&rvdev->dev, rvdev);
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index b9ce93e9df89..99f86612f775 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
+ char msg_format;
+ char msg_no;
+
++ /*
++ * intrc values ENODEV, ENOLINK and EPERM
++ * will be obtained from sleep_on to indicate that no
++ * IO operation can be started
++ */
++ if (cqr->intrc == -ENODEV)
++ return 1;
++
++ if (cqr->intrc == -ENOLINK)
++ return 1;
++
++ if (cqr->intrc == -EPERM)
++ return 1;
++
+ sense = dasd_get_sense(&cqr->irb);
+ if (!sense)
+ return 0;
+@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
+ lcu->flags &= ~NEED_UAC_UPDATE;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+
+- do {
+- rc = dasd_sleep_on(cqr);
+- if (rc && suborder_not_supported(cqr))
+- return -EOPNOTSUPP;
+- } while (rc && (cqr->retries > 0));
+- if (rc) {
++ rc = dasd_sleep_on(cqr);
++ if (rc && !suborder_not_supported(cqr)) {
+ spin_lock_irqsave(&lcu->lock, flags);
+ lcu->flags |= NEED_UAC_UPDATE;
+ spin_unlock_irqrestore(&lcu->lock, flags);
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index e8fc28dba8df..96f0d34e9459 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -11,6 +11,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+ #include <linux/kthread.h>
++#include <linux/bug.h>
+ #include "zfcp_ext.h"
+ #include "zfcp_reqlist.h"
+
+@@ -217,6 +218,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(enum zfcp_erp_act_type need,
+ struct zfcp_erp_action *erp_action;
+ struct zfcp_scsi_dev *zfcp_sdev;
+
++ if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
++ need != ZFCP_ERP_ACTION_REOPEN_PORT &&
++ need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
++ need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
++ return NULL;
++
+ switch (need) {
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zfcp_sdev = sdev_to_zfcp(sdev);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 8aacbd1e7db2..f2d61d023bcb 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -2683,6 +2683,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ {
+ u64 required_mask, coherent_mask;
+ struct sysinfo s;
++ /* Set a 63-bit DMA mask for all SAS3 and SAS35 controllers */
++ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
+
+ if (ioc->is_mcpu_endpoint)
+ goto try_32bit;
+@@ -2692,17 +2694,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+ goto try_32bit;
+
+ if (ioc->dma_mask)
+- coherent_mask = DMA_BIT_MASK(64);
++ coherent_mask = DMA_BIT_MASK(dma_mask);
+ else
+ coherent_mask = DMA_BIT_MASK(32);
+
+- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
++ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_mask))
+ goto try_32bit;
+
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+- ioc->dma_mask = 64;
++ ioc->dma_mask = dma_mask;
+ goto out;
+
+ try_32bit:
+@@ -2724,7 +2726,7 @@ static int
+ _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+ struct pci_dev *pdev)
+ {
+- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
++ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+ }
+@@ -4631,7 +4633,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ total_sz += sz;
+ } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+- if (ioc->dma_mask == 64) {
++ if (ioc->dma_mask > 32) {
+ if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+ ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+ pci_name(ioc->pdev));
+diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
+index fc6429f9170a..79a3d922a4a9 100644
+--- a/drivers/soc/imx/soc-imx8.c
++++ b/drivers/soc/imx/soc-imx8.c
+@@ -73,7 +73,7 @@ static int __init imx8_soc_init(void)
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+- return -ENODEV;
++ return -ENOMEM;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+@@ -83,8 +83,10 @@ static int __init imx8_soc_init(void)
+ goto free_soc;
+
+ id = of_match_node(imx8_soc_match, root);
+- if (!id)
++ if (!id) {
++ ret = -ENODEV;
+ goto free_soc;
++ }
+
+ of_node_put(root);
+
+@@ -96,20 +98,25 @@ static int __init imx8_soc_init(void)
+ }
+
+ soc_dev_attr->revision = imx8_revision(soc_rev);
+- if (!soc_dev_attr->revision)
++ if (!soc_dev_attr->revision) {
++ ret = -ENOMEM;
+ goto free_soc;
++ }
+
+ soc_dev = soc_device_register(soc_dev_attr);
+- if (IS_ERR(soc_dev))
++ if (IS_ERR(soc_dev)) {
++ ret = PTR_ERR(soc_dev);
+ goto free_rev;
++ }
+
+ return 0;
+
+ free_rev:
+- kfree(soc_dev_attr->revision);
++ if (strcmp(soc_dev_attr->revision, "unknown"))
++ kfree(soc_dev_attr->revision);
+ free_soc:
+ kfree(soc_dev_attr);
+ of_node_put(root);
+- return -ENODEV;
++ return ret;
+ }
+ device_initcall(imx8_soc_init);
+diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
+index 005326050c23..235d01870dd8 100644
+--- a/drivers/soc/qcom/rpmpd.c
++++ b/drivers/soc/qcom/rpmpd.c
+@@ -226,7 +226,7 @@ static int rpmpd_set_performance(struct generic_pm_domain *domain,
+ struct rpmpd *pd = domain_to_rpmpd(domain);
+
+ if (state > MAX_RPMPD_STATE)
+- goto out;
++ state = MAX_RPMPD_STATE;
+
+ mutex_lock(&rpmpd_lock);
+
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index f363fbeb5ab0..e09edb5c5e06 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -463,9 +463,14 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct irq_affinity *desc)
+ {
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+- unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
++ int irq = platform_get_irq(vm_dev->pdev, 0);
+ int i, err, queue_idx = 0;
+
++ if (irq < 0) {
++ dev_err(&vdev->dev, "Cannot get IRQ resource\n");
++ return irq;
++ }
++
+ err = request_irq(irq, vm_interrupt, IRQF_SHARED,
+ dev_name(&vdev->dev), vm_dev);
+ if (err)
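
The virtio-mmio fix is a pure signedness bug: platform_get_irq() returns a negative errno on failure, but the result was stored in an unsigned int, so the error could never be observed and a garbage IRQ number flowed into request_irq(). Reduced to its essence (get_irq_stub() is an invented stand-in):

#include <stdio.h>

/* Hypothetical stand-in: like platform_get_irq(), returns the IRQ
 * number on success or a negative errno on failure. */
static int get_irq_stub(void) { return -6; /* -ENXIO */ }

int main(void)
{
        /* The bug: storing the result in an unsigned variable turns
         * -ENXIO into a huge positive number, so the "irq < 0" check
         * is impossible to write and errors go unnoticed. */
        unsigned int bad = get_irq_stub();
        int good = get_irq_stub();

        printf("unsigned: %u (error invisible)\n", bad);
        if (good < 0)
                printf("signed:   %d (error caught)\n", good);
        return 0;
}
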
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 469dfbd6cf90..dd4d5dea9a54 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -1145,7 +1145,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ goto out_put_map;
+
+ if (!use_ptemod) {
+- err = vm_map_pages(vma, map->pages, map->count);
++ err = vm_map_pages_zero(vma, map->pages, map->count);
+ if (err)
+ goto out_put_map;
+ } else {
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index d53f3493a6b9..c416d31cb545 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -361,8 +361,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+ /* Convert the size to actually allocated. */
+ size = 1UL << (order + XEN_PAGE_SHIFT);
+
+- if (((dev_addr + size - 1 <= dma_mask)) ||
+- range_straddles_page_boundary(phys, size))
++ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
++ range_straddles_page_boundary(phys, size)))
+ xen_destroy_contiguous_region(phys, order);
+
+ xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+@@ -402,7 +402,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+
+ map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
+ attrs);
+- if (map == DMA_MAPPING_ERROR)
++ if (map == (phys_addr_t)DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
+
+ dev_addr = xen_phys_to_bus(map);
+diff --git a/fs/adfs/super.c b/fs/adfs/super.c
+index ffb669f9bba7..ce0fbbe002bf 100644
+--- a/fs/adfs/super.c
++++ b/fs/adfs/super.c
+@@ -360,6 +360,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+ struct buffer_head *bh;
+ struct object_info root_obj;
+ unsigned char *b_data;
++ unsigned int blocksize;
+ struct adfs_sb_info *asb;
+ struct inode *root;
+ int ret = -EINVAL;
+@@ -411,8 +412,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
+ goto error_free_bh;
+ }
+
++ blocksize = 1 << dr->log2secsize;
+ brelse(bh);
+- if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
++
++ if (sb_set_blocksize(sb, blocksize)) {
+ bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
+ if (!bh) {
+ adfs_error(sb, "couldn't read superblock on "
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 749f5984425d..09c9d6726f07 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1151,8 +1151,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
+ * Pointer to the block device containing @bdev on success, ERR_PTR()
+ * value on failure.
+ */
+-static struct block_device *bd_start_claiming(struct block_device *bdev,
+- void *holder)
++struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
+ {
+ struct gendisk *disk;
+ struct block_device *whole;
+@@ -1199,6 +1198,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
+ return ERR_PTR(err);
+ }
+ }
++EXPORT_SYMBOL(bd_start_claiming);
++
++static void bd_clear_claiming(struct block_device *whole, void *holder)
++{
++ lockdep_assert_held(&bdev_lock);
++ /* tell others that we're done */
++ BUG_ON(whole->bd_claiming != holder);
++ whole->bd_claiming = NULL;
++ wake_up_bit(&whole->bd_claiming, 0);
++}
++
++/**
++ * bd_finish_claiming - finish claiming of a block device
++ * @bdev: block device of interest
++ * @whole: whole block device (returned from bd_start_claiming())
++ * @holder: holder that has claimed @bdev
++ *
++ * Finish exclusive open of a block device. Mark the device as exclusively
++ * open by the holder and wake up all waiters for exclusive open to finish.
++ */
++void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
++ void *holder)
++{
++ spin_lock(&bdev_lock);
++ BUG_ON(!bd_may_claim(bdev, whole, holder));
++ /*
++ * Note that for a whole device bd_holders will be incremented twice,
++ * and bd_holder will be set to bd_may_claim before being set to holder
++ */
++ whole->bd_holders++;
++ whole->bd_holder = bd_may_claim;
++ bdev->bd_holders++;
++ bdev->bd_holder = holder;
++ bd_clear_claiming(whole, holder);
++ spin_unlock(&bdev_lock);
++}
++EXPORT_SYMBOL(bd_finish_claiming);
++
++/**
++ * bd_abort_claiming - abort claiming of a block device
++ * @bdev: block device of interest
++ * @whole: whole block device (returned from bd_start_claiming())
++ * @holder: holder that has claimed @bdev
++ *
++ * Abort claiming of a block device when the exclusive open failed. This can
++ * also be used when exclusive open is not actually desired and we just needed
++ * to block other exclusive openers for a while.
++ */
++void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
++ void *holder)
++{
++ spin_lock(&bdev_lock);
++ bd_clear_claiming(whole, holder);
++ spin_unlock(&bdev_lock);
++}
++EXPORT_SYMBOL(bd_abort_claiming);
+
+ #ifdef CONFIG_SYSFS
+ struct bd_holder_disk {
+@@ -1668,29 +1723,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
+
+ /* finish claiming */
+ mutex_lock(&bdev->bd_mutex);
+- spin_lock(&bdev_lock);
+-
+- if (!res) {
+- BUG_ON(!bd_may_claim(bdev, whole, holder));
+- /*
+- * Note that for a whole device bd_holders
+- * will be incremented twice, and bd_holder
+- * will be set to bd_may_claim before being
+- * set to holder
+- */
+- whole->bd_holders++;
+- whole->bd_holder = bd_may_claim;
+- bdev->bd_holders++;
+- bdev->bd_holder = holder;
+- }
+-
+- /* tell others that we're done */
+- BUG_ON(whole->bd_claiming != holder);
+- whole->bd_claiming = NULL;
+- wake_up_bit(&whole->bd_claiming, 0);
+-
+- spin_unlock(&bdev_lock);
+-
++ bd_finish_claiming(bdev, whole, holder);
+ /*
+ * Block event polling for write claims if requested. Any
+ * write holder makes the write_holder state stick until
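
The block_dev.c change splits what used to be one open-coded sequence into an exported trio: bd_start_claiming() reserves the device against concurrent claimers, and the new bd_finish_claiming()/bd_abort_claiming() either commit or roll back that reservation. A deliberately simplified, lock-free userspace sketch of the protocol; the real code serializes everything on bdev_lock and wakes waiters on wd_claiming:

#include <stdbool.h>
#include <stdio.h>

struct bdev {
        void *claiming; /* non-NULL while a claim is in flight */
        void *holder;   /* committed exclusive holder          */
};

static int bd_start_claiming(struct bdev *b, void *holder)
{
        if (b->claiming || b->holder)
                return -1;      /* somebody else is (or has) claimed it */
        b->claiming = holder;
        return 0;
}

static void bd_finish_claiming(struct bdev *b, void *holder)
{
        b->holder = holder;     /* commit ...                   */
        b->claiming = NULL;     /* ... and let waiters proceed  */
}

static void bd_abort_claiming(struct bdev *b, void *holder)
{
        (void)holder;
        b->claiming = NULL;     /* roll back; device stays unclaimed */
}

int main(void)
{
        struct bdev b = { 0 };
        int me;

        if (bd_start_claiming(&b, &me) == 0) {
                bool open_ok = false; /* pretend the exclusive open failed */

                if (open_ok)
                        bd_finish_claiming(&b, &me);
                else
                        bd_abort_claiming(&b, &me);
        }
        printf("holder=%p claiming=%p\n", b.holder, b.claiming);
        return 0;
}
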
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2a1be0d1a698..5b4beebf138c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3999,6 +3999,27 @@ static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ if (!same_inode)
+ inode_dio_wait(inode_out);
+
++ /*
++ * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
++ *
++ * Btrfs' back references do not have block-level granularity; they
++ * work at the whole extent level.
++ * A NOCOW buffered write without data space reserved may not be able
++ * to fall back to CoW due to lack of data space, and thus could cause
++ * data loss.
++ *
++ * Here we take a shortcut by flushing the whole inode, so that all
++ * NOCOW writes reach disk as NOCOW before we increase the
++ * reference count of the extent. We could do better by only flushing
++ * NOCOW data, but that needs extra accounting.
++ *
++ * Also we don't need to check ASYNC_EXTENT, as async extents will be
++ * CoWed anyway, not affecting the NOCOW part.
++ */
++ ret = filemap_flush(inode_in->i_mapping);
++ if (ret < 0)
++ return ret;
++
+ ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
+ wb_len);
+ if (ret < 0)
+diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
+index 3e6ffbbd8b0a..f8a3c1b0a15a 100644
+--- a/fs/btrfs/qgroup.c
++++ b/fs/btrfs/qgroup.c
+@@ -2614,6 +2614,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ int ret = 0;
+ int i;
+ u64 *i_qgroups;
++ bool committing = false;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *srcgroup;
+@@ -2621,7 +2622,25 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ u32 level_size = 0;
+ u64 nums;
+
+- mutex_lock(&fs_info->qgroup_ioctl_lock);
++ /*
++ * There are only two callers of this function.
++ *
++ * One in create_subvol() in the ioctl context, which needs to hold
++ * the qgroup_ioctl_lock.
++ *
++ * The other is in create_pending_snapshot(), where no other qgroup
++ * code can modify the fs, as callers all need to either start a new
++ * trans or hold a trans handle, thus we don't need to hold
++ * qgroup_ioctl_lock.
++ * This avoids a long and complex lock chain and makes lockdep happy.
++ */
++ spin_lock(&fs_info->trans_lock);
++ if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
++ committing = true;
++ spin_unlock(&fs_info->trans_lock);
++
++ if (!committing)
++ mutex_lock(&fs_info->qgroup_ioctl_lock);
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ goto out;
+
+@@ -2785,7 +2804,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
+ unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+ out:
+- mutex_unlock(&fs_info->qgroup_ioctl_lock);
++ if (!committing)
++ mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ return ret;
+ }
+
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index f7fe4770f0e5..d25271381c56 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
+ {
+ int ret = 0;
+
+- if (sctx->cur_ino != sctx->cmp_key->objectid) {
+-
+- if (result == BTRFS_COMPARE_TREE_CHANGED) {
+- struct extent_buffer *leaf_l;
+- struct extent_buffer *leaf_r;
+- struct btrfs_file_extent_item *ei_l;
+- struct btrfs_file_extent_item *ei_r;
+-
+- leaf_l = sctx->left_path->nodes[0];
+- leaf_r = sctx->right_path->nodes[0];
+- ei_l = btrfs_item_ptr(leaf_l,
+- sctx->left_path->slots[0],
+- struct btrfs_file_extent_item);
+- ei_r = btrfs_item_ptr(leaf_r,
+- sctx->right_path->slots[0],
+- struct btrfs_file_extent_item);
+-
+- /*
+- * We may have found an extent item that has changed
+- * only its disk_bytenr field and the corresponding
+- * inode item was not updated. This case happens due to
+- * very specific timings during relocation when a leaf
+- * that contains file extent items is COWed while
+- * relocation is ongoing and its in the stage where it
+- * updates data pointers. So when this happens we can
+- * safely ignore it since we know it's the same extent,
+- * but just at different logical and physical locations
+- * (when an extent is fully replaced with a new one, we
+- * know the generation number must have changed too,
+- * since snapshot creation implies committing the current
+- * transaction, and the inode item must have been updated
+- * as well).
+- * This replacement of the disk_bytenr happens at
+- * relocation.c:replace_file_extents() through
+- * relocation.c:btrfs_reloc_cow_block().
+- */
+- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
+- btrfs_file_extent_generation(leaf_r, ei_r) &&
+- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
+- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
+- btrfs_file_extent_compression(leaf_l, ei_l) ==
+- btrfs_file_extent_compression(leaf_r, ei_r) &&
+- btrfs_file_extent_encryption(leaf_l, ei_l) ==
+- btrfs_file_extent_encryption(leaf_r, ei_r) &&
+- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
+- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
+- btrfs_file_extent_type(leaf_l, ei_l) ==
+- btrfs_file_extent_type(leaf_r, ei_r) &&
+- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
+- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
+- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
+- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
+- btrfs_file_extent_offset(leaf_l, ei_l) ==
+- btrfs_file_extent_offset(leaf_r, ei_r) &&
+- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
+- btrfs_file_extent_num_bytes(leaf_r, ei_r))
+- return 0;
+- }
+-
+- inconsistent_snapshot_error(sctx, result, "extent");
+- return -EIO;
+- }
++ /*
++ * We have found an extent item that changed without the inode item
++ * having changed. This can happen either after relocation (where the
++ * disk_bytenr of an extent item is replaced at
++ * relocation.c:replace_file_extents()) or after deduplication into a
++ * file in both the parent and send snapshots (where an extent item can
++ * get modified or replaced with a new one). Note that deduplication
++ * updates the inode item, but it only changes the iversion (sequence
++ * field in the inode item) of the inode, so if a file is deduplicated
++ * the same number of times in both the parent and send snapshots, its
++ * iversion becomes the same in both snapshots, whence the inode item is
++ * the same on both snapshots.
++ */
++ if (sctx->cur_ino != sctx->cmp_key->objectid)
++ return 0;
+
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result != BTRFS_COMPARE_TREE_DELETED)
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 3f6811cdf803..1aa3f6d6d775 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -2019,6 +2019,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
+ }
+ } else {
+ spin_unlock(&fs_info->trans_lock);
++ /*
++ * The previous transaction was aborted and was already removed
++ * from the list of transactions at fs_info->trans_list. So we
++ * abort to prevent writing a new superblock that reflects a
++ * corrupt state (pointing to trees with unwritten nodes/leaves).
++ */
++ if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
++ ret = -EROFS;
++ goto cleanup_transaction;
++ }
+ }
+
+ extwriter_counter_dec(cur_trans, trans->type);
+diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
+index 96fce4bef4e7..ccd5706199d7 100644
+--- a/fs/btrfs/tree-checker.c
++++ b/fs/btrfs/tree-checker.c
+@@ -132,6 +132,7 @@ static int check_extent_data_item(struct extent_buffer *leaf,
+ struct btrfs_file_extent_item *fi;
+ u32 sectorsize = fs_info->sectorsize;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
++ u64 extent_end;
+
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ file_extent_err(leaf, slot,
+@@ -207,6 +208,16 @@ static int check_extent_data_item(struct extent_buffer *leaf,
+ CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
+ return -EUCLEAN;
+
++ /* Catch extent end overflow */
++ if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
++ key->offset, &extent_end)) {
++ file_extent_err(leaf, slot,
++ "extent end overflow, have file offset %llu extent num bytes %llu",
++ key->offset,
++ btrfs_file_extent_num_bytes(leaf, fi));
++ return -EUCLEAN;
++ }
++
+ /*
+ * Check that no two consecutive file extent items, in the same leaf,
+ * present ranges that overlap each other.
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 1c2a6e4b39da..8508f6028c8d 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -5328,8 +5328,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
+
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID10 |
+- BTRFS_BLOCK_GROUP_RAID5 |
+- BTRFS_BLOCK_GROUP_DUP)) {
++ BTRFS_BLOCK_GROUP_RAID5)) {
+ max_errors = 1;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+ max_errors = 2;
+diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
+index 0637149fb9f9..1271024a3797 100644
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -1512,18 +1512,26 @@ static int __dir_lease_try_check(const struct dentry *dentry)
+ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
+ {
+ struct ceph_inode_info *ci = ceph_inode(dir);
+- struct ceph_dentry_info *di = ceph_dentry(dentry);
+- int valid = 0;
++ int valid;
++ int shared_gen;
+
+ spin_lock(&ci->i_ceph_lock);
+- if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen)
+- valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
++ valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
++ shared_gen = atomic_read(&ci->i_shared_gen);
+ spin_unlock(&ci->i_ceph_lock);
+- if (valid)
+- __ceph_dentry_dir_lease_touch(di);
+- dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
+- dir, (unsigned)atomic_read(&ci->i_shared_gen),
+- dentry, (unsigned)di->lease_shared_gen, valid);
++ if (valid) {
++ struct ceph_dentry_info *di;
++ spin_lock(&dentry->d_lock);
++ di = ceph_dentry(dentry);
++ if (dir == d_inode(dentry->d_parent) &&
++ di && di->lease_shared_gen == shared_gen)
++ __ceph_dentry_dir_lease_touch(di);
++ else
++ valid = 0;
++ spin_unlock(&dentry->d_lock);
++ }
++ dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
++ dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
+ return valid;
+ }
+
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index edec39aa5ce2..1d313d0536f9 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -544,7 +544,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
+ long long release_count,
+ long long ordered_count)
+ {
+- smp_mb__before_atomic();
++ /*
++ * Makes sure operations that set up the readdir cache (update page
++ * cache and i_size) are strongly ordered w.r.t. the following
++ * atomic64_set() operations.
++ */
++ smp_mb();
+ atomic64_set(&ci->i_complete_seq[0], release_count);
+ atomic64_set(&ci->i_complete_seq[1], ordered_count);
+ }
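
The smp_mb() upgrade above is the classic publish pattern: the plain
page-cache/i_size stores must be ordered before the sequence values
that readers poll. A userspace analogue with C11 fences, assuming
nothing about the ceph code beyond that pattern (build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;        /* plain data, like the readdir cache */
static atomic_int ready;   /* plays the role of i_complete_seq[] */

static void *writer(void *arg)
{
    (void)arg;
    payload = 42;                              /* plain store */
    atomic_thread_fence(memory_order_release); /* order it first */
    atomic_store_explicit(&ready, 1, memory_order_relaxed);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, writer, NULL);
    while (!atomic_load_explicit(&ready, memory_order_relaxed))
        ;                                      /* spin until published */
    atomic_thread_fence(memory_order_acquire); /* pairs with the writer */
    printf("payload = %d\n", payload);         /* guaranteed to be 42 */
    pthread_join(t, NULL);
    return 0;
}
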
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 0cc42c8879e9..0619adbcbe14 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -79,7 +79,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ const char *ns_field = " pool_namespace=";
+ char buf[128];
+ size_t len, total_len = 0;
+- int ret;
++ ssize_t ret;
+
+ pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
+
+@@ -103,11 +103,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ if (pool_ns)
+ total_len += strlen(ns_field) + pool_ns->len;
+
+- if (!size) {
+- ret = total_len;
+- } else if (total_len > size) {
+- ret = -ERANGE;
+- } else {
++ ret = total_len;
++ if (size >= total_len) {
+ memcpy(val, buf, len);
+ ret = len;
+ if (pool_name) {
+@@ -835,8 +832,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
+ if (err)
+ return err;
+ err = -ENODATA;
+- if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
++ if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
+ err = vxattr->getxattr_cb(ci, value, size);
++ if (size && size < err)
++ err = -ERANGE;
++ }
+ return err;
+ }
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 59380dd546a1..18c7c6b2fe08 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -706,10 +706,10 @@ static bool
+ server_unresponsive(struct TCP_Server_Info *server)
+ {
+ /*
+- * We need to wait 2 echo intervals to make sure we handle such
++ * We need to wait 3 echo intervals to make sure we handle such
+ * situations right:
+ * 1s client sends a normal SMB request
+- * 2s client gets a response
++ * 3s client gets a response
+ * 30s echo workqueue job pops, and decides we got a response recently
+ * and don't need to send another
+ * ...
+@@ -718,9 +718,9 @@ server_unresponsive(struct TCP_Server_Info *server)
+ */
+ if ((server->tcpStatus == CifsGood ||
+ server->tcpStatus == CifsNeedNegotiate) &&
+- time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
++ time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
+ cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
+- server->hostname, (2 * server->echo_interval) / HZ);
++ server->hostname, (3 * server->echo_interval) / HZ);
+ cifs_reconnect(server);
+ wake_up(&server->response_q);
+ return true;
+@@ -4463,11 +4463,13 @@ cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+ unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+- char *full_path)
++ char *full_path,
++ int added_treename)
+ {
+ int rc;
+ char *s;
+ char sep, tmp;
++ int skip = added_treename ? 1 : 0;
+
+ sep = CIFS_DIR_SEP(cifs_sb);
+ s = full_path;
+@@ -4482,7 +4484,14 @@ cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+-
++ /*
++ * If the tree name was prepended, skip the first path
++ * component between the separators.
++ */
++ if (skip) {
++ skip = 0;
++ continue;
++ }
+ /*
+ * temporarily null-terminate the path at the end of
+ * the current component
+@@ -4530,8 +4539,7 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
+
+ if (rc != -EREMOTE) {
+ rc = cifs_are_all_path_components_accessible(server, xid, tcon,
+- cifs_sb,
+- full_path);
++ cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
+index 0ceef32e6fae..241f7e04ad04 100644
+--- a/fs/coda/psdev.c
++++ b/fs/coda/psdev.c
+@@ -182,8 +182,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
+ if (req->uc_opcode == CODA_OPEN_BY_FD) {
+ struct coda_open_by_fd_out *outp =
+ (struct coda_open_by_fd_out *)req->uc_data;
+- if (!outp->oh.result)
++ if (!outp->oh.result) {
+ outp->fh = fget(outp->fd);
++ if (!outp->fh)
++ return -EBADF;
++ }
+ }
+
+ wake_up(&req->uc_sleep);
+diff --git a/fs/dax.c b/fs/dax.c
+index 01ca13c80bb4..7d0e99982d48 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -267,7 +267,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+ static void put_unlocked_entry(struct xa_state *xas, void *entry)
+ {
+ /* If we were the only waiter woken, wake the next one */
+- if (entry && dax_is_conflict(entry))
++ if (entry && !dax_is_conflict(entry))
+ dax_wake_entry(xas, entry, false);
+ }
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 6c09cedcf17d..3e887a09533b 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1692,6 +1692,7 @@ restart:
+ do {
+ struct sqe_submit *s = &req->submit;
+ const struct io_uring_sqe *sqe = s->sqe;
++ unsigned int flags = req->flags;
+
+ /* Ensure we clear previously set non-block flag */
+ req->rw.ki_flags &= ~IOCB_NOWAIT;
+@@ -1737,7 +1738,7 @@ restart:
+ kfree(sqe);
+
+ /* req from defer and link list needn't decrease async cnt */
+- if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
++ if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+ goto out;
+
+ if (!async_list)
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index d315d86844e4..872ab208c8ad 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -317,7 +317,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
+ #ifdef CONFIG_X86_IO_APIC
+ extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
+ #else
+-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
++static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
++{
++ return -1;
++}
+ #endif
+ /*
+ * This function undoes the effect of one call to acpi_register_gsi().
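
The hunk above swaps a function-like macro for a static inline stub,
so callers get full type checking of the arguments even when
CONFIG_X86_IO_APIC is off. A standalone sketch of the same stub
style (the u32 typedef is added only for self-containment):

#include <stdio.h>

typedef unsigned int u32;

/* A static inline keeps the real function's signature, so arguments
 * are still type-checked and "used", unlike the old macro
 *   #define acpi_get_override_irq(gsi, trigger, polarity) (-1)
 * which silently ignored them. */
static inline int acpi_get_override_irq(u32 gsi, int *trigger,
                                        int *polarity)
{
    (void)gsi; (void)trigger; (void)polarity;
    return -1;
}

int main(void)
{
    int trig, pol;

    printf("stub returns %d\n", acpi_get_override_irq(9, &trig, &pol));
    return 0;
}
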
+diff --git a/include/linux/coda.h b/include/linux/coda.h
+index d30209b9cef8..0ca0c83fdb1c 100644
+--- a/include/linux/coda.h
++++ b/include/linux/coda.h
+@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
+ #ifndef _CODA_HEADER_
+ #define _CODA_HEADER_
+
+-#if defined(__linux__)
+ typedef unsigned long long u_quad_t;
+-#endif
++
+ #include <uapi/linux/coda.h>
+ #endif
+diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
+index 15170954aa2b..57d2b2faf6a3 100644
+--- a/include/linux/coda_psdev.h
++++ b/include/linux/coda_psdev.h
+@@ -19,6 +19,17 @@ struct venus_comm {
+ struct mutex vc_mutex;
+ };
+
++/* messages between coda filesystem in kernel and Venus */
++struct upc_req {
++ struct list_head uc_chain;
++ caddr_t uc_data;
++ u_short uc_flags;
++ u_short uc_inSize; /* Size is at most 5000 bytes */
++ u_short uc_outSize;
++ u_short uc_opcode; /* copied from data to save lookup */
++ int uc_unique;
++ wait_queue_head_t uc_sleep; /* process' wait queue */
++};
+
+ static inline struct venus_comm *coda_vcp(struct super_block *sb)
+ {
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index e8579412ad21..d7ee4c6bad48 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -170,3 +170,5 @@
+ #else
+ #define __diag_GCC_8(s)
+ #endif
++
++#define __no_fgcse __attribute__((optimize("-fno-gcse")))
+diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
+index 19e58b9138a0..0454d82f8bd8 100644
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -187,6 +187,10 @@ struct ftrace_likely_data {
+ #define asm_volatile_goto(x...) asm goto(x)
+ #endif
+
++#ifndef __no_fgcse
++# define __no_fgcse
++#endif
++
+ /* Are two types/vars the same type (ignoring qualifiers)? */
+ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 79fec8a8413f..5186ac5b2a29 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2615,6 +2615,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+ extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
+ void *holder);
++extern struct block_device *bd_start_claiming(struct block_device *bdev,
++ void *holder);
++extern void bd_finish_claiming(struct block_device *bdev,
++ struct block_device *whole, void *holder);
++extern void bd_abort_claiming(struct block_device *bdev,
++ struct block_device *whole, void *holder);
+ extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+ extern int __blkdev_reread_part(struct block_device *bdev);
+ extern int blkdev_reread_part(struct block_device *bdev);
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
+index 9ddcf50a3c59..a7f08fb0f865 100644
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
+ might_sleep();
+
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+
+ static inline void devm_gpiod_unhinge(struct device *dev,
+@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
+ might_sleep();
+
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+
+ static inline void gpiod_put_array(struct gpio_descs *descs)
+@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
+ might_sleep();
+
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(descs);
+ }
+
+ static inline struct gpio_desc *__must_check
+@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
+ might_sleep();
+
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+
+ static inline void devm_gpiod_put_array(struct device *dev,
+@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
+ might_sleep();
+
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(descs);
+ }
+
+
+ static inline int gpiod_get_direction(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+ static inline int gpiod_direction_input(struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+ static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+
+@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+ static inline int gpiod_get_value(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+ static inline int gpiod_get_array_value(unsigned int array_size,
+@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+ static inline int gpiod_set_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+ static inline int gpiod_get_raw_array_value(unsigned int array_size,
+@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+ static inline int gpiod_set_raw_array_value(unsigned int array_size,
+ struct gpio_desc **desc_array,
+@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+
+ static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
+@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ }
+ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
+ unsigned long *value_bitmap)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc_array);
+ return 0;
+ }
+
+ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+
+ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -ENOSYS;
+ }
+
+ static inline int gpiod_is_active_low(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+ static inline int gpiod_cansleep(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return 0;
+ }
+
+ static inline int gpiod_to_irq(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -EINVAL;
+ }
+
+@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+ const char *name)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -EINVAL;
+ }
+
+@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
+ static inline int desc_to_gpio(const struct gpio_desc *desc)
+ {
+ /* GPIO can never have been requested */
+- WARN_ON(1);
++ WARN_ON(desc);
+ return -EINVAL;
+ }
+
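
All of the stub changes above share one idea: a NULL descriptor is a
valid "optional GPIO absent" handle and must stay silent, while a
non-NULL one cannot exist when gpiolib is compiled out and deserves a
warning. A userspace sketch of that warn-on-non-NULL pattern (the
WARN_ON macro here is a stand-in, not the kernel's):

#include <stdio.h>

struct gpio_desc;   /* opaque handle, never obtainable without gpiolib */

/* Complain only when the condition holds, i.e. only when a caller
 * somehow passes a real descriptor into a stub. */
#define WARN_ON(cond) \
    ((cond) ? (fprintf(stderr, "warning: %s\n", #cond), 1) : 0)

static inline int gpiod_get_value(const struct gpio_desc *desc)
{
    /* NULL means "optional GPIO not present" and is silently fine. */
    WARN_ON(desc);
    return 0;
}

int main(void)
{
    printf("%d\n", gpiod_get_value(NULL));  /* no warning emitted */
    return 0;
}
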
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index ae892eef8b82..988fde33cd7f 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -324,7 +324,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+ extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+ extern void try_offline_node(int nid);
+ extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+-extern void remove_memory(int nid, u64 start, u64 size);
++extern int remove_memory(int nid, u64 start, u64 size);
+ extern void __remove_memory(int nid, u64 start, u64 size);
+
+ #else
+@@ -341,7 +341,11 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+ return -EINVAL;
+ }
+
+-static inline void remove_memory(int nid, u64 start, u64 size) {}
++static inline int remove_memory(int nid, u64 start, u64 size)
++{
++ return -EBUSY;
++}
++
+ static inline void __remove_memory(int nid, u64 start, u64 size) {}
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 0ae41b5df101..db0fc59cf4f0 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -2722,6 +2722,9 @@ struct ib_client {
+ const union ib_gid *gid,
+ const struct sockaddr *addr,
+ void *client_data);
++
++ refcount_t uses;
++ struct completion uses_zero;
+ struct list_head list;
+ u32 client_id;
+
+diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
+index aa6623efd2dd..d50d51a57fe4 100644
+--- a/include/uapi/linux/coda_psdev.h
++++ b/include/uapi/linux/coda_psdev.h
+@@ -7,19 +7,6 @@
+ #define CODA_PSDEV_MAJOR 67
+ #define MAX_CODADEVS 5 /* how many do we allow */
+
+-
+-/* messages between coda filesystem in kernel and Venus */
+-struct upc_req {
+- struct list_head uc_chain;
+- caddr_t uc_data;
+- u_short uc_flags;
+- u_short uc_inSize; /* Size is at most 5000 bytes */
+- u_short uc_outSize;
+- u_short uc_opcode; /* copied from data to save lookup */
+- int uc_unique;
+- wait_queue_head_t uc_sleep; /* process' wait queue */
+-};
+-
+ #define CODA_REQ_ASYNC 0x1
+ #define CODA_REQ_READ 0x2
+ #define CODA_REQ_WRITE 0x4
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 216cad1ff0d0..65c351564ad0 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -438,7 +438,6 @@ static void mqueue_evict_inode(struct inode *inode)
+ {
+ struct mqueue_inode_info *info;
+ struct user_struct *user;
+- unsigned long mq_bytes, mq_treesize;
+ struct ipc_namespace *ipc_ns;
+ struct msg_msg *msg, *nmsg;
+ LIST_HEAD(tmp_msg);
+@@ -461,16 +460,18 @@ static void mqueue_evict_inode(struct inode *inode)
+ free_msg(msg);
+ }
+
+- /* Total amount of bytes accounted for the mqueue */
+- mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+- min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+- sizeof(struct posix_msg_tree_node);
+-
+- mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+- info->attr.mq_msgsize);
+-
+ user = info->user;
+ if (user) {
++ unsigned long mq_bytes, mq_treesize;
++
++ /* Total amount of bytes accounted for the mqueue */
++ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
++ min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
++ sizeof(struct posix_msg_tree_node);
++
++ mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
++ info->attr.mq_msgsize);
++
+ spin_lock(&mq_lock);
+ user->mq_bytes -= mq_bytes;
+ /*
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index 546ebee39e2a..5fcc7a17eb5a 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -1073,11 +1073,18 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
+ !btf_type_is_var(size_type)))
+ return NULL;
+
+- size = btf->resolved_sizes[size_type_id];
+ size_type_id = btf->resolved_ids[size_type_id];
+ size_type = btf_type_by_id(btf, size_type_id);
+ if (btf_type_nosize_or_null(size_type))
+ return NULL;
++ else if (btf_type_has_size(size_type))
++ size = size_type->size;
++ else if (btf_type_is_array(size_type))
++ size = btf->resolved_sizes[size_type_id];
++ else if (btf_type_is_ptr(size_type))
++ size = sizeof(void *);
++ else
++ return NULL;
+ }
+
+ *type_id = size_type_id;
+@@ -1602,7 +1609,6 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
+ const struct btf_type *next_type;
+ u32 next_type_id = t->type;
+ struct btf *btf = env->btf;
+- u32 next_type_size = 0;
+
+ next_type = btf_type_by_id(btf, next_type_id);
+ if (!next_type || btf_type_is_resolve_source_only(next_type)) {
+@@ -1620,7 +1626,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
+ * save us a few type-following when we use it later (e.g. in
+ * pretty print).
+ */
+- if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
++ if (!btf_type_id_size(btf, &next_type_id, NULL)) {
+ if (env_type_is_resolved(env, next_type_id))
+ next_type = btf_type_id_resolve(btf, &next_type_id);
+
+@@ -1633,7 +1639,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
+ }
+ }
+
+- env_stack_pop_resolved(env, next_type_id, next_type_size);
++ env_stack_pop_resolved(env, next_type_id, 0);
+
+ return 0;
+ }
+@@ -1645,7 +1651,6 @@ static int btf_var_resolve(struct btf_verifier_env *env,
+ const struct btf_type *t = v->t;
+ u32 next_type_id = t->type;
+ struct btf *btf = env->btf;
+- u32 next_type_size;
+
+ next_type = btf_type_by_id(btf, next_type_id);
+ if (!next_type || btf_type_is_resolve_source_only(next_type)) {
+@@ -1675,12 +1680,12 @@ static int btf_var_resolve(struct btf_verifier_env *env,
+ * forward types or similar that would resolve to size of
+ * zero is allowed.
+ */
+- if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
++ if (!btf_type_id_size(btf, &next_type_id, NULL)) {
+ btf_verifier_log_type(env, v->t, "Invalid type_id");
+ return -EINVAL;
+ }
+
+- env_stack_pop_resolved(env, next_type_id, next_type_size);
++ env_stack_pop_resolved(env, next_type_id, 0);
+
+ return 0;
+ }
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index f2148db91439..ceee0730fba5 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1295,7 +1295,7 @@ bool bpf_opcode_in_insntable(u8 code)
+ *
+ * Decode and execute eBPF instructions.
+ */
+-static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
++static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+ {
+ #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
+ #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
+diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
+index 13f0cb080a4d..5f4e1b78babb 100644
+--- a/kernel/dma/swiotlb.c
++++ b/kernel/dma/swiotlb.c
+@@ -546,7 +546,7 @@ not_found:
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+ dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
+ size, io_tlb_nslabs, tmp_io_tlb_used);
+- return DMA_MAPPING_ERROR;
++ return (phys_addr_t)DMA_MAPPING_ERROR;
+ found:
+ io_tlb_used += nslots;
+ spin_unlock_irqrestore(&io_tlb_lock, flags);
+@@ -664,7 +664,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
+ /* Oh well, have to allocate and map a bounce buffer. */
+ *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+ *phys, size, dir, attrs);
+- if (*phys == DMA_MAPPING_ERROR)
++ if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
+ return false;
+
+ /* Ensure that the address returned is DMA'ble */
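
The casts above matter when phys_addr_t and dma_addr_t differ in
width: without them, integer promotion makes the sentinel comparison
never match. A sketch with deliberately mismatched typedef widths,
illustrative rather than any real kernel configuration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;   /* e.g. 64-bit DMA addresses */
typedef uint32_t phys_addr_t;  /* e.g. 32-bit physical addresses */

#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

int main(void)
{
    /* The failing path stores the sentinel in a phys_addr_t. */
    phys_addr_t phys = (phys_addr_t)DMA_MAPPING_ERROR;

    /* Without the cast, the comparison would promote phys to 64 bits
     * and compare 0x00000000ffffffff against 0xffffffffffffffff:
     * never equal, so the error would be missed. */
    if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
        printf("mapping error detected\n");
    return 0;
}
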
+diff --git a/kernel/module.c b/kernel/module.c
+index 80c7c09584cf..8431c3d47c97 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3385,8 +3385,7 @@ static bool finished_loading(const char *name)
+ sched_annotate_sleep();
+ mutex_lock(&module_mutex);
+ mod = find_module_all(name, strlen(name), true);
+- ret = !mod || mod->state == MODULE_STATE_LIVE
+- || mod->state == MODULE_STATE_GOING;
++ ret = !mod || mod->state == MODULE_STATE_LIVE;
+ mutex_unlock(&module_mutex);
+
+ return ret;
+@@ -3576,8 +3575,7 @@ again:
+ mutex_lock(&module_mutex);
+ old = find_module_all(mod->name, strlen(mod->name), true);
+ if (old != NULL) {
+- if (old->state == MODULE_STATE_COMING
+- || old->state == MODULE_STATE_UNFORMED) {
++ if (old->state != MODULE_STATE_LIVE) {
+ /* Wait in case it fails to load. */
+ mutex_unlock(&module_mutex);
+ err = wait_event_interruptible(module_wq,
+diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
+index 36139de0a3c4..899b726c9e98 100644
+--- a/kernel/stacktrace.c
++++ b/kernel/stacktrace.c
+@@ -226,12 +226,17 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+ .store = store,
+ .size = size,
+ };
++ mm_segment_t fs;
+
+ /* Trace user stack if not a kernel thread */
+ if (!current->mm)
+ return 0;
+
++ fs = get_fs();
++ set_fs(USER_DS);
+ arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
++ set_fs(fs);
++
+ return c.len;
+ }
+ #endif
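
The fix above is the save/override/restore idiom wrapped around the
user-stack walk. A generic sketch of the shape, with hypothetical
names standing in for get_fs()/set_fs() and the walker:

#include <stdio.h>

/* Hypothetical stand-ins: save the current mode, override it for the
 * critical section, and restore the saved value afterwards. */
enum seg_mode { KERNEL_MODE, USER_MODE };
static enum seg_mode current_mode = KERNEL_MODE;

static enum seg_mode get_mode(void) { return current_mode; }
static void set_mode(enum seg_mode m) { current_mode = m; }

static void walk_user_stack(void)
{
    printf("walking with mode %d\n", current_mode);
}

int main(void)
{
    enum seg_mode saved = get_mode();   /* like fs = get_fs() */

    set_mode(USER_MODE);                /* like set_fs(USER_DS) */
    walk_user_stack();
    set_mode(saved);                    /* like set_fs(fs) */
    return 0;
}
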
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 576c41644e77..208220d526e8 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1622,6 +1622,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+ return keep_regs;
+ }
+
++static struct ftrace_ops *
++ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
++static struct ftrace_ops *
++ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
++
+ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ int filter_hash,
+ bool inc)
+@@ -1750,15 +1755,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
+ }
+
+ /*
+- * If the rec had TRAMP enabled, then it needs to
+- * be cleared. As TRAMP can only be enabled iff
+- * there is only a single ops attached to it.
+- * In otherwords, always disable it on decrementing.
+- * In the future, we may set it if rec count is
+- * decremented to one, and the ops that is left
+- * has a trampoline.
++ * The TRAMP needs to be set only if rec count
++ * is decremented to one, and the ops that is
++ * left has a trampoline. As TRAMP can only be
++ * enabled if there is only a single ops attached
++ * to it.
+ */
+- rec->flags &= ~FTRACE_FL_TRAMP;
++ if (ftrace_rec_count(rec) == 1 &&
++ ftrace_find_tramp_ops_any(rec))
++ rec->flags |= FTRACE_FL_TRAMP;
++ else
++ rec->flags &= ~FTRACE_FL_TRAMP;
+
+ /*
+ * flags will be cleared in ftrace_check_record()
+@@ -1951,11 +1958,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
+ printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
+ }
+
+-static struct ftrace_ops *
+-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+-static struct ftrace_ops *
+-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+-
+ enum ftrace_bug_type ftrace_bug_type;
+ const void *ftrace_expected;
+
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index 69ebf3c2f1b5..78af97163147 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
+ if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
+ return 0;
+
++ /*
++ * Do not trace a function if it's filtered by set_graph_notrace.
++ * Make the index of ret stack negative to indicate that it should
++ * ignore further functions. But it needs its own ret stack entry
++ * to recover the original index in order to continue tracing after
++ * returning from the function.
++ */
+ if (ftrace_graph_notrace_addr(trace->func)) {
+ trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
+ /*
+@@ -155,16 +162,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
+ if (ftrace_graph_ignore_irqs())
+ return 0;
+
+- /*
+- * Do not trace a function if it's filtered by set_graph_notrace.
+- * Make the index of ret stack negative to indicate that it should
+- * ignore further functions. But it needs its own ret stack entry
+- * to recover the original index in order to continue tracing after
+- * returning from the function.
+- */
+- if (ftrace_graph_notrace_addr(trace->func))
+- return 1;
+-
+ /*
+ * Stop here if tracing_threshold is set. We only write function return
+ * events to the ring buffer.
+diff --git a/lib/Makefile b/lib/Makefile
+index fb7697031a79..7c3c1ad21afc 100644
+--- a/lib/Makefile
++++ b/lib/Makefile
+@@ -278,7 +278,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
+ obj-$(CONFIG_UBSAN) += ubsan.o
+
+ UBSAN_SANITIZE_ubsan.o := n
+-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
++KASAN_SANITIZE_ubsan.o := n
++CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
+
+ obj-$(CONFIG_SBITMAP) += sbitmap.o
+
+diff --git a/lib/ioremap.c b/lib/ioremap.c
+index 063213685563..a95161d9c883 100644
+--- a/lib/ioremap.c
++++ b/lib/ioremap.c
+@@ -86,6 +86,9 @@ static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
+ if ((end - addr) != PMD_SIZE)
+ return 0;
+
++ if (!IS_ALIGNED(addr, PMD_SIZE))
++ return 0;
++
+ if (!IS_ALIGNED(phys_addr, PMD_SIZE))
+ return 0;
+
+@@ -126,6 +129,9 @@ static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
+ if ((end - addr) != PUD_SIZE)
+ return 0;
+
++ if (!IS_ALIGNED(addr, PUD_SIZE))
++ return 0;
++
+ if (!IS_ALIGNED(phys_addr, PUD_SIZE))
+ return 0;
+
+@@ -166,6 +172,9 @@ static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
+ if ((end - addr) != P4D_SIZE)
+ return 0;
+
++ if (!IS_ALIGNED(addr, P4D_SIZE))
++ return 0;
++
+ if (!IS_ALIGNED(phys_addr, P4D_SIZE))
+ return 0;
+
+diff --git a/lib/test_overflow.c b/lib/test_overflow.c
+index fc680562d8b6..7a4b6f6c5473 100644
+--- a/lib/test_overflow.c
++++ b/lib/test_overflow.c
+@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
+ * Deal with the various forms of allocator arguments. See comments above
+ * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
+ */
+-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
+-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
++#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
++#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
++#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
+ #define alloc000(alloc, arg, sz) alloc(sz)
+ #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
+-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
++#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
+ #define free0(free, arg, ptr) free(ptr)
+ #define free1(free, arg, ptr) free(arg, ptr)
+
+-/* Wrap around to 8K */
+-#define TEST_SIZE (9 << PAGE_SHIFT)
++/* Wrap around to 16K */
++#define TEST_SIZE (5 * 4096)
+
+ #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
+ static int __init test_ ## func (void *arg) \
+diff --git a/lib/test_string.c b/lib/test_string.c
+index bf8def01ed20..b5117ae59693 100644
+--- a/lib/test_string.c
++++ b/lib/test_string.c
+@@ -36,7 +36,7 @@ static __init int memset16_selftest(void)
+ fail:
+ kfree(p);
+ if (i < 256)
+- return (i << 24) | (j << 16) | k;
++ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+ }
+
+@@ -72,7 +72,7 @@ static __init int memset32_selftest(void)
+ fail:
+ kfree(p);
+ if (i < 256)
+- return (i << 24) | (j << 16) | k;
++ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+ }
+
+@@ -108,7 +108,7 @@ static __init int memset64_selftest(void)
+ fail:
+ kfree(p);
+ if (i < 256)
+- return (i << 24) | (j << 16) | k;
++ return (i << 24) | (j << 16) | k | 0x8000;
+ return 0;
+ }
+
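
The three '| 0x8000' hunks above fix an ambiguous encoding: a failure
at i == j == k == 0 used to return 0, which the caller reads as
success. A sketch of the fixed encoding:

#include <stdio.h>

/* Pack the failing loop indices into one return value; bit 15 marks
 * "failure" so that indices (0, 0, 0) can no longer collide with the
 * all-clear return of 0. */
static int encode_failure(int i, int j, int k)
{
    return (i << 24) | (j << 16) | k | 0x8000;
}

int main(void)
{
    int rc = encode_failure(0, 0, 0);

    /* Without the 0x8000 marker this would print 0 -> "success". */
    printf("rc = %#x (%s)\n", rc, rc ? "failure" : "success");
    return 0;
}
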
+diff --git a/mm/cma.c b/mm/cma.c
+index 3340ef34c154..4973d253dc83 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -278,6 +278,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
+ */
+ alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
++ if (fixed && base & (alignment - 1)) {
++ ret = -EINVAL;
++ pr_err("Region at %pa must be aligned to %pa bytes\n",
++ &base, &alignment);
++ goto err;
++ }
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+@@ -308,6 +314,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
+ if (limit == 0 || limit > memblock_end)
+ limit = memblock_end;
+
++ if (base + size > limit) {
++ ret = -EINVAL;
++ pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
++ &size, &base, &limit);
++ goto err;
++ }
++
+ /* Reserve memory */
+ if (fixed) {
+ if (memblock_is_region_reserved(base, size) ||
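
Both new checks above rest on power-of-two alignment arithmetic. A
small sketch of the 'base & (alignment - 1)' test and the ALIGN()
round-up it guards, with illustrative values:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

/* For a power-of-two alignment, base is aligned iff every bit below
 * the alignment is zero: the test added in the hunk above. */
static int base_is_aligned(phys_addr_t base, phys_addr_t alignment)
{
    return (base & (alignment - 1)) == 0;
}

int main(void)
{
    phys_addr_t alignment = 4 << 20;       /* 4 MiB */
    phys_addr_t base = (64 << 20) + 4096;  /* 64 MiB + one page */

    printf("aligned: %d\n", base_is_aligned(base, alignment)); /* 0 */
    printf("rounded up: %#llx\n",
           (unsigned long long)ALIGN(base, alignment));
    return 0;
}
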
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 9e1b9acb116b..952dc2fb24e5 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
+
+ /*
+ * Periodically drop the lock (if held) regardless of its
+- * contention, to give chance to IRQs. Abort async compaction
+- * if contended.
++ * contention, to give chance to IRQs. Abort completely if
++ * a fatal signal is pending.
+ */
+ if (!(low_pfn % SWAP_CLUSTER_MAX)
+ && compact_unlock_should_abort(&pgdat->lru_lock,
+- flags, &locked, cc))
+- break;
++ flags, &locked, cc)) {
++ low_pfn = 0;
++ goto fatal_pending;
++ }
+
+ if (!pfn_valid_within(low_pfn))
+ goto isolate_fail;
+@@ -1060,6 +1062,7 @@ isolate_abort:
+ trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
+ nr_scanned, nr_isolated);
+
++fatal_pending:
+ cc->total_migrate_scanned += nr_scanned;
+ if (nr_isolated)
+ count_compact_events(COMPACTISOLATED, nr_isolated);
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 591eafafbd8c..902d020aa70e 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -691,12 +691,15 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
+ if (mem_cgroup_disabled())
+ return;
+
+- __this_cpu_add(memcg->vmstats_local->stat[idx], val);
+-
+ x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ struct mem_cgroup *mi;
+
++ /*
++ * Batch local counters to keep them in sync with
++ * the hierarchical ones.
++ */
++ __this_cpu_add(memcg->vmstats_local->stat[idx], x);
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ atomic_long_add(x, &mi->vmstats[idx]);
+ x = 0;
+@@ -745,13 +748,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ /* Update memcg */
+ __mod_memcg_state(memcg, idx, val);
+
+- /* Update lruvec */
+- __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
+-
+ x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
+ if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
+ struct mem_cgroup_per_node *pi;
+
++ /*
++ * Batch local counters to keep them in sync with
++ * the hierarchical ones.
++ */
++ __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
+ for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
+ atomic_long_add(x, &pi->lruvec_stat[idx]);
+ x = 0;
+@@ -773,12 +778,15 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ if (mem_cgroup_disabled())
+ return;
+
+- __this_cpu_add(memcg->vmstats_local->events[idx], count);
+-
+ x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
+ if (unlikely(x > MEMCG_CHARGE_BATCH)) {
+ struct mem_cgroup *mi;
+
++ /*
++ * Batch local counters to keep them in sync with
++ * the hierarchical ones.
++ */
++ __this_cpu_add(memcg->vmstats_local->events[idx], x);
+ for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+ atomic_long_add(x, &mi->vmevents[idx]);
+ x = 0;
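
All three hunks above apply the same fix: flush the whole pending
batch x into the local counter instead of adding only val on every
call, so the local and hierarchical counters can no longer drift
apart. A single-threaded sketch of that batching flow (the BATCH
value is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define BATCH 32

static long local_stat;    /* cheap per-cpu-style counter */
static long hier_stat;     /* shared hierarchical counter */
static long pending;       /* delta not yet flushed */

/* Mirrors the fixed __mod_memcg_state() flow: accumulate deltas and,
 * once the batch overflows, add the whole batch to BOTH counters. */
static void mod_stat(long val)
{
    long x = val + pending;

    if (labs(x) > BATCH) {
        local_stat += x;   /* batched, like __this_cpu_add(..., x) */
        hier_stat  += x;
        x = 0;
    }
    pending = x;           /* residue, like the __this_cpu_write() */
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        mod_stat(1);
    printf("local=%ld hier=%ld pending=%ld\n",
           local_stat, hier_stat, pending);  /* local == hier == 99 */
    return 0;
}
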
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index e096c987d261..77d1f69cdead 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1736,9 +1736,10 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
+ endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
+ pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
+ &beginpa, &endpa);
+- }
+
+- return ret;
++ return -EBUSY;
++ }
++ return 0;
+ }
+
+ static int check_cpu_on_node(pg_data_t *pgdat)
+@@ -1821,19 +1822,9 @@ static void __release_memory_resource(resource_size_t start,
+ }
+ }
+
+-/**
+- * remove_memory
+- * @nid: the node ID
+- * @start: physical address of the region to remove
+- * @size: size of the region to remove
+- *
+- * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
+- * and online/offline operations before this call, as required by
+- * try_offline_node().
+- */
+-void __ref __remove_memory(int nid, u64 start, u64 size)
++static int __ref try_remove_memory(int nid, u64 start, u64 size)
+ {
+- int ret;
++ int rc = 0;
+
+ BUG_ON(check_hotplug_memory_range(start, size));
+
+@@ -1841,13 +1832,13 @@ void __ref __remove_memory(int nid, u64 start, u64 size)
+
+ /*
+ * All memory blocks must be offlined before removing memory. Check
+- * whether all memory blocks in question are offline and trigger a BUG()
++ * whether all memory blocks in question are offline and return error
+ * if this is not the case.
+ */
+- ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
+- check_memblock_offlined_cb);
+- if (ret)
+- BUG();
++ rc = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
++ check_memblock_offlined_cb);
++ if (rc)
++ goto done;
+
+ /* remove memmap entry */
+ firmware_map_remove(start, start + size, "System RAM");
+@@ -1859,14 +1850,45 @@ void __ref __remove_memory(int nid, u64 start, u64 size)
+
+ try_offline_node(nid);
+
++done:
+ mem_hotplug_done();
++ return rc;
+ }
+
+-void remove_memory(int nid, u64 start, u64 size)
++/**
++ * remove_memory
++ * @nid: the node ID
++ * @start: physical address of the region to remove
++ * @size: size of the region to remove
++ *
++ * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
++ * and online/offline operations before this call, as required by
++ * try_offline_node().
++ */
++void __remove_memory(int nid, u64 start, u64 size)
++{
++
++ /*
++ * Trigger BUG() if some memory is not offlined prior to calling this
++ * function.
++ */
++ if (try_remove_memory(nid, start, size))
++ BUG();
++}
++
++/*
++ * Remove memory if every memory block is offline, otherwise return -EBUSY if
++ * some memory is not offline.
++ */
++int remove_memory(int nid, u64 start, u64 size)
+ {
++ int rc;
++
+ lock_device_hotplug();
+- __remove_memory(nid, start, size);
++ rc = try_remove_memory(nid, start, size);
+ unlock_device_hotplug();
++
++ return rc;
+ }
+ EXPORT_SYMBOL_GPL(remove_memory);
+ #endif /* CONFIG_MEMORY_HOTREMOVE */
+diff --git a/mm/migrate.c b/mm/migrate.c
+index e9594bc0d406..dbb3b5bee4ee 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -771,12 +771,12 @@ recheck_buffers:
+ }
+ bh = bh->b_this_page;
+ } while (bh != head);
+- spin_unlock(&mapping->private_lock);
+ if (busy) {
+ if (invalidated) {
+ rc = -EAGAIN;
+ goto unlock_buffers;
+ }
++ spin_unlock(&mapping->private_lock);
+ invalidate_bh_lrus();
+ invalidated = true;
+ goto recheck_buffers;
+@@ -809,6 +809,8 @@ recheck_buffers:
+
+ rc = MIGRATEPAGE_SUCCESS;
+ unlock_buffers:
++ if (check_refs)
++ spin_unlock(&mapping->private_lock);
+ bh = head;
+ do {
+ unlock_buffer(bh);
+@@ -2345,16 +2347,13 @@ next:
+ static void migrate_vma_collect(struct migrate_vma *migrate)
+ {
+ struct mmu_notifier_range range;
+- struct mm_walk mm_walk;
+-
+- mm_walk.pmd_entry = migrate_vma_collect_pmd;
+- mm_walk.pte_entry = NULL;
+- mm_walk.pte_hole = migrate_vma_collect_hole;
+- mm_walk.hugetlb_entry = NULL;
+- mm_walk.test_walk = NULL;
+- mm_walk.vma = migrate->vma;
+- mm_walk.mm = migrate->vma->vm_mm;
+- mm_walk.private = migrate;
++ struct mm_walk mm_walk = {
++ .pmd_entry = migrate_vma_collect_pmd,
++ .pte_hole = migrate_vma_collect_hole,
++ .vma = migrate->vma,
++ .mm = migrate->vma->vm_mm,
++ .private = migrate,
++ };
+
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
+ migrate->start,
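
The conversion above to a designated initializer is more than style:
every member not named in the initializer (pte_entry, hugetlb_entry,
test_walk) is guaranteed zero, whereas the old field-by-field
assignments left unset members holding stack garbage. A sketch:

#include <stdio.h>

struct mm_walk_like {
    int (*pmd_entry)(void);
    int (*pte_entry)(void);
    int (*pte_hole)(void);
    void *private;
};

static int walk_pmd(void) { return 0; }

int main(void)
{
    /* Named fields get their values; pte_entry and pte_hole are
     * implicitly NULL. With plain declarations plus assignments, any
     * field you forget would contain indeterminate bytes instead. */
    struct mm_walk_like walk = {
        .pmd_entry = walk_pmd,
        .private   = NULL,
    };

    printf("pte_entry is %s\n", walk.pte_entry ? "set" : "NULL");
    return 0;
}
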
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 58251ba63e4a..cbd3411f644e 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -1003,7 +1003,8 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name,
+ }
+
+ struct kmem_cache *
+-kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
++kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
++{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
+ EXPORT_SYMBOL(kmalloc_caches);
+
+ /*
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 96aafbf8ce4e..4ebf20152328 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -684,7 +684,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
+ unsigned long ret, freed = 0;
+ struct shrinker *shrinker;
+
+- if (!mem_cgroup_is_root(memcg))
++ /*
++ * The root memcg might be allocated even though memcg is disabled
++ * via "cgroup_disable=memory" boot parameter. This could make
++ * mem_cgroup_is_root() return false, then just run memcg slab
++ * shrink, but skip global shrink. This may result in premature
++ * oom.
++ */
++ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
+ return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
+
+ if (!down_read_trylock(&shrinker_rwsem))
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index dfcd69d08c1e..3b27094dc42e 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -101,6 +101,7 @@ struct z3fold_buddy_slots {
+ * @refcount: reference count for the z3fold page
+ * @work: work_struct for page layout optimization
+ * @slots: pointer to the structure holding buddy slots
++ * @pool: pointer to the containing pool
+ * @cpu: CPU which this page "belongs" to
+ * @first_chunks: the size of the first buddy in chunks, 0 if free
+ * @middle_chunks: the size of the middle buddy in chunks, 0 if free
+@@ -114,6 +115,7 @@ struct z3fold_header {
+ struct kref refcount;
+ struct work_struct work;
+ struct z3fold_buddy_slots *slots;
++ struct z3fold_pool *pool;
+ short cpu;
+ unsigned short first_chunks;
+ unsigned short middle_chunks;
+@@ -320,6 +322,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
+ zhdr->start_middle = 0;
+ zhdr->cpu = -1;
+ zhdr->slots = slots;
++ zhdr->pool = pool;
+ INIT_LIST_HEAD(&zhdr->buddy);
+ INIT_WORK(&zhdr->work, compact_page_work);
+ return zhdr;
+@@ -426,7 +429,7 @@ static enum buddy handle_to_buddy(unsigned long handle)
+
+ static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
+ {
+- return slots_to_pool(zhdr->slots);
++ return zhdr->pool;
+ }
+
+ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
+@@ -1357,12 +1360,22 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+ unlock_page(page);
+ return -EBUSY;
+ }
++ if (work_pending(&zhdr->work)) {
++ z3fold_page_unlock(zhdr);
++ return -EAGAIN;
++ }
+ new_zhdr = page_address(newpage);
+ memcpy(new_zhdr, zhdr, PAGE_SIZE);
+ newpage->private = page->private;
+ page->private = 0;
+ z3fold_page_unlock(zhdr);
+ spin_lock_init(&new_zhdr->page_lock);
++ INIT_WORK(&new_zhdr->work, compact_page_work);
++ /*
++ * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
++ * so we only have to reinitialize it.
++ */
++ INIT_LIST_HEAD(&new_zhdr->buddy);
+ new_mapping = page_mapping(page);
+ __ClearPageMovable(page);
+ ClearPagePrivate(page);
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index fec6ec2ffa47..38d77353c66a 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -142,10 +142,8 @@ FORCE:
+ # optimization, we don't need to read them if the target does not
+ # exist, we will rebuild anyway in that case.
+
+-cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
++existing-targets := $(wildcard $(sort $(targets)))
+
+-ifneq ($(cmd_files),)
+- include $(cmd_files)
+-endif
++-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
+
+ .PHONY: $(PHONY)
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index a245255cecb2..27964917cbfd 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -867,6 +867,7 @@ int conf_write(const char *name)
+ const char *str;
+ char tmpname[PATH_MAX + 1], oldname[PATH_MAX + 1];
+ char *env;
++ int i;
+ bool need_newline = false;
+
+ if (!name)
+@@ -949,6 +950,9 @@ next:
+ }
+ fclose(out);
+
++ for_all_symbols(i, sym)
++ sym->flags &= ~SYMBOL_WRITTEN;
++
+ if (*tmpname) {
+ if (is_same(name, tmpname)) {
+ conf_message("No change to %s", name);
+diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
+index 624ccc6ac744..f8efaa9f647c 100644
+--- a/security/selinux/ss/policydb.c
++++ b/security/selinux/ss/policydb.c
+@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+ return v;
+ }
+
++static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
++
+ /*
+ * Initialize a policy database structure.
+ */
+@@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p)
+ out:
+ hashtab_destroy(p->filename_trans);
+ hashtab_destroy(p->range_tr);
+- for (i = 0; i < SYM_NUM; i++)
++ for (i = 0; i < SYM_NUM; i++) {
++ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
+ hashtab_destroy(p->symtab[i].table);
++ }
+ return rc;
+ }
+
+diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
+index 1192c7561d62..3c2db3816029 100644
+--- a/sound/hda/hdac_i915.c
++++ b/sound/hda/hdac_i915.c
+@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
+ if (!acomp)
+ return -ENODEV;
+ if (!acomp->ops) {
+- request_module("i915");
+- /* 60s timeout */
+- wait_for_completion_timeout(&bind_complete,
+- msecs_to_jiffies(60 * 1000));
++ if (!IS_ENABLED(CONFIG_MODULES) ||
++ !request_module("i915")) {
++ /* 60s timeout */
++ wait_for_completion_timeout(&bind_complete,
++ msecs_to_jiffies(60 * 1000));
++ }
+ }
+ if (!acomp->ops) {
+ dev_info(bus->dev, "couldn't bind with audio component\n");
+diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
+index f470144d1a70..bf114ca9ca87 100644
+--- a/tools/perf/builtin-version.c
++++ b/tools/perf/builtin-version.c
+@@ -19,6 +19,7 @@ static struct version version;
+ static struct option version_options[] = {
+ OPT_BOOLEAN(0, "build-options", &version.build_options,
+ "display the build options"),
++ OPT_END(),
+ };
+
+ static const char * const version_usage[] = {
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index 1c9511262947..f1573a11d3e4 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -1,4 +1,5 @@
+ # SPDX-License-Identifier: GPL-2.0
++include ../../../../scripts/Kbuild.include
+
+ LIBDIR := ../../../lib
+ BPFDIR := $(LIBDIR)/bpf
+@@ -185,8 +186,8 @@ $(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
+
+ $(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
+ $(ALU32_BUILD_DIR)/test_progs_32
+- $(CLANG) $(CLANG_FLAGS) \
+- -O2 -target bpf -emit-llvm -c $< -o - | \
++ ($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
++ echo "clang failed") | \
+ $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
+ -filetype=obj -o $@
+ ifeq ($(DWARF2BTF),y)
+@@ -197,16 +198,16 @@ endif
+ # Have one program compiled without "-target bpf" to test whether libbpf loads
+ # it successfully
+ $(OUTPUT)/test_xdp.o: progs/test_xdp.c
+- $(CLANG) $(CLANG_FLAGS) \
+- -O2 -emit-llvm -c $< -o - | \
++ ($(CLANG) $(CLANG_FLAGS) -O2 -emit-llvm -c $< -o - || \
++ echo "clang failed") | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+ ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+ endif
+
+ $(OUTPUT)/%.o: progs/%.c
+- $(CLANG) $(CLANG_FLAGS) \
+- -O2 -target bpf -emit-llvm -c $< -o - | \
++ ($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
++ echo "clang failed") | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+ ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 4c223266299a..bdb69599c4bd 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
+ strtok(NULL, delim);
+ strtok(NULL, delim);
+
+- if (strcmp(fs, "cgroup") == 0 &&
+- strcmp(type, "cgroup2") == 0) {
++ if (strcmp(type, "cgroup2") == 0) {
+ strncpy(root, mount, len);
+ return 0;
+ }
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-08-04 16:16 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-08-04 16:16 UTC (permalink / raw
To: gentoo-commits
commit: 0cd4c4c48a094b7baeb008455b70fa37a9287345
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug 4 16:16:01 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug 4 16:16:01 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0cd4c4c4
Linux patch 5.2.6
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1005_linux-5.2.6.patch | 1297 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1301 insertions(+)
diff --git a/0000_README b/0000_README
index 01e534c..3a50bfb 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch: 1004_linux-5.2.5.patch
From: https://www.kernel.org
Desc: Linux 5.2.5
+Patch: 1005_linux-5.2.6.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.6
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1005_linux-5.2.6.patch b/1005_linux-5.2.6.patch
new file mode 100644
index 0000000..4a18db6
--- /dev/null
+++ b/1005_linux-5.2.6.patch
@@ -0,0 +1,1297 @@
+diff --git a/Makefile b/Makefile
+index 78bd926c8439..3cd40f1a8f75 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
+index b9a37057b77a..cee24c308337 100644
+--- a/arch/sh/boards/Kconfig
++++ b/arch/sh/boards/Kconfig
+@@ -8,27 +8,19 @@ config SH_ALPHA_BOARD
+ bool
+
+ config SH_DEVICE_TREE
+- bool "Board Described by Device Tree"
++ bool
+ select OF
+ select OF_EARLY_FLATTREE
+ select TIMER_OF
+ select COMMON_CLK
+ select GENERIC_CALIBRATE_DELAY
+- help
+- Select Board Described by Device Tree to build a kernel that
+- does not hard-code any board-specific knowledge but instead uses
+- a device tree blob provided by the boot-loader. You must enable
+- drivers for any hardware you want to use separately. At this
+- time, only boards based on the open-hardware J-Core processors
+- have sufficient driver coverage to use this option; do not
+- select it if you are using original SuperH hardware.
+
+ config SH_JCORE_SOC
+ bool "J-Core SoC"
+- depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
++ select SH_DEVICE_TREE
+ select CLKSRC_JCORE_PIT
+ select JCORE_AIC
+- default y if CPU_J2
++ depends on CPU_J2
+ help
+ Select this option to include drivers core components of the
+ J-Core SoC, including interrupt controllers and timers.
+diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
+index a55be205b91a..dbfe34664633 100644
+--- a/drivers/bluetooth/hci_ath.c
++++ b/drivers/bluetooth/hci_ath.c
+@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
+
+ BT_DBG("hu %p", hu);
+
++ if (!hci_uart_has_flow_control(hu))
++ return -EOPNOTSUPP;
++
+ ath = kzalloc(sizeof(*ath), GFP_KERNEL);
+ if (!ath)
+ return -ENOMEM;
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 8905ad2edde7..ae2624fce913 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
+
+ bt_dev_dbg(hu->hdev, "hu %p", hu);
+
++ if (!hci_uart_has_flow_control(hu))
++ return -EOPNOTSUPP;
++
+ bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+ if (!bcm)
+ return -ENOMEM;
+diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
+index 207bae5e0d46..31f25153087d 100644
+--- a/drivers/bluetooth/hci_intel.c
++++ b/drivers/bluetooth/hci_intel.c
+@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
+
+ BT_DBG("hu %p", hu);
+
++ if (!hci_uart_has_flow_control(hu))
++ return -EOPNOTSUPP;
++
+ intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+ if (!intel)
+ return -ENOMEM;
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index c84f985f348d..c953f14656b5 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -284,6 +284,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ return 0;
+ }
+
++/* Check the underlying device or tty has flow control support */
++bool hci_uart_has_flow_control(struct hci_uart *hu)
++{
++ /* serdev nodes check if the needed operations are present */
++ if (hu->serdev)
++ return true;
++
++ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
++ return true;
++
++ return false;
++}
++
+ /* Flow control or un-flow control the device */
+ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
+ {
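
hci_uart_has_flow_control() above is a capability probe over the tty
driver's ops table, and each per-protocol open() hunk bails out with
-EOPNOTSUPP when it fails. A userspace sketch of that
probe-before-bind shape (struct layout and names are illustrative):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal model of a tty ops table: a protocol that needs modem
 * control lines checks the function pointers before binding. */
struct tty_ops_like {
    int (*tiocmget)(void);
    int (*tiocmset)(unsigned set, unsigned clear);
};

static int has_flow_control(const struct tty_ops_like *ops)
{
    return ops && ops->tiocmget && ops->tiocmset;
}

static int proto_open(const struct tty_ops_like *ops)
{
    if (!has_flow_control(ops))
        return -EOPNOTSUPP;   /* same early bail as ath/bcm/qca open() */
    return 0;
}

int main(void)
{
    struct tty_ops_like dumb = { NULL, NULL };

    printf("open -> %d\n", proto_open(&dumb));  /* -EOPNOTSUPP */
    return 0;
}
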
+diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
+index 50212ac629e3..49dcf198ffd8 100644
+--- a/drivers/bluetooth/hci_mrvl.c
++++ b/drivers/bluetooth/hci_mrvl.c
+@@ -52,6 +52,9 @@ static int mrvl_open(struct hci_uart *hu)
+
+ BT_DBG("hu %p", hu);
+
++ if (!hci_uart_has_flow_control(hu))
++ return -EOPNOTSUPP;
++
+ mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
+ if (!mrvl)
+ return -ENOMEM;
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 9d273cdde563..f41fb2c02e4f 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -458,6 +458,9 @@ static int qca_open(struct hci_uart *hu)
+
+ BT_DBG("hu %p qca_open", hu);
+
++ if (!hci_uart_has_flow_control(hu))
++ return -EOPNOTSUPP;
++
+ qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
+ if (!qca)
+ return -ENOMEM;
+diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
+index d8cf005e3c5d..22c278b13ab9 100644
+--- a/drivers/bluetooth/hci_uart.h
++++ b/drivers/bluetooth/hci_uart.h
+@@ -103,6 +103,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
+ int hci_uart_init_ready(struct hci_uart *hu);
+ void hci_uart_init_work(struct work_struct *work);
+ void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
++bool hci_uart_has_flow_control(struct hci_uart *hu);
+ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
+ void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
+ unsigned int oper_speed);
+diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
+index 4c99739b937e..0e224232f746 100644
+--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
+@@ -1955,6 +1955,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+
+ /* get endpoint base */
+ idx = ((ep_addr & 0x7f) - 1) * 2;
++ if (idx > 15)
++ return -EIO;
++
+ if (ep_addr & 0x80)
+ idx++;
+ attr = ep->desc.bmAttributes;
+diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
+index 5e782b3c2fa9..bf1ee654df80 100644
+--- a/drivers/media/radio/radio-raremono.c
++++ b/drivers/media/radio/radio-raremono.c
+@@ -271,6 +271,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
+ return 0;
+ }
+
++static void raremono_device_release(struct v4l2_device *v4l2_dev)
++{
++ struct raremono_device *radio = to_raremono_dev(v4l2_dev);
++
++ kfree(radio->buffer);
++ kfree(radio);
++}
++
+ /* File system interface */
+ static const struct v4l2_file_operations usb_raremono_fops = {
+ .owner = THIS_MODULE,
+@@ -295,12 +303,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
+ struct raremono_device *radio;
+ int retval = 0;
+
+- radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
+- if (radio)
+- radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
+-
+- if (!radio || !radio->buffer)
++ radio = kzalloc(sizeof(*radio), GFP_KERNEL);
++ if (!radio)
++ return -ENOMEM;
++ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
++ if (!radio->buffer) {
++ kfree(radio);
+ return -ENOMEM;
++ }
+
+ radio->usbdev = interface_to_usbdev(intf);
+ radio->intf = intf;
+@@ -324,7 +334,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
+ if (retval != 3 ||
+ (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
+ dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
+- return -ENODEV;
++ retval = -ENODEV;
++ goto free_mem;
+ }
+
+ dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
+@@ -333,7 +344,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
+ retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
+ if (retval < 0) {
+ dev_err(&intf->dev, "couldn't register v4l2_device\n");
+- return retval;
++ goto free_mem;
+ }
+
+ mutex_init(&radio->lock);
+@@ -345,6 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
+ radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
+ radio->vdev.lock = &radio->lock;
+ radio->vdev.release = video_device_release_empty;
++ radio->v4l2_dev.release = raremono_device_release;
+
+ usb_set_intfdata(intf, &radio->v4l2_dev);
+
+@@ -360,6 +372,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
+ }
+ dev_err(&intf->dev, "could not register video device\n");
+ v4l2_device_unregister(&radio->v4l2_dev);
++
++free_mem:
++ kfree(radio->buffer);
++ kfree(radio);
+ return retval;
+ }
+
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index f746f6e2f686..a8a72d5fbd12 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -719,6 +719,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
+ /* Setup */
+ au0828_card_setup(dev);
+
++ /*
++ * Store the pointer to the au0828_dev so it can be accessed in
++ * au0828_usb_disconnect
++ */
++ usb_set_intfdata(interface, dev);
++
+ /* Analog TV */
+ retval = au0828_analog_register(dev, interface);
+ if (retval) {
+@@ -737,12 +743,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
+ /* Remote controller */
+ au0828_rc_register(dev);
+
+- /*
+- * Store the pointer to the au0828_dev so it can be accessed in
+- * au0828_usb_disconnect
+- */
+- usb_set_intfdata(interface, dev);
+-
+ pr_info("Registered device AU0828 [%s]\n",
+ dev->board.name == NULL ? "Unset" : dev->board.name);
+
+diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
+index b2268981c963..17468f7d78ed 100644
+--- a/drivers/media/usb/cpia2/cpia2_usb.c
++++ b/drivers/media/usb/cpia2/cpia2_usb.c
+@@ -893,7 +893,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
+ cpia2_unregister_camera(cam);
+ v4l2_device_disconnect(&cam->v4l2_dev);
+ mutex_unlock(&cam->v4l2_lock);
+- v4l2_device_put(&cam->v4l2_dev);
+
+ if(cam->buffers) {
+ DBG("Wakeup waiting processes\n");
+@@ -902,6 +901,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
+ wake_up_interruptible(&cam->wq_stream);
+ }
+
++ v4l2_device_put(&cam->v4l2_dev);
++
+ LOG("CPiA2 camera disconnected.\n");
+ }
+
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index 70b5cb08d65b..bbf361ce0bd0 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -1670,7 +1670,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
+ }
+ if (!hdw->flag_decoder_missed) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+- "WARNING: No decoder present");
++ "***WARNING*** No decoder present");
+ hdw->flag_decoder_missed = !0;
+ trace_stbit("flag_decoder_missed",
+ hdw->flag_decoder_missed);
+@@ -2356,7 +2356,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ if (hdw_desc->flag_is_experimental) {
+ pvr2_trace(PVR2_TRACE_INFO, "**********");
+ pvr2_trace(PVR2_TRACE_INFO,
+- "WARNING: Support for this device (%s) is experimental.",
++ "***WARNING*** Support for this device (%s) is experimental.",
+ hdw_desc->description);
+ pvr2_trace(PVR2_TRACE_INFO,
+ "Important functionality might not be entirely working.");
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+index 68e323f8d9cf..275394bafe7d 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+@@ -333,11 +333,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
+
+ if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+- "WARNING: Detected a wedged cx25840 chip; the device will not work.");
++ "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+- "WARNING: Try power cycling the pvrusb2 device.");
++ "***WARNING*** Try power cycling the pvrusb2 device.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+- "WARNING: Disabling further access to the device to prevent other foul-ups.");
++ "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
+ // This blocks all further communication with the part.
+ hdw->i2c_func[0x44] = NULL;
+ pvr2_hdw_render_useless(hdw);
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
+index 447279b4a545..e7ab41401577 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
+@@ -343,7 +343,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+- "WARNING: Failed to classify the following standard(s): %.*s",
++ "***WARNING*** Failed to classify the following standard(s): %.*s",
+ bcnt,buf);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
+index 970cf69ac35f..a3ecf7d77949 100644
+--- a/drivers/net/wireless/ath/ath10k/usb.c
++++ b/drivers/net/wireless/ath/ath10k/usb.c
+@@ -1016,7 +1016,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
+ }
+
+ /* TODO: remove this once USB support is fully implemented */
+- ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
++ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
+
+ return 0;
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 499acf07d61a..e942b3e84068 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -12,11 +12,6 @@ module_param(multipath, bool, 0444);
+ MODULE_PARM_DESC(multipath,
+ "turn on native support for multiple controllers per subsystem");
+
+-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+-{
+- return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
+-}
+-
+ /*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+@@ -614,7 +609,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ {
+ int error;
+
+- if (!nvme_ctrl_use_ana(ctrl))
++ /* check if multipath is enabled and we have the capability */
++ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
+ return 0;
+
+ ctrl->anacap = id->anacap;
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 55553d293a98..7391cd0a7739 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -472,7 +472,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
+ extern const struct block_device_operations nvme_ns_head_ops;
+
+ #ifdef CONFIG_NVME_MULTIPATH
+-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
++static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
++{
++ return ctrl->ana_log_buf != NULL;
++}
++
+ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags);
+ void nvme_failover_req(struct request *req);
+diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
+index 3a546ec10d90..22a65ad4e46e 100644
+--- a/drivers/pps/pps.c
++++ b/drivers/pps/pps.c
+@@ -152,6 +152,14 @@ static long pps_cdev_ioctl(struct file *file,
+ pps->params.mode |= PPS_CANWAIT;
+ pps->params.api_version = PPS_API_VERS;
+
++ /*
++ * Clear unused fields of pps_kparams to avoid leaking
++ * uninitialized data of the PPS_SETPARAMS caller via
++ * PPS_GETPARAMS
++ */
++ pps->params.assert_off_tu.flags = 0;
++ pps->params.clear_off_tu.flags = 0;
++
+ spin_unlock_irq(&pps->lock);
+
+ break;
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 0176241eaea7..7754d7679122 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1263,20 +1263,22 @@ static int send_cap_msg(struct cap_msg_args *arg)
+ }
+
+ /*
+- * Queue cap releases when an inode is dropped from our cache. Since
+- * inode is about to be destroyed, there is no need for i_ceph_lock.
++ * Queue cap releases when an inode is dropped from our cache.
+ */
+-void __ceph_remove_caps(struct inode *inode)
++void __ceph_remove_caps(struct ceph_inode_info *ci)
+ {
+- struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+
++ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
++ * may call __ceph_caps_issued_mask() on a freeing inode. */
++ spin_lock(&ci->i_ceph_lock);
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+ p = rb_next(p);
+ __ceph_remove_cap(cap, true);
+ }
++ spin_unlock(&ci->i_ceph_lock);
+ }
+
+ /*
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 5b7d4881a4f8..3c7a32779574 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -536,7 +536,7 @@ void ceph_evict_inode(struct inode *inode)
+
+ ceph_fscache_unregister_inode_cookie(ci);
+
+- __ceph_remove_caps(inode);
++ __ceph_remove_caps(ci);
+
+ if (__ceph_has_any_quota(ci))
+ ceph_adjust_quota_realms_count(inode, false);
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 048409fba1a8..edec39aa5ce2 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -1000,7 +1000,7 @@ extern void ceph_add_cap(struct inode *inode,
+ unsigned cap, unsigned seq, u64 realmino, int flags,
+ struct ceph_cap **new_cap);
+ extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+-extern void __ceph_remove_caps(struct inode* inode);
++extern void __ceph_remove_caps(struct ceph_inode_info *ci);
+ extern void ceph_put_cap(struct ceph_mds_client *mdsc,
+ struct ceph_cap *cap);
+ extern int ceph_is_any_caps(struct inode *inode);
+diff --git a/fs/exec.c b/fs/exec.c
+index 89a500bb897a..39902cc9eb6f 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1828,7 +1828,7 @@ static int __do_execve_file(int fd, struct filename *filename,
+ membarrier_execve(current);
+ rseq_execve(current);
+ acct_update_integrals(current);
+- task_numa_free(current);
++ task_numa_free(current, false);
+ free_bprm(bprm);
+ kfree(pathbuf);
+ if (filename)
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index d7e4f0848e28..4d90f5bf0b0a 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -406,10 +406,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
+ clp = nfs_match_client(cl_init);
+ if (clp) {
+ spin_unlock(&nn->nfs_client_lock);
+- if (IS_ERR(clp))
+- return clp;
+ if (new)
+ new->rpc_ops->free_client(new);
++ if (IS_ERR(clp))
++ return clp;
+ return nfs_found_client(cl_init, clp);
+ }
+ if (new) {
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 03517154fe0f..e781af70d10d 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -209,12 +209,53 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
+ return result;
+ }
+
++/*
++ * If the user used setproctitle(), we just get the string from
++ * user space at arg_start, and limit it to a maximum of one page.
++ */
++static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
++ size_t count, unsigned long pos,
++ unsigned long arg_start)
++{
++ char *page;
++ int ret, got;
++
++ if (pos >= PAGE_SIZE)
++ return 0;
++
++ page = (char *)__get_free_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ ret = 0;
++ got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
++ if (got > 0) {
++ int len = strnlen(page, got);
++
++ /* Include the NUL character if it was found */
++ if (len < got)
++ len++;
++
++ if (len > pos) {
++ len -= pos;
++ if (len > count)
++ len = count;
++ len -= copy_to_user(buf, page+pos, len);
++ if (!len)
++ len = -EFAULT;
++ ret = len;
++ }
++ }
++ free_page((unsigned long)page);
++ return ret;
++}
++
+ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long pos, len;
+- char *page;
++ char *page, c;
+
+ /* Check if process spawned far enough to have cmdline. */
+ if (!mm->env_end)
+@@ -231,28 +272,42 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
+ return 0;
+
+ /*
+- * We have traditionally allowed the user to re-write
+- * the argument strings and overflow the end result
+- * into the environment section. But only do that if
+- * the environment area is contiguous to the arguments.
++ * We allow setproctitle() to overwrite the argument
++ * strings, and overflow past the original end. But
++ * only when it overflows into the environment area.
+ */
+- if (env_start != arg_end || env_start >= env_end)
++ if (env_start != arg_end || env_end < env_start)
+ env_start = env_end = arg_end;
+-
+- /* .. and limit it to a maximum of one page of slop */
+- if (env_end >= arg_end + PAGE_SIZE)
+- env_end = arg_end + PAGE_SIZE - 1;
++ len = env_end - arg_start;
+
+ /* We're not going to care if "*ppos" has high bits set */
+- pos = arg_start + *ppos;
+-
+- /* .. but we do check the result is in the proper range */
+- if (pos < arg_start || pos >= env_end)
++ pos = *ppos;
++ if (pos >= len)
+ return 0;
++ if (count > len - pos)
++ count = len - pos;
++ if (!count)
++ return 0;
++
++ /*
++ * Magical special case: if the argv[] end byte is not
++ * zero, the user has overwritten it with setproctitle(3).
++ *
++ * Possible future enhancement: do this only once when
++ * pos is 0, and set a flag in the 'struct file'.
++ */
++ if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c)
++ return get_mm_proctitle(mm, buf, count, pos, arg_start);
+
+- /* .. and we never go past env_end */
+- if (env_end - pos < count)
+- count = env_end - pos;
++ /*
++ * For the non-setproctitle() case we limit things strictly
++ * to the [arg_start, arg_end[ range.
++ */
++ pos += arg_start;
++ if (pos < arg_start || pos >= arg_end)
++ return 0;
++ if (count > arg_end - pos)
++ count = arg_end - pos;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+@@ -262,48 +317,11 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
+ while (count) {
+ int got;
+ size_t size = min_t(size_t, PAGE_SIZE, count);
+- long offset;
+
+- /*
+- * Are we already starting past the official end?
+- * We always include the last byte that is *supposed*
+- * to be NUL
+- */
+- offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+-
+- got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+- if (got <= offset)
++ got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
++ if (got <= 0)
+ break;
+- got -= offset;
+-
+- /* Don't walk past a NUL character once you hit arg_end */
+- if (pos + got >= arg_end) {
+- int n = 0;
+-
+- /*
+- * If we started before 'arg_end' but ended up
+- * at or after it, we start the NUL character
+- * check at arg_end-1 (where we expect the normal
+- * EOF to be).
+- *
+- * NOTE! This is smaller than 'got', because
+- * pos + got >= arg_end
+- */
+- if (pos < arg_end)
+- n = arg_end - pos - 1;
+-
+- /* Cut off at first NUL after 'n' */
+- got = n + strnlen(page+n, offset+got-n);
+- if (got < offset)
+- break;
+- got -= offset;
+-
+- /* Include the NUL if it existed */
+- if (got < size)
+- got++;
+- }
+-
+- got -= copy_to_user(buf, page+offset, got);
++ got -= copy_to_user(buf, page, got);
+ if (unlikely(!got)) {
+ if (!len)
+ len = -EFAULT;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 11837410690f..1157f6e245af 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1026,7 +1026,15 @@ struct task_struct {
+ u64 last_sum_exec_runtime;
+ struct callback_head numa_work;
+
+- struct numa_group *numa_group;
++ /*
++ * This pointer is only modified for current in syscall and
++ * pagefault context (and for tasks being destroyed), so it can be read
++ * from any of the following contexts:
++ * - RCU read-side critical section
++ * - current->numa_group from everywhere
++ * - task's runqueue locked, task not running
++ */
++ struct numa_group __rcu *numa_group;
+
+ /*
+ * numa_faults is an array split into four regions:
+diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
+index e7dd04a84ba8..3988762efe15 100644
+--- a/include/linux/sched/numa_balancing.h
++++ b/include/linux/sched/numa_balancing.h
+@@ -19,7 +19,7 @@
+ extern void task_numa_fault(int last_node, int node, int pages, int flags);
+ extern pid_t task_numa_group_id(struct task_struct *p);
+ extern void set_numabalancing_state(bool enabled);
+-extern void task_numa_free(struct task_struct *p);
++extern void task_numa_free(struct task_struct *p, bool final);
+ extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+ int src_nid, int dst_cpu);
+ #else
+@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
+ static inline void set_numabalancing_state(bool enabled)
+ {
+ }
+-static inline void task_numa_free(struct task_struct *p)
++static inline void task_numa_free(struct task_struct *p, bool final)
+ {
+ }
+ static inline bool should_numa_migrate_memory(struct task_struct *p,
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index cad09858a5f2..546ebee39e2a 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -1928,8 +1928,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
+ /* Check array->index_type */
+ index_type_id = array->index_type;
+ index_type = btf_type_by_id(btf, index_type_id);
+- if (btf_type_is_resolve_source_only(index_type) ||
+- btf_type_nosize_or_null(index_type)) {
++ if (btf_type_nosize_or_null(index_type) ||
++ btf_type_is_resolve_source_only(index_type)) {
+ btf_verifier_log_type(env, v->t, "Invalid index");
+ return -EINVAL;
+ }
+@@ -1948,8 +1948,8 @@ static int btf_array_resolve(struct btf_verifier_env *env,
+ /* Check array->type */
+ elem_type_id = array->type;
+ elem_type = btf_type_by_id(btf, elem_type_id);
+- if (btf_type_is_resolve_source_only(elem_type) ||
+- btf_type_nosize_or_null(elem_type)) {
++ if (btf_type_nosize_or_null(elem_type) ||
++ btf_type_is_resolve_source_only(elem_type)) {
+ btf_verifier_log_type(env, v->t,
+ "Invalid elem");
+ return -EINVAL;
+@@ -2170,8 +2170,8 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
+ const struct btf_type *member_type = btf_type_by_id(env->btf,
+ member_type_id);
+
+- if (btf_type_is_resolve_source_only(member_type) ||
+- btf_type_nosize_or_null(member_type)) {
++ if (btf_type_nosize_or_null(member_type) ||
++ btf_type_is_resolve_source_only(member_type)) {
+ btf_verifier_log_member(env, v->t, member,
+ "Invalid member");
+ return -EINVAL;
+diff --git a/kernel/fork.c b/kernel/fork.c
+index fe83343da24b..d3f006ed2f9d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -727,7 +727,7 @@ void __put_task_struct(struct task_struct *tsk)
+ WARN_ON(tsk == current);
+
+ cgroup_free(tsk);
+- task_numa_free(tsk);
++ task_numa_free(tsk, true);
+ security_task_free(tsk);
+ exit_creds(tsk);
+ delayacct_tsk_free(tsk);
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f35930f5e528..9ecf1e4c624b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1067,6 +1067,21 @@ struct numa_group {
+ unsigned long faults[0];
+ };
+
++/*
++ * For functions that can be called in multiple contexts that permit reading
++ * ->numa_group (see struct task_struct for locking rules).
++ */
++static struct numa_group *deref_task_numa_group(struct task_struct *p)
++{
++ return rcu_dereference_check(p->numa_group, p == current ||
++ (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
++}
++
++static struct numa_group *deref_curr_numa_group(struct task_struct *p)
++{
++ return rcu_dereference_protected(p->numa_group, p == current);
++}
++
+ static inline unsigned long group_faults_priv(struct numa_group *ng);
+ static inline unsigned long group_faults_shared(struct numa_group *ng);
+
+@@ -1110,10 +1125,12 @@ static unsigned int task_scan_start(struct task_struct *p)
+ {
+ unsigned long smin = task_scan_min(p);
+ unsigned long period = smin;
++ struct numa_group *ng;
+
+ /* Scale the maximum scan period with the amount of shared memory. */
+- if (p->numa_group) {
+- struct numa_group *ng = p->numa_group;
++ rcu_read_lock();
++ ng = rcu_dereference(p->numa_group);
++ if (ng) {
+ unsigned long shared = group_faults_shared(ng);
+ unsigned long private = group_faults_priv(ng);
+
+@@ -1121,6 +1138,7 @@ static unsigned int task_scan_start(struct task_struct *p)
+ period *= shared + 1;
+ period /= private + shared + 1;
+ }
++ rcu_read_unlock();
+
+ return max(smin, period);
+ }
+@@ -1129,13 +1147,14 @@ static unsigned int task_scan_max(struct task_struct *p)
+ {
+ unsigned long smin = task_scan_min(p);
+ unsigned long smax;
++ struct numa_group *ng;
+
+ /* Watch for min being lower than max due to floor calculations */
+ smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
+
+ /* Scale the maximum scan period with the amount of shared memory. */
+- if (p->numa_group) {
+- struct numa_group *ng = p->numa_group;
++ ng = deref_curr_numa_group(p);
++ if (ng) {
+ unsigned long shared = group_faults_shared(ng);
+ unsigned long private = group_faults_priv(ng);
+ unsigned long period = smax;
+@@ -1167,7 +1186,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
+ p->numa_scan_period = sysctl_numa_balancing_scan_delay;
+ p->numa_work.next = &p->numa_work;
+ p->numa_faults = NULL;
+- p->numa_group = NULL;
++ RCU_INIT_POINTER(p->numa_group, NULL);
+ p->last_task_numa_placement = 0;
+ p->last_sum_exec_runtime = 0;
+
+@@ -1214,7 +1233,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
+
+ pid_t task_numa_group_id(struct task_struct *p)
+ {
+- return p->numa_group ? p->numa_group->gid : 0;
++ struct numa_group *ng;
++ pid_t gid = 0;
++
++ rcu_read_lock();
++ ng = rcu_dereference(p->numa_group);
++ if (ng)
++ gid = ng->gid;
++ rcu_read_unlock();
++
++ return gid;
+ }
+
+ /*
+@@ -1239,11 +1267,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid)
+
+ static inline unsigned long group_faults(struct task_struct *p, int nid)
+ {
+- if (!p->numa_group)
++ struct numa_group *ng = deref_task_numa_group(p);
++
++ if (!ng)
+ return 0;
+
+- return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+- p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
++ return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
++ ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
+ }
+
+ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
+@@ -1381,12 +1411,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
+ static inline unsigned long group_weight(struct task_struct *p, int nid,
+ int dist)
+ {
++ struct numa_group *ng = deref_task_numa_group(p);
+ unsigned long faults, total_faults;
+
+- if (!p->numa_group)
++ if (!ng)
+ return 0;
+
+- total_faults = p->numa_group->total_faults;
++ total_faults = ng->total_faults;
+
+ if (!total_faults)
+ return 0;
+@@ -1400,7 +1431,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
+ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
+ int src_nid, int dst_cpu)
+ {
+- struct numa_group *ng = p->numa_group;
++ struct numa_group *ng = deref_curr_numa_group(p);
+ int dst_nid = cpu_to_node(dst_cpu);
+ int last_cpupid, this_cpupid;
+
+@@ -1583,13 +1614,14 @@ static bool load_too_imbalanced(long src_load, long dst_load,
+ static void task_numa_compare(struct task_numa_env *env,
+ long taskimp, long groupimp, bool maymove)
+ {
++ struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
+ struct rq *dst_rq = cpu_rq(env->dst_cpu);
++ long imp = p_ng ? groupimp : taskimp;
+ struct task_struct *cur;
+ long src_load, dst_load;
+- long load;
+- long imp = env->p->numa_group ? groupimp : taskimp;
+- long moveimp = imp;
+ int dist = env->dist;
++ long moveimp = imp;
++ long load;
+
+ if (READ_ONCE(dst_rq->numa_migrate_on))
+ return;
+@@ -1628,21 +1660,22 @@ static void task_numa_compare(struct task_numa_env *env,
+ * If dst and source tasks are in the same NUMA group, or not
+ * in any group then look only at task weights.
+ */
+- if (cur->numa_group == env->p->numa_group) {
++ cur_ng = rcu_dereference(cur->numa_group);
++ if (cur_ng == p_ng) {
+ imp = taskimp + task_weight(cur, env->src_nid, dist) -
+ task_weight(cur, env->dst_nid, dist);
+ /*
+ * Add some hysteresis to prevent swapping the
+ * tasks within a group over tiny differences.
+ */
+- if (cur->numa_group)
++ if (cur_ng)
+ imp -= imp / 16;
+ } else {
+ /*
+ * Compare the group weights. If a task is all by itself
+ * (not part of a group), use the task weight instead.
+ */
+- if (cur->numa_group && env->p->numa_group)
++ if (cur_ng && p_ng)
+ imp += group_weight(cur, env->src_nid, dist) -
+ group_weight(cur, env->dst_nid, dist);
+ else
+@@ -1740,11 +1773,12 @@ static int task_numa_migrate(struct task_struct *p)
+ .best_imp = 0,
+ .best_cpu = -1,
+ };
++ unsigned long taskweight, groupweight;
+ struct sched_domain *sd;
++ long taskimp, groupimp;
++ struct numa_group *ng;
+ struct rq *best_rq;
+- unsigned long taskweight, groupweight;
+ int nid, ret, dist;
+- long taskimp, groupimp;
+
+ /*
+ * Pick the lowest SD_NUMA domain, as that would have the smallest
+@@ -1790,7 +1824,8 @@ static int task_numa_migrate(struct task_struct *p)
+ * multiple NUMA nodes; in order to better consolidate the group,
+ * we need to check other locations.
+ */
+- if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
++ ng = deref_curr_numa_group(p);
++ if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
+ for_each_online_node(nid) {
+ if (nid == env.src_nid || nid == p->numa_preferred_nid)
+ continue;
+@@ -1823,7 +1858,7 @@ static int task_numa_migrate(struct task_struct *p)
+ * A task that migrated to a second choice node will be better off
+ * trying for a better one later. Do not set the preferred node here.
+ */
+- if (p->numa_group) {
++ if (ng) {
+ if (env.best_cpu == -1)
+ nid = env.src_nid;
+ else
+@@ -2118,6 +2153,7 @@ static void task_numa_placement(struct task_struct *p)
+ unsigned long total_faults;
+ u64 runtime, period;
+ spinlock_t *group_lock = NULL;
++ struct numa_group *ng;
+
+ /*
+ * The p->mm->numa_scan_seq field gets updated without
+@@ -2135,8 +2171,9 @@ static void task_numa_placement(struct task_struct *p)
+ runtime = numa_get_avg_runtime(p, &period);
+
+ /* If the task is part of a group prevent parallel updates to group stats */
+- if (p->numa_group) {
+- group_lock = &p->numa_group->lock;
++ ng = deref_curr_numa_group(p);
++ if (ng) {
++ group_lock = &ng->lock;
+ spin_lock_irq(group_lock);
+ }
+
+@@ -2177,7 +2214,7 @@ static void task_numa_placement(struct task_struct *p)
+ p->numa_faults[cpu_idx] += f_diff;
+ faults += p->numa_faults[mem_idx];
+ p->total_numa_faults += diff;
+- if (p->numa_group) {
++ if (ng) {
+ /*
+ * safe because we can only change our own group
+ *
+@@ -2185,14 +2222,14 @@ static void task_numa_placement(struct task_struct *p)
+ * nid and priv in a specific region because it
+ * is at the beginning of the numa_faults array.
+ */
+- p->numa_group->faults[mem_idx] += diff;
+- p->numa_group->faults_cpu[mem_idx] += f_diff;
+- p->numa_group->total_faults += diff;
+- group_faults += p->numa_group->faults[mem_idx];
++ ng->faults[mem_idx] += diff;
++ ng->faults_cpu[mem_idx] += f_diff;
++ ng->total_faults += diff;
++ group_faults += ng->faults[mem_idx];
+ }
+ }
+
+- if (!p->numa_group) {
++ if (!ng) {
+ if (faults > max_faults) {
+ max_faults = faults;
+ max_nid = nid;
+@@ -2203,8 +2240,8 @@ static void task_numa_placement(struct task_struct *p)
+ }
+ }
+
+- if (p->numa_group) {
+- numa_group_count_active_nodes(p->numa_group);
++ if (ng) {
++ numa_group_count_active_nodes(ng);
+ spin_unlock_irq(group_lock);
+ max_nid = preferred_group_nid(p, max_nid);
+ }
+@@ -2238,7 +2275,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
+ int cpu = cpupid_to_cpu(cpupid);
+ int i;
+
+- if (unlikely(!p->numa_group)) {
++ if (unlikely(!deref_curr_numa_group(p))) {
+ unsigned int size = sizeof(struct numa_group) +
+ 4*nr_node_ids*sizeof(unsigned long);
+
+@@ -2274,7 +2311,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
+ if (!grp)
+ goto no_join;
+
+- my_grp = p->numa_group;
++ my_grp = deref_curr_numa_group(p);
+ if (grp == my_grp)
+ goto no_join;
+
+@@ -2336,13 +2373,24 @@ no_join:
+ return;
+ }
+
+-void task_numa_free(struct task_struct *p)
++/*
++ * Get rid of NUMA statistics associated with a task (either current or dead).
++ * If @final is set, the task is dead and has reached refcount zero, so we can
++ * safely free all relevant data structures. Otherwise, there might be
++ * concurrent reads from places like load balancing and procfs, and we should
++ * reset the data back to default state without freeing ->numa_faults.
++ */
++void task_numa_free(struct task_struct *p, bool final)
+ {
+- struct numa_group *grp = p->numa_group;
+- void *numa_faults = p->numa_faults;
++ /* safe: p either is current or is being freed by current */
++ struct numa_group *grp = rcu_dereference_raw(p->numa_group);
++ unsigned long *numa_faults = p->numa_faults;
+ unsigned long flags;
+ int i;
+
++ if (!numa_faults)
++ return;
++
+ if (grp) {
+ spin_lock_irqsave(&grp->lock, flags);
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+@@ -2355,8 +2403,14 @@ void task_numa_free(struct task_struct *p)
+ put_numa_group(grp);
+ }
+
+- p->numa_faults = NULL;
+- kfree(numa_faults);
++ if (final) {
++ p->numa_faults = NULL;
++ kfree(numa_faults);
++ } else {
++ p->total_numa_faults = 0;
++ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
++ numa_faults[i] = 0;
++ }
+ }
+
+ /*
+@@ -2409,7 +2463,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
+ * actively using should be counted as local. This allows the
+ * scan rate to slow down when a workload has settled down.
+ */
+- ng = p->numa_group;
++ ng = deref_curr_numa_group(p);
+ if (!priv && !local && ng && ng->active_nodes > 1 &&
+ numa_is_active_node(cpu_node, ng) &&
+ numa_is_active_node(mem_node, ng))
+@@ -10708,18 +10762,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
+ {
+ int node;
+ unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
++ struct numa_group *ng;
+
++ rcu_read_lock();
++ ng = rcu_dereference(p->numa_group);
+ for_each_online_node(node) {
+ if (p->numa_faults) {
+ tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
+ tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
+ }
+- if (p->numa_group) {
+- gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
+- gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
++ if (ng) {
++ gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
++ gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
+ }
+ print_numa_stats(m, node, tsf, tpf, gsf, gpf);
+ }
++ rcu_read_unlock();
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ #endif /* CONFIG_SCHED_DEBUG */
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 169112f8aa1e..ab47bf3ab66e 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -274,7 +274,8 @@ EXPORT_SYMBOL_GPL(vsock_insert_connected);
+ void vsock_remove_bound(struct vsock_sock *vsk)
+ {
+ spin_lock_bh(&vsock_table_lock);
+- __vsock_remove_bound(vsk);
++ if (__vsock_in_bound_table(vsk))
++ __vsock_remove_bound(vsk);
+ spin_unlock_bh(&vsock_table_lock);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_bound);
+@@ -282,7 +283,8 @@ EXPORT_SYMBOL_GPL(vsock_remove_bound);
+ void vsock_remove_connected(struct vsock_sock *vsk)
+ {
+ spin_lock_bh(&vsock_table_lock);
+- __vsock_remove_connected(vsk);
++ if (__vsock_in_connected_table(vsk))
++ __vsock_remove_connected(vsk);
+ spin_unlock_bh(&vsock_table_lock);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_connected);
+@@ -318,35 +320,10 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
+ }
+ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
+
+-static bool vsock_in_bound_table(struct vsock_sock *vsk)
+-{
+- bool ret;
+-
+- spin_lock_bh(&vsock_table_lock);
+- ret = __vsock_in_bound_table(vsk);
+- spin_unlock_bh(&vsock_table_lock);
+-
+- return ret;
+-}
+-
+-static bool vsock_in_connected_table(struct vsock_sock *vsk)
+-{
+- bool ret;
+-
+- spin_lock_bh(&vsock_table_lock);
+- ret = __vsock_in_connected_table(vsk);
+- spin_unlock_bh(&vsock_table_lock);
+-
+- return ret;
+-}
+-
+ void vsock_remove_sock(struct vsock_sock *vsk)
+ {
+- if (vsock_in_bound_table(vsk))
+- vsock_remove_bound(vsk);
+-
+- if (vsock_in_connected_table(vsk))
+- vsock_remove_connected(vsk);
++ vsock_remove_bound(vsk);
++ vsock_remove_connected(vsk);
+ }
+ EXPORT_SYMBOL_GPL(vsock_remove_sock);
+
+@@ -477,8 +454,7 @@ static void vsock_pending_work(struct work_struct *work)
+ * incoming packets can't find this socket, and to reduce the reference
+ * count.
+ */
+- if (vsock_in_connected_table(vsk))
+- vsock_remove_connected(vsk);
++ vsock_remove_connected(vsk);
+
+ sk->sk_state = TCP_CLOSE;
+
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index b1694d5d15d3..82be7780bbe8 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1280,13 +1280,17 @@ static void xfrm_hash_rebuild(struct work_struct *work)
+
+ hlist_for_each_entry_safe(policy, n,
+ &net->xfrm.policy_inexact[dir],
+- bydst_inexact_list)
++ bydst_inexact_list) {
++ hlist_del_rcu(&policy->bydst);
+ hlist_del_init(&policy->bydst_inexact_list);
++ }
+
+ hmask = net->xfrm.policy_bydst[dir].hmask;
+ odst = net->xfrm.policy_bydst[dir].table;
+- for (i = hmask; i >= 0; i--)
+- INIT_HLIST_HEAD(odst + i);
++ for (i = hmask; i >= 0; i--) {
++ hlist_for_each_entry_safe(policy, n, odst + i, bydst)
++ hlist_del_rcu(&policy->bydst);
++ }
+ if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
+ /* dir out => dst = remote, src = local */
+ net->xfrm.policy_bydst[dir].dbits4 = rbits4;
+@@ -1315,8 +1319,6 @@ static void xfrm_hash_rebuild(struct work_struct *work)
+ chain = policy_hash_bysel(net, &policy->selector,
+ policy->family, dir);
+
+- hlist_del_rcu(&policy->bydst);
+-
+ if (!chain) {
+ void *p = xfrm_policy_inexact_insert(policy, dir, 0);
+
+diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
+index 71d7fdc513c1..5445943bf07f 100755
+--- a/tools/testing/selftests/net/xfrm_policy.sh
++++ b/tools/testing/selftests/net/xfrm_policy.sh
+@@ -257,6 +257,29 @@ check_exceptions()
+ return $lret
+ }
+
++check_hthresh_repeat()
++{
++ local log=$1
++ i=0
++
++ for i in $(seq 1 10);do
++ ip -net ns1 xfrm policy update src e000:0001::0000 dst ff01::0014:0000:0001 dir in tmpl src :: dst :: proto esp mode tunnel priority 100 action allow || break
++ ip -net ns1 xfrm policy set hthresh6 0 28 || break
++
++ ip -net ns1 xfrm policy update src e000:0001::0000 dst ff01::01 dir in tmpl src :: dst :: proto esp mode tunnel priority 100 action allow || break
++ ip -net ns1 xfrm policy set hthresh6 0 28 || break
++ done
++
++ if [ $i -ne 10 ] ;then
++ echo "FAIL: $log" 1>&2
++ ret=1
++ return 1
++ fi
++
++ echo "PASS: $log"
++ return 0
++}
++
+ #check for needed privileges
+ if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+@@ -404,7 +427,9 @@ for n in ns3 ns4;do
+ ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128
+ sleep $((RANDOM%5))
+ done
+-check_exceptions "exceptions and block policies after hresh change to normal"
++check_exceptions "exceptions and block policies after htresh change to normal"
++
++check_hthresh_repeat "policies with repeated htresh change"
+
+ for i in 1 2 3 4;do ip netns del ns$i;done
+
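A note on the largest series of hunks above (kernel/sched/fair.c): the fix converts p->numa_group from a plain pointer into an RCU-protected one, so that procfs readers and the load balancer can look at it while the owning task rewrites or frees it. For readers new to that idiom, a minimal sketch follows; the identifiers (struct foo, struct owner, read_gid(), replace_foo()) are hypothetical, and this illustrates the general RCU pointer pattern rather than reproducing code from the patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int gid;
	struct rcu_head rcu;
};

struct owner {
	struct foo __rcu *ptr;		/* written only by its owner */
};

/* Reader side: any context may look, as long as it holds the RCU
 * read lock around both the dereference and the use of the object. */
static int read_gid(struct owner *o)
{
	struct foo *f;
	int gid = 0;

	rcu_read_lock();
	f = rcu_dereference(o->ptr);
	if (f)
		gid = f->gid;
	rcu_read_unlock();

	return gid;
}

/* Writer side: the single writer publishes the new object first, then
 * frees the old one only after a grace period, so a concurrent reader
 * sees either the old or the new object but never freed memory. */
static void replace_foo(struct owner *o, struct foo *new)
{
	/* 'true' asserts we are the sole writer; the patch uses stricter
	 * lockdep conditions in deref_task_numa_group() instead. */
	struct foo *old = rcu_dereference_protected(o->ptr, true);

	rcu_assign_pointer(o->ptr, new);
	if (old)
		kfree_rcu(old, rcu);
}

In the patch itself, deref_task_numa_group() and deref_curr_numa_group() play the reader-side role, and task_numa_free() is the writer that must leave nothing dangling for concurrent readers such as show_numa_stats().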
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-31 14:53 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-31 14:53 UTC (permalink / raw
To: gentoo-commits
commit: 4b5ce4d1c2e83a77090bd0654ef4524a4539b961
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 31 14:53:32 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 31 14:53:32 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4b5ce4d1
mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +++
1800_vmalloc-sync-unmappings-fix.patch | 58 ++++++++++++++++++++++++++++++++++
2 files changed, 62 insertions(+)
diff --git a/0000_README b/0000_README
index 983b9f0..01e534c 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
+Patch: 1800_vmalloc-sync-unmappings-fix.patch
+From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167
+Desc: mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
+
Patch: 2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch
From: https://lore.kernel.org/linux-bluetooth/20190522070540.48895-1-marcel@holtmann.org/raw
Desc: Bluetooth: Check key sizes only when Secure Simple Pairing is enabled. See bug #686758
diff --git a/1800_vmalloc-sync-unmappings-fix.patch b/1800_vmalloc-sync-unmappings-fix.patch
new file mode 100644
index 0000000..7e56e51
--- /dev/null
+++ b/1800_vmalloc-sync-unmappings-fix.patch
@@ -0,0 +1,58 @@
+From 3f8fd02b1bf1d7ba964485a56f2f4b53ae88c167 Mon Sep 17 00:00:00 2001
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 19 Jul 2019 20:46:52 +0200
+Subject: mm/vmalloc: Sync unmappings in __purge_vmap_area_lazy()
+
+On x86-32 with PTI enabled, parts of the kernel page-tables are not shared
+between processes. This can cause mappings in the vmalloc/ioremap area to
+persist in some page-tables after the region is unmapped and released.
+
+When the region is re-used the processes with the old mappings do not fault
+in the new mappings but still access the old ones.
+
+This causes undefined behavior, in reality often data corruption, kernel
+oopses and panics and even spontaneous reboots.
+
+Fix this problem by actively syncing unmaps in the vmalloc/ioremap area to
+all page-tables in the system before the regions can be re-used.
+
+References: https://bugzilla.suse.com/show_bug.cgi?id=1118689
+Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lkml.kernel.org/r/20190719184652.11391-4-joro@8bytes.org
+---
+ mm/vmalloc.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 4fa8d84599b0..e0fc963acc41 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1258,6 +1258,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+ if (unlikely(valist == NULL))
+ return false;
+
++ /*
++ * First make sure the mappings are removed from all page-tables
++ * before they are freed.
++ */
++ vmalloc_sync_all();
++
+ /*
+ * TODO: to calculate a flush range without looping.
+ * The list can be up to lazy_max_pages() elements.
+@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
+ /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
++ *
++ * The purpose of this function is to make sure the vmalloc area
++ * mappings are identical in all page-tables in the system.
+ */
+ void __weak vmalloc_sync_all(void)
+ {
+--
+cgit 1.2-0.3.lf.el7
+
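The functional change in this commit is an ordering constraint: stale mappings must be purged from every page-table before a lazily freed vmalloc area can be reused. A condensed sketch of that ordering follows; the helper names are hypothetical stand-ins for the real mm/vmalloc.c logic, and only vmalloc_sync_all() is the actual interface involved:

#include <linux/vmalloc.h>

/* Hypothetical stand-in for the TLB-flush-and-free step. */
static void flush_tlb_and_free_ranges(void)
{
}

/* Hypothetical condensation of the fixed purge path; not real kernel code. */
static void purge_lazy_vmap_areas(void)
{
	/*
	 * 1. Propagate the unmappings to all page-tables in the system
	 *    first, so no process keeps a stale translation (the x86-32
	 *    PTI failure mode described in the commit message above).
	 */
	vmalloc_sync_all();

	/*
	 * 2. Only then flush the TLB and hand the address ranges back to
	 *    the allocator for reuse.
	 */
	flush_tlb_and_free_ranges();
}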
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-31 10:11 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-31 10:11 UTC (permalink / raw
To: gentoo-commits
commit: 472a7a400815ff96bb67b3245ec107d9783f8590
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 31 10:11:05 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 31 10:11:05 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=472a7a40
Linux patch 5.2.5
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1004_linux-5.2.5.patch | 7465 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 7469 insertions(+)
diff --git a/0000_README b/0000_README
index ff4bd8b..983b9f0 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch: 1003_linux-5.2.4.patch
From: https://www.kernel.org
Desc: Linux 5.2.4
+Patch: 1004_linux-5.2.5.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.5
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1004_linux-5.2.5.patch b/1004_linux-5.2.5.patch
new file mode 100644
index 0000000..0cbf6b4
--- /dev/null
+++ b/1004_linux-5.2.5.patch
@@ -0,0 +1,7465 @@
+diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
+new file mode 100644
+index 000000000000..a30d63db3c8f
+--- /dev/null
++++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
+@@ -0,0 +1,9 @@
++Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
++an adapter board.
++
++Required properties:
++- compatible: "armadeus,st0700-adapt"
++- power-supply: see panel-common.txt
++
++Optional properties:
++- backlight: see panel-common.txt
+diff --git a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+index 4d61fe0a98a4..dc129d9a329e 100644
+--- a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
++++ b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
+@@ -23,16 +23,17 @@ properties:
+ reg:
+ maxItems: 1
+
+- ti,linear-mapping-mode:
+- description: |
+- Enable linear mapping mode. If disabled, then it will use exponential
+- mapping mode in which the ramp up/down appears to have a more uniform
+- transition to the human eye.
+- type: boolean
++ '#address-cells':
++ const: 1
++
++ '#size-cells':
++ const: 0
+
+ required:
+ - compatible
+ - reg
++ - '#address-cells'
++ - '#size-cells'
+
+ patternProperties:
+ "^led@[01]$":
+@@ -48,7 +49,6 @@ patternProperties:
+ in this property. The two current sinks can be controlled
+ independently with both banks, or bank A can be configured to control
+ both sinks with the led-sources property.
+- maxItems: 1
+ minimum: 0
+ maximum: 1
+
+@@ -73,6 +73,13 @@ patternProperties:
+ minimum: 0
+ maximum: 255
+
++ ti,linear-mapping-mode:
++ description: |
++ Enable linear mapping mode. If disabled, then it will use exponential
++ mapping mode in which the ramp up/down appears to have a more uniform
++ transition to the human eye.
++ type: boolean
++
+ required:
+ - reg
+
+diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
+index bc7945e9dbfe..17915f64b8ee 100644
+--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
++++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
+@@ -64,10 +64,8 @@ Optional properties :
+ - power-on-time-ms : Specifies the time it takes from the time the host
+ initiates the power-on sequence to a port until the port has adequate
+ power. The value is given in ms in a 0 - 510 range (default is 100ms).
+- - swap-dx-lanes : Specifies the downstream ports which will swap the
+- differential-pair (D+/D-), default is not-swapped.
+- - swap-us-lanes : Selects the upstream port differential-pair (D+/D-)
+- swapping (boolean, default is not-swapped)
++ - swap-dx-lanes : Specifies the ports which will swap the differential-pair
++ (D+/D-), default is not-swapped.
+
+ Examples:
+ usb2512b@2c {
+diff --git a/Makefile b/Makefile
+index 68ee97784c4d..78bd926c8439 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+@@ -528,6 +528,7 @@ ifneq ($(GCC_TOOLCHAIN),)
+ CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
+ endif
+ CLANG_FLAGS += -no-integrated-as
++CLANG_FLAGS += -Werror=unknown-warning-option
+ KBUILD_CFLAGS += $(CLANG_FLAGS)
+ KBUILD_AFLAGS += $(CLANG_FLAGS)
+ export CLANG_FLAGS
+diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
+index 570d195a184d..e3a15c751b13 100644
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -96,7 +96,11 @@
+ * RAS Error Synchronization barrier
+ */
+ .macro esb
++#ifdef CONFIG_ARM64_RAS_EXTN
+ hint #16
++#else
++ nop
++#endif
+ .endm
+
+ /*
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 8c1c636308c8..f7a363cbc1bb 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -121,6 +121,7 @@ config PPC
+ select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
++ select ARCH_HAS_DMA_MMAP_PGPROT
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_GCOV_PROFILE_ALL
+diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
+index e22e5b3770dd..ebfadd39e192 100644
+--- a/arch/powerpc/boot/xz_config.h
++++ b/arch/powerpc/boot/xz_config.h
+@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
+
+ #ifdef __LITTLE_ENDIAN__
+ #define get_le32(p) (*((uint32_t *) (p)))
++#define cpu_to_be32(x) swab32(x)
++static inline u32 be32_to_cpup(const u32 *p)
++{
++ return swab32p((u32 *)p);
++}
+ #else
+ #define get_le32(p) swab32p(p)
++#define cpu_to_be32(x) (x)
++static inline u32 be32_to_cpup(const u32 *p)
++{
++ return *p;
++}
+ #endif
+
++static inline uint32_t get_unaligned_be32(const void *p)
++{
++ return be32_to_cpup(p);
++}
++
++static inline void put_unaligned_be32(u32 val, void *p)
++{
++ *((u32 *)p) = cpu_to_be32(val);
++}
++
+ #define memeq(a, b, size) (memcmp(a, b, size) == 0)
+ #define memzero(buf, size) memset(buf, 0, size)
+
+diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
+index 74d60cfe8ce5..fd318f7c3eed 100644
+--- a/arch/powerpc/include/asm/cacheflush.h
++++ b/arch/powerpc/include/asm/cacheflush.h
+@@ -29,9 +29,12 @@
+ * not expect this type of fault. flush_cache_vmap is not exactly the right
+ * place to put this, but it seems to work well enough.
+ */
+-#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
++static inline void flush_cache_vmap(unsigned long start, unsigned long end)
++{
++ asm volatile("ptesync" ::: "memory");
++}
+ #else
+-#define flush_cache_vmap(start, end) do { } while (0)
++static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+ #endif
+
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
+index dc9a1ca70edf..c6bbe9778d3c 100644
+--- a/arch/powerpc/include/asm/pmc.h
++++ b/arch/powerpc/include/asm/pmc.h
+@@ -27,11 +27,10 @@ static inline void ppc_set_pmu_inuse(int inuse)
+ #ifdef CONFIG_PPC_PSERIES
+ get_lppaca()->pmcregs_in_use = inuse;
+ #endif
+- } else {
++ }
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- get_paca()->pmcregs_in_use = inuse;
++ get_paca()->pmcregs_in_use = inuse;
+ #endif
+- }
+ #endif
+ }
+
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 0ea6c4aa3a20..21dfff2b25a1 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -49,7 +49,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
+ signal.o sysfs.o cacheinfo.o time.o \
+ prom.o traps.o setup-common.o \
+ udbg.o misc.o io.o misc_$(BITS).o \
+- of_platform.o prom_parse.o
++ of_platform.o prom_parse.o \
++ dma-common.o
+ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
+ signal_64.o ptrace32.o \
+ paca.o nvram_64.o firmware.o
+diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
+new file mode 100644
+index 000000000000..dc7ef6b17b69
+--- /dev/null
++++ b/arch/powerpc/kernel/dma-common.c
+@@ -0,0 +1,17 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Contains common dma routines for all powerpc platforms.
++ *
++ * Copyright (C) 2019 Shawn Anastasio.
++ */
++
++#include <linux/mm.h>
++#include <linux/dma-noncoherent.h>
++
++pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
++ unsigned long attrs)
++{
++ if (!dev_is_dma_coherent(dev))
++ return pgprot_noncached(prot);
++ return prot;
++}
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index f192d57db47d..c0e4b73191f3 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -354,10 +354,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
+ ptep = find_init_mm_pte(token, &hugepage_shift);
+ if (!ptep)
+ return token;
+- WARN_ON(hugepage_shift);
+- pa = pte_pfn(*ptep) << PAGE_SHIFT;
+
+- return pa | (token & (PAGE_SIZE-1));
++ pa = pte_pfn(*ptep);
++
++ /* On radix we can do hugepage mappings for io, so handle that */
++ if (hugepage_shift) {
++ pa <<= hugepage_shift;
++ pa |= token & ((1ul << hugepage_shift) - 1);
++ } else {
++ pa <<= PAGE_SHIFT;
++ pa |= token & (PAGE_SIZE - 1);
++ }
++
++ return pa;
+ }
+
+ /*
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index a293a53b4365..50262597c222 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -370,6 +370,11 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
+ bool dawr_force_enable;
+ EXPORT_SYMBOL_GPL(dawr_force_enable);
+
++static void set_dawr_cb(void *info)
++{
++ set_dawr(info);
++}
++
+ static ssize_t dawr_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+@@ -389,7 +394,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
+
+ /* If we are clearing, make sure all CPUs have the DAWR cleared */
+ if (!dawr_force_enable)
+- smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
++ smp_call_function(set_dawr_cb, &null_brk, 0);
+
+ return rc;
+ }
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index bc68c53af67c..5645bc9cbc09 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -255,7 +255,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ irq_happened = get_irq_happened();
+ if (!irq_happened) {
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+- WARN_ON(!(mfmsr() & MSR_EE));
++ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ #endif
+ return;
+ }
+@@ -268,7 +268,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+- WARN_ON(!(mfmsr() & MSR_EE));
++ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+ #endif
+ __hard_irq_disable();
+ #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+@@ -279,7 +279,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
+ * warn if we are wrong. Only do that when IRQ tracing
+ * is enabled as mfmsr() can be costly.
+ */
+- if (WARN_ON(mfmsr() & MSR_EE))
++ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
+index 24522aa37665..c63c53b37e8e 100644
+--- a/arch/powerpc/kernel/pci_of_scan.c
++++ b/arch/powerpc/kernel/pci_of_scan.c
+@@ -42,6 +42,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
+ if (addr0 & 0x02000000) {
+ flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
+ flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
++ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
++ flags |= IORESOURCE_MEM_64;
+ flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
+ if (addr0 & 0x40000000)
+ flags |= IORESOURCE_PREFETCH
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index b824f4c69622..fff2eb22427d 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -980,10 +980,9 @@ int rtas_ibm_suspend_me(u64 handle)
+ cpu_hotplug_disable();
+
+ /* Check if we raced with a CPU-Offline Operation */
+- if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) {
+- pr_err("%s: Raced against a concurrent CPU-Offline\n",
+- __func__);
+- atomic_set(&data.error, -EBUSY);
++ if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
++ pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
++ atomic_set(&data.error, -EAGAIN);
+ goto out_hotplug_enable;
+ }
+
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index a2b74e057904..ebb78effd280 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ goto bad;
+
+ if (MSR_TM_ACTIVE(msr_hi<<32)) {
++ /* Trying to start TM on non TM system */
++ if (!cpu_has_feature(CPU_FTR_TM))
++ goto bad;
+ /* We only recheckpoint on return if we're
+ * transaction.
+ */
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 4292ea39baa4..bee704f32f96 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ if (MSR_TM_ACTIVE(msr)) {
+ /* We recheckpoint on return. */
+ struct ucontext __user *uc_transact;
++
++ /* Trying to start TM on non TM system */
++ if (!cpu_has_feature(CPU_FTR_TM))
++ goto badframe;
++
+ if (__get_user(uc_transact, &uc->uc_link))
+ goto badframe;
+ if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index ec1804f822af..cde3f5a4b3e4 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+
+ if (kvmhv_on_pseries()) {
++ /*
++ * We need to save and restore the guest visible part of the
++ * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
++ * doesn't do this for us. Note only required if pseries since
++ * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
++ */
++ unsigned long host_psscr;
+ /* call our hypervisor to load up HV regs and go */
+ struct hv_guest_state hvregs;
+
++ host_psscr = mfspr(SPRN_PSSCR_PR);
++ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+@@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
++ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
++ mtspr(SPRN_PSSCR_PR, host_psscr);
+
+ /* H_CEDE has to be handled now, not later */
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+@@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ vcpu->arch.vpa.dirty = 1;
+ save_pmu = lp->pmcregs_in_use;
+ }
++ /* Must save pmu if this guest is capable of running nested guests */
++ save_pmu |= nesting_enabled(vcpu->kvm);
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
+
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index 6ca0d7376a9f..e3ba67095895 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+
+ xive->single_escalation = xive_native_has_single_escalation();
+
+- if (ret) {
+- kfree(xive);
++ if (ret)
+ return ret;
+- }
+
+ return 0;
+ }
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 5596c8ec221a..a998823f68a3 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+ xive->ops = &kvmppc_xive_native_ops;
+
+ if (ret)
+- kfree(xive);
++ return ret;
+
+- return ret;
++ return 0;
+ }
+
+ /*
+diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
+index 30d62ffe3310..1322c59cb5dd 100644
+--- a/arch/powerpc/mm/book3s64/hash_native.c
++++ b/arch/powerpc/mm/book3s64/hash_native.c
+@@ -56,7 +56,7 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
+ * tlbiel instruction for hash, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+-static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
++static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+ {
+diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
+index 28ced26f2a00..ab659044c7f6 100644
+--- a/arch/powerpc/mm/book3s64/hash_utils.c
++++ b/arch/powerpc/mm/book3s64/hash_utils.c
+@@ -1901,11 +1901,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ *
+ * For guests on platforms before POWER9, we clamp the RMA limit to 1G
+ * to avoid some funky things such as RTAS bugs etc...
++ *
++ * On POWER9 we limit to 1TB in case the host erroneously told us that
++ * the RMA was >1TB. Effective address bits 0:23 are treated as zero
++ * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
++ * for virtual real mode addressing and so it doesn't make sense to
++ * have an area larger than 1TB as it can't be addressed.
+ */
+ if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
+ ppc64_rma_size = first_memblock_size;
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
+ ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
++ else
++ ppc64_rma_size = min_t(u64, ppc64_rma_size,
++ 1UL << SID_SHIFT_1T);
+
+ /* Finally limit subsequent allocations */
+ memblock_set_current_limit(ppc64_rma_size);
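
The new clamp above relies on SID_SHIFT_1T being 40, so 1UL << SID_SHIFT_1T is exactly 1TB. A runnable demo of the aliasing the comment describes (with effective-address bits 0:23 treated as zero, virtual-real-mode accesses wrap modulo 1TB):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t one_tb = 1ULL << 40;            /* SID_SHIFT_1T == 40 */
            uint64_t addr = (3ULL << 40) | 0x1234;   /* 3TB + 0x1234 */

            /* prints 0x30000001234 -> 0x1234 */
            printf("0x%llx -> 0x%llx\n",
                   (unsigned long long)addr,
                   (unsigned long long)(addr % one_tb));
            return 0;
    }
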
+diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
+index bb9835681315..d0cd5271a57c 100644
+--- a/arch/powerpc/mm/book3s64/radix_tlb.c
++++ b/arch/powerpc/mm/book3s64/radix_tlb.c
+@@ -25,7 +25,7 @@
+ * tlbiel instruction for radix, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+-static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
++static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+ {
+@@ -146,8 +146,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+ trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+- unsigned long ric)
++static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
++ unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -163,8 +163,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
+ }
+
+
+-static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -179,8 +179,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_va(unsigned long va, unsigned long pid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -195,8 +195,8 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+ }
+
+-static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
+- unsigned long ap, unsigned long ric)
++static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
++ unsigned long ap, unsigned long ric)
+ {
+ unsigned long rb,rs,prs,r;
+
+@@ -235,7 +235,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
+ /*
+ * We use 128 set in radix mode and 256 set in hpt mode.
+ */
+-static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
++static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+ {
+ int set;
+
+@@ -337,7 +337,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+
+-static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
++static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
+ {
+ int set;
+
+@@ -377,8 +377,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
+ __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+ }
+
+-static inline void _tlbiel_va(unsigned long va, unsigned long pid,
+- unsigned long psize, unsigned long ric)
++static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
++ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
+
+@@ -409,8 +409,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
+ __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+ }
+
+-static inline void _tlbie_va(unsigned long va, unsigned long pid,
+- unsigned long psize, unsigned long ric)
++static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
++ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
+
+@@ -420,7 +420,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+
+-static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
++static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
+ unsigned long psize, unsigned long ric)
+ {
+ unsigned long ap = mmu_get_ap(psize);
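
All of the radix_tlb.c conversions above (and the hash_native.c one earlier) promote plain inline helpers to __always_inline. The tlbie/tlbiel asm inside them passes ric/prs/r through "i" (immediate) constraints, which is only valid when the helper really is inlined into a caller supplying constants; plain inline is a hint the compiler may decline. A small sketch of the pattern (an illustration, not the kernel's actual asm; compile with optimization enabled):

    #define __always_inline inline __attribute__((always_inline))

    /* The "i" constraint demands a compile-time constant, so this helper
     * must genuinely inline into callers that pass literals. */
    static __always_inline void tlb_op(unsigned int ric)
    {
            asm volatile("# tlb op %0" : : "i"(ric));
    }

    void flush_all(void)
    {
            tlb_op(2);  /* folds to a constant once inlined */
    }
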
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index b5d92dc32844..1de0f43a68e5 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -130,6 +130,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
++ if (!pu)
++ return NULL;
+ if (pshift == PUD_SHIFT)
+ return (pte_t *)pu;
+ else if (pshift > PMD_SHIFT) {
+@@ -138,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
++ if (!pm)
++ return NULL;
+ if (pshift == PMD_SHIFT)
+ /* 16MB hugepage */
+ return (pte_t *)pm;
+@@ -154,12 +158,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
+ } else {
+ pdshift = PUD_SHIFT;
+ pu = pud_alloc(mm, pg, addr);
++ if (!pu)
++ return NULL;
+ if (pshift >= PUD_SHIFT) {
+ ptl = pud_lockptr(mm, pu);
+ hpdp = (hugepd_t *)pu;
+ } else {
+ pdshift = PMD_SHIFT;
+ pm = pmd_alloc(mm, pu, addr);
++ if (!pm)
++ return NULL;
+ ptl = pmd_lockptr(mm, pm);
+ hpdp = (hugepd_t *)pm;
+ }
+diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
+index 31f12ad37a98..36fb66ce54cf 100644
+--- a/arch/powerpc/platforms/4xx/uic.c
++++ b/arch/powerpc/platforms/4xx/uic.c
+@@ -154,6 +154,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
+
+ mtdcr(uic->dcrbase + UIC_PR, pr);
+ mtdcr(uic->dcrbase + UIC_TR, tr);
++ mtdcr(uic->dcrbase + UIC_SR, ~mask);
+
+ raw_spin_unlock_irqrestore(&uic->lock, flags);
+
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 0c48c8964783..50e7aee3c7f3 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -6,6 +6,7 @@
+ * Copyright (C) 2010 IBM Corporation
+ */
+
++#include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
+ #include <linux/smp.h>
+@@ -335,11 +336,19 @@ void post_mobility_fixup(void)
+ if (rc)
+ printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+
++ /*
++ * We don't want CPUs to go online/offline while the device
++ * tree is being updated.
++ */
++ cpus_read_lock();
++
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ cpus_read_unlock();
++
+ /* Possibly switch to a new RFI flush type */
+ pseries_setup_rfi_flush();
+
+diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
+index 082c7e1c20f0..1cdb39575eae 100644
+--- a/arch/powerpc/sysdev/xive/common.c
++++ b/arch/powerpc/sysdev/xive/common.c
+@@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
+ * Now go through the entire mask until we find a valid
+ * target.
+ */
+- for (;;) {
++ do {
+ /*
+ * We re-check online as the fallback case passes us
+ * an untested affinity mask
+@@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
+ if (cpu_online(cpu) && xive_try_pick_target(cpu))
+ return cpu;
+ cpu = cpumask_next(cpu, mask);
+- if (cpu == first)
+- break;
+ /* Wrap around */
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(mask);
+- }
++ } while (cpu != first);
++
+ return -1;
+ }
+
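
The do/while rewrite above is more than style: in the old for (;;) form the exit test (cpu == first) ran before the wrap-around, so when the scan started at the first bit of the mask, cpumask_next() could never return first and the loop had no way out if no CPU accepted the target. A standalone model of the corrected scan:

    #include <stdio.h>

    /* Visit every entry once, starting at 'first', wrapping around. */
    static int find_target(const int *ok, int n, int first)
    {
            int cpu = first;

            do {
                    if (ok[cpu])
                            return cpu;
                    if (++cpu >= n)  /* wrap, like cpumask_first() */
                            cpu = 0;
            } while (cpu != first);

            return -1;
    }

    int main(void)
    {
            int ok[4] = { 0, 0, 1, 0 };

            printf("%d\n", find_target(ok, 4, 3));  /* wraps 3 -> 0, finds 2 */
            return 0;
    }
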
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index d0620d762a5a..4a721fd62406 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -465,8 +465,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
+ local_irq_save(flags);
+ hard_irq_disable();
+
+- tracing_enabled = tracing_is_on();
+- tracing_off();
++ if (!fromipi) {
++ tracing_enabled = tracing_is_on();
++ tracing_off();
++ }
+
+ bp = in_breakpoint_table(regs->nip, &offset);
+ if (bp != NULL) {
+diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
+index c28e37a344ad..ac0561960c52 100644
+--- a/arch/sh/include/asm/io.h
++++ b/arch/sh/include/asm/io.h
+@@ -369,7 +369,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
+
+ #define ioremap_nocache ioremap
+ #define ioremap_uc ioremap
+-#define iounmap __iounmap
++
++static inline void iounmap(void __iomem *addr)
++{
++ __iounmap(addr);
++}
+
+ /*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
+index 9f4b4bb78120..00cefd33afdd 100644
+--- a/arch/um/include/asm/mmu_context.h
++++ b/arch/um/include/asm/mmu_context.h
+@@ -52,7 +52,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
+ * when the new ->mm is used for the first time.
+ */
+ __switch_mm(&new->context.id);
+- down_write(&new->mmap_sem);
++ down_write_nested(&new->mmap_sem, 1);
+ uml_setup_stubs(new);
+ up_write(&new->mmap_sem);
+ }
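
down_write_nested(..., 1) above is a lockdep annotation rather than a behavioral change: on this path another mm's mmap_sem can already be write-held, and two locks of the same lock class need distinct subclasses for lockdep not to report a false deadlock. The idiom, sketched under that assumption about the call path:

    down_write(&old_mm->mmap_sem);            /* implicit subclass 0 */
    down_write_nested(&new_mm->mmap_sem, 1);  /* declare intended nesting */
    /* ... */
    up_write(&new_mm->mmap_sem);
    up_write(&old_mm->mmap_sem);
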
+diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
+index d213ec5c3766..f0b0c90dd398 100644
+--- a/arch/x86/include/uapi/asm/vmx.h
++++ b/arch/x86/include/uapi/asm/vmx.h
+@@ -146,7 +146,6 @@
+
+ #define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
+ #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
+-#define VMX_ABORT_VMCS_CORRUPTED 3
+ #define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
+
+ #endif /* _UAPIVMX_H */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 66ca906aa790..801ecd1c3fd5 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1226,7 +1226,7 @@ static ssize_t l1tf_show_state(char *buf)
+
+ static ssize_t mds_show_state(char *buf)
+ {
+- if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
++ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ mds_strings[mds_mitigation]);
+ }
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 4f36d3241faf..2d6898c2cb64 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+ {
+ int ret;
+
+- if (!access_ok(fp, sizeof(*frame)))
++ if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
+ return 0;
+
+ ret = 1;
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index 8eb67a670b10..653b7f617b61 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
+ {},
+ };
+
++/*
++ * Some devices have a portrait LCD but advertise a landscape resolution (and
++ * pitch). We simply swap width and height for these devices so that we can
++ * correctly deal with some of them coming with multiple resolutions.
++ */
++static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
++ {
++ /*
++ * Lenovo MIIX310-10ICR, only some batches have the troublesome
++ * 800x1280 portrait screen. Luckily the portrait version has
++ * its own BIOS version, so we match on that.
++ */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
++ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
++ },
++ },
++ {
++ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "Lenovo MIIX 320-10ICR"),
++ },
++ },
++ {
++ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
++ "Lenovo ideapad D330-10IGM"),
++ },
++ },
++ {},
++};
++
+ __init void sysfb_apply_efi_quirks(void)
+ {
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+ dmi_check_system(efifb_dmi_system_table);
++
++ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
++ dmi_check_system(efifb_dmi_swap_width_height)) {
++ u16 temp = screen_info.lfb_width;
++
++ screen_info.lfb_width = screen_info.lfb_height;
++ screen_info.lfb_height = temp;
++ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
++ }
+ }
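
When the swap quirk above fires, the pitch is recomputed as 4 * width, which assumes the 4-bytes-per-pixel (32bpp) layout these EFI framebuffers use. A toy model of the transform:

    #include <stdio.h>
    #include <stdint.h>

    struct fbinfo { uint16_t width, height; uint32_t pitch; };

    static void swap_width_height(struct fbinfo *fb)
    {
            uint16_t tmp = fb->width;

            fb->width  = fb->height;
            fb->height = tmp;
            fb->pitch  = 4 * fb->width;  /* bytes per scanline at 32bpp */
    }

    int main(void)
    {
            struct fbinfo fb = { 1280, 800, 4 * 1280 };

            swap_width_height(&fb);
            /* prints 800x1280 pitch 3200 */
            printf("%ux%u pitch %u\n", fb.width, fb.height, fb.pitch);
            return 0;
    }
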
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index b101127e13b6..ef6575ab60ed 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -91,6 +91,10 @@ static void init_vmcs_shadow_fields(void)
+ pr_err("Missing field from shadow_read_write_field %x\n",
+ field + 1);
+
++ WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
++ field <= GUEST_TR_AR_BYTES,
++ "Update vmcs12_write_any() to expose AR_BYTES RW");
++
+ /*
+ * PML and the preemption timer can be emulated, but the
+ * processor cannot vmwrite to fields that don't exist
+@@ -2969,6 +2973,25 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+
++ /*
++ * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
++ * nested early checks are disabled. In the event of a "late" VM-Fail,
++ * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
++ * software model to the pre-VMEntry host state. When EPT is disabled,
++ * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
++ * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
++ * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
++ * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
++ * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
++ * guaranteed to be overwritten with a shadow CR3 prior to re-entering
++ * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
++ * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
++ * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
++ * path would need to manually save/restore vmcs01.GUEST_CR3.
++ */
++ if (!enable_ept && !nested_early_check)
++ vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
++
+ vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
+
+ prepare_vmcs02_early(vmx, vmcs12);
+@@ -3780,18 +3803,8 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
+
+ nested_ept_uninit_mmu_context(vcpu);
+-
+- /*
+- * This is only valid if EPT is in use, otherwise the vmcs01 GUEST_CR3
+- * points to shadow pages! Fortunately we only get here after a WARN_ON
+- * if EPT is disabled, so a VMabort is perfectly fine.
+- */
+- if (enable_ept) {
+- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+- } else {
+- nested_vmx_abort(vcpu, VMX_ABORT_VMCS_CORRUPTED);
+- }
++ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
++ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
+@@ -3799,7 +3812,8 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+ * VMFail, like everything else we just need to ensure our
+ * software model is up-to-date.
+ */
+- ept_save_pdptrs(vcpu);
++ if (enable_ept)
++ ept_save_pdptrs(vcpu);
+
+ kvm_mmu_reset_context(vcpu);
+
+@@ -4013,7 +4027,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ * #UD or #GP.
+ */
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+- u32 vmx_instruction_info, bool wr, gva_t *ret)
++ u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
+ {
+ gva_t off;
+ bool exn;
+@@ -4120,7 +4134,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ */
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
+ ((s.type & 8) || !(s.type & 4))))
+- exn = exn || (off + sizeof(u64) > s.limit);
++ exn = exn || ((u64)off + len - 1 > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
+@@ -4139,7 +4153,8 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+ struct x86_exception e;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
++ vmcs_read32(VMX_INSTRUCTION_INFO), false,
++ sizeof(*vmpointer), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
+@@ -4390,6 +4405,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
++ int len;
+ gva_t gva = 0;
+ struct vmcs12 *vmcs12;
+
+@@ -4427,12 +4443,12 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
++ len = is_64_bit_mode(vcpu) ? 8 : 4;
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+- vmx_instruction_info, true, &gva))
++ vmx_instruction_info, true, len, &gva))
+ return 1;
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+- kvm_write_guest_virt_system(vcpu, gva, &field_value,
+- (is_long_mode(vcpu) ? 8 : 4), NULL);
++ kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+ }
+
+ return nested_vmx_succeed(vcpu);
+@@ -4442,6 +4458,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
+ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ {
+ unsigned long field;
++ int len;
+ gva_t gva;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+@@ -4467,11 +4484,11 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ field_value = kvm_register_readl(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
++ len = is_64_bit_mode(vcpu) ? 8 : 4;
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, len, &gva))
+ return 1;
+- if (kvm_read_guest_virt(vcpu, gva, &field_value,
+- (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
++ if (kvm_read_guest_virt(vcpu, gva, &field_value, len, &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+@@ -4500,6 +4517,17 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
++ /*
++ * Some Intel CPUs intentionally drop the reserved bits of the AR byte
++ * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
++ * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
++ * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
++ * from L1 will return a different value than VMREAD from L2 (L1 sees
++ * the stripped down value, L2 sees the full value as stored by KVM).
++ */
++ if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
++ field_value &= 0x1f0ff;
++
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+@@ -4619,7 +4647,8 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
+ if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ return 1;
+
+- if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
++ if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
++ true, sizeof(gpa_t), &gva))
+ return 1;
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+ if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+@@ -4665,7 +4694,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, sizeof(operand), &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+@@ -4727,7 +4756,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false, sizeof(operand), &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+@@ -5753,14 +5782,6 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+ {
+ int i;
+
+- /*
+- * Without EPT it is not possible to restore L1's CR3 and PDPTR on
+- * VMfail, because they are not available in vmcs01. Just always
+- * use hardware checks.
+- */
+- if (!enable_ept)
+- nested_early_check = 1;
+-
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs) {
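
The recurring change through this file is threading an explicit operand length into get_vmx_mem_address(), so the segment-limit test checks the last byte actually accessed instead of always assuming an 8-byte operand. Schematic of the corrected predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the access [off, off + len) runs past the segment limit. */
    static bool exceeds_limit(uint64_t off, int len, uint32_t limit)
    {
            return off + len - 1 > limit;  /* was: off + sizeof(u64) > limit */
    }
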
+diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
+index e847ff1019a2..29d205bb4e4f 100644
+--- a/arch/x86/kvm/vmx/nested.h
++++ b/arch/x86/kvm/vmx/nested.h
+@@ -21,7 +21,7 @@ void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu);
+ int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+- u32 vmx_instruction_info, bool wr, gva_t *ret);
++ u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
+
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/x86/kvm/vmx/vmcs_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+index 132432f375c2..97dd5295be31 100644
+--- a/arch/x86/kvm/vmx/vmcs_shadow_fields.h
++++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
+@@ -40,14 +40,14 @@ SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
+ SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
+ SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
+ SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
++SHADOW_FIELD_RO(GUEST_CS_AR_BYTES)
++SHADOW_FIELD_RO(GUEST_SS_AR_BYTES)
+ SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
+ SHADOW_FIELD_RW(EXCEPTION_BITMAP)
+ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
+ SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
+ SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
+ SHADOW_FIELD_RW(TPR_THRESHOLD)
+-SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
+-SHADOW_FIELD_RW(GUEST_SS_AR_BYTES)
+ SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
+ SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
+
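
Moving GUEST_CS/SS_AR_BYTES from the RW to the RO shadow list makes L1's VMWRITEs to those fields trap, so the reserved-bit strip added in the nested.c hunk (field_value &= 0x1f0ff) is applied on every write path. The mask keeps the architectural AR bits (type, S, DPL, P in bits 0-7, plus AVL, L, D/B, G and the unusable flag in bits 12-16) and clears reserved bits 8-11 and everything above bit 16. A runnable check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned int field_value = 0xdeadbeef;

            printf("0x%x\n", field_value & 0x1f0ff);  /* prints 0x1b0ef */
            return 0;
    }
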
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 306ed28569c0..924c2a79e4a9 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -5349,7 +5349,8 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
+ * is read even if it isn't needed (e.g., for type==all)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+- vmx_instruction_info, false, &gva))
++ vmx_instruction_info, false,
++ sizeof(operand), &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a4eceb0b5dde..a8ad3a4d86b1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3264,6 +3264,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+
+ kvm_x86_ops->vcpu_load(vcpu, cpu);
+
++ fpregs_assert_state_consistent();
++ if (test_thread_flag(TIF_NEED_FPU_LOAD))
++ switch_fpu_return();
++
+ /* Apply any externally detected TSC adjustments (due to suspend) */
+ if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
+ adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
+@@ -7955,9 +7959,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ wait_lapic_expire(vcpu);
+ guest_enter_irqoff();
+
+- fpregs_assert_state_consistent();
+- if (test_thread_flag(TIF_NEED_FPU_LOAD))
+- switch_fpu_return();
++ /* The preempt notifier should have taken care of the FPU already. */
++ WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
+
+ if (unlikely(vcpu->arch.switch_db_regs)) {
+ set_debugreg(0, 7);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 4db620849515..fb95dbb21dd8 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -276,8 +276,12 @@ bool bio_integrity_prep(struct bio *bio)
+ ret = bio_integrity_add_page(bio, virt_to_page(buf),
+ bytes, offset);
+
+- if (ret == 0)
+- return false;
++ if (ret == 0) {
++ printk(KERN_ERR "could not attach integrity payload\n");
++ kfree(buf);
++ status = BLK_STS_RESOURCE;
++ goto err_end_io;
++ }
+
+ if (ret < bytes)
+ break;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 8340f69670d8..5183fca0818a 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -117,6 +117,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
+ rq->internal_tag = -1;
+ rq->start_time_ns = ktime_get_ns();
+ rq->part = NULL;
++ refcount_set(&rq->ref, 1);
+ }
+ EXPORT_SYMBOL(blk_rq_init);
+
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 38a59a630cd4..dc1c83eafc22 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc,
+ else
+ return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+- if (target_node && target_proc == proc) {
++ if (target_node && target_proc->pid == proc->pid) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc,
+ buffer_offset = off_start_offset;
+ off_end_offset = off_start_offset + tr->offsets_size;
+ sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
+- sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
++ sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
++ ALIGN(secctx_sz, sizeof(u64));
+ off_min = 0;
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
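
The binder check above switches from comparing binder_proc pointers to comparing pids: a process may open the binder node more than once and get a distinct binder_proc per file descriptor, so the pointer compare missed a context manager transacting with itself through a second fd. A tiny demo of why identity-by-pointer is weaker than identity-by-pid:

    #include <stdio.h>

    struct proc { int pid; };

    int main(void)
    {
            struct proc a = { 42 }, b = { 42 };  /* two opens, one process */

            /* prints: ptr eq 0, pid eq 1 */
            printf("ptr eq %d, pid eq %d\n", &a == &b, a.pid == b.pid);
            return 0;
    }
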
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index fd7511e04e62..eaf3aa0cb803 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -2211,6 +2211,24 @@ void put_device(struct device *dev)
+ }
+ EXPORT_SYMBOL_GPL(put_device);
+
++bool kill_device(struct device *dev)
++{
++ /*
++ * Require the device lock and set the "dead" flag to guarantee that
++ * the update behavior is consistent with the other bitfields near
++ * it and that we cannot have an asynchronous probe routine trying
++ * to run while we are tearing out the bus/class/sysfs from
++ * underneath the device.
++ */
++ lockdep_assert_held(&dev->mutex);
++
++ if (dev->p->dead)
++ return false;
++ dev->p->dead = true;
++ return true;
++}
++EXPORT_SYMBOL_GPL(kill_device);
++
+ /**
+ * device_del - delete device from system.
+ * @dev: device.
+@@ -2230,15 +2248,8 @@ void device_del(struct device *dev)
+ struct kobject *glue_dir = NULL;
+ struct class_interface *class_intf;
+
+- /*
+- * Hold the device lock and set the "dead" flag to guarantee that
+- * the update behavior is consistent with the other bitfields near
+- * it and that we cannot have an asynchronous probe routine trying
+- * to run while we are tearing out the bus/class/sysfs from
+- * underneath the device.
+- */
+ device_lock(dev);
+- dev->p->dead = true;
++ kill_device(dev);
+ device_unlock(dev);
+
+ /* Notify clients of device removal. This call must come
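
kill_device() factors the mark-dead step out of device_del() so other kernel code can fence off a device under the device lock; the boolean return says whether this caller was the one that performed the kill. A short usage sketch (kernel context, assuming the caller already holds a valid struct device *dev):

    device_lock(dev);
    if (kill_device(dev))
            pr_info("%s: no new probes will be attempted\n", dev_name(dev));
    device_unlock(dev);
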
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index 5c39f20378b8..9ac6671bb514 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
+ unsigned long long m;
+
+ m = hpets->hp_tick_freq + (dis >> 1);
+- do_div(m, dis);
+- return (unsigned long)m;
++ return div64_ul(m, dis);
+ }
+
+ static int
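
The hpet change is a semantic simplification: do_div() is a macro that divides its first argument in place and evaluates to the remainder, while div64_ul() is a function that returns the quotient and leaves its argument alone, so the temporary store and the cast both disappear. The pre-added (dis >> 1) is the usual bias for round-to-nearest integer division, sketched:

    /* round-to-nearest 64-bit division, kernel context */
    unsigned long long m = freq + (dis >> 1);  /* add half the divisor */
    unsigned long ticks = div64_ul(m, dis);    /* quotient; m untouched */
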
+diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
+index f2a91c4d8cab..0cd849675d99 100644
+--- a/drivers/char/ipmi/ipmi_si_platform.c
++++ b/drivers/char/ipmi/ipmi_si_platform.c
+@@ -19,6 +19,7 @@
+ #include "ipmi_si.h"
+ #include "ipmi_dmi.h"
+
++static bool platform_registered;
+ static bool si_tryplatform = true;
+ #ifdef CONFIG_ACPI
+ static bool si_tryacpi = true;
+@@ -469,9 +470,12 @@ void ipmi_si_platform_init(void)
+ int rv = platform_driver_register(&ipmi_platform_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
++ else
++ platform_registered = true;
+ }
+
+ void ipmi_si_platform_shutdown(void)
+ {
+- platform_driver_unregister(&ipmi_platform_driver);
++ if (platform_registered)
++ platform_driver_unregister(&ipmi_platform_driver);
+ }
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index cf8156d6bc07..305fa5054274 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -303,6 +303,7 @@ struct ssif_info {
+ ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
+
+ static bool initialized;
++static bool platform_registered;
+
+ static void return_hosed_msg(struct ssif_info *ssif_info,
+ struct ipmi_smi_msg *msg);
+@@ -2088,6 +2089,8 @@ static int init_ipmi_ssif(void)
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv)
+ pr_err("Unable to register driver: %d\n", rv);
++ else
++ platform_registered = true;
+ }
+
+ ssif_i2c_driver.address_list = ssif_address_list();
+@@ -2111,7 +2114,7 @@ static void cleanup_ipmi_ssif(void)
+
+ kfree(ssif_i2c_driver.address_list);
+
+- if (ssif_trydmi)
++ if (ssif_trydmi && platform_registered)
+ platform_driver_unregister(&ipmi_driver);
+
+ free_ssif_clients();
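
Both IPMI hunks follow one pattern: record whether platform_driver_register() actually succeeded and only unregister on the teardown path if it did, since unregistering a driver that was never registered is not safe. A generic sketch (drv stands for the module's platform_driver):

    static bool platform_registered;

    static void my_init(void)
    {
            if (platform_driver_register(&drv) == 0)
                    platform_registered = true;
    }

    static void my_shutdown(void)
    {
            if (platform_registered)
                    platform_driver_unregister(&drv);
    }
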
+diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
+index 8072c195d831..dd414250e77e 100644
+--- a/drivers/fpga/Kconfig
++++ b/drivers/fpga/Kconfig
+@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT
+ config FPGA_MGR_ALTERA_PS_SPI
+ tristate "Altera FPGA Passive Serial over SPI"
+ depends on SPI
++ select BITREVERSE
+ help
+ FPGA manager driver support for Altera Arria/Cyclone/Stratix
+ using the passive serial interface over SPI.
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index a6e5184d436c..4b192e0ce92f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -896,6 +896,9 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+ AMDGPU_FENCE_OWNER_KFD, false);
+ if (ret)
+ goto wait_pd_fail;
++ ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
++ if (ret)
++ goto reserve_shared_fail;
+ amdgpu_bo_fence(vm->root.base.bo,
+ &vm->process_info->eviction_fence->base, true);
+ amdgpu_bo_unreserve(vm->root.base.bo);
+@@ -909,6 +912,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+
+ return 0;
+
++reserve_shared_fail:
+ wait_pd_fail:
+ validate_pd_fail:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+index 72837b8c7031..c2086eb00555 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+@@ -1163,6 +1163,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
+ tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
+ WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+
++ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
++ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
++
+ /* After HDP is initialized, flush HDP.*/
+ adev->nbio_funcs->hdp_flush(adev, NULL);
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index ae381450601c..afbaf6f5131e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
+ return 0;
+ }
+
+-static int unmap_sdma_queues(struct device_queue_manager *dqm,
+- unsigned int sdma_engine)
++static int unmap_sdma_queues(struct device_queue_manager *dqm)
+ {
+- return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
+- sdma_engine);
++ int i, retval = 0;
++
++ for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) {
++ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
++ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
++ if (retval)
++ return retval;
++ }
++ return retval;
+ }
+
+ /* dqm->lock mutex has to be locked before calling this function */
+@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
+ pr_debug("Before destroying queues, sdma queue count is : %u\n",
+ dqm->sdma_queue_count);
+
+- if (dqm->sdma_queue_count > 0) {
+- unmap_sdma_queues(dqm, 0);
+- unmap_sdma_queues(dqm, 1);
+- }
++ if (dqm->sdma_queue_count > 0)
++ unmap_sdma_queues(dqm);
+
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
+ filter, filter_param, false, 0);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 9dbba609450e..8fe74b821b32 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -76,6 +76,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ struct v9_mqd *m;
+ struct kfd_dev *kfd = mm->dev;
+
++ *mqd_mem_obj = NULL;
+ /* From V9, for CWSR, the control stack is located on the next page
+ * boundary after the mqd, we will use the gtt allocation function
+ * instead of sub-allocation function.
+@@ -93,8 +94,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
+ } else
+ retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
+ mqd_mem_obj);
+- if (retval != 0)
++ if (retval) {
++ kfree(*mqd_mem_obj);
+ return -ENOMEM;
++ }
+
+ m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
+ addr = (*mqd_mem_obj)->gpu_addr;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index ab7c5c3004ee..dc3ac66a4450 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -2592,7 +2592,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ address->type = PLN_ADDR_TYPE_GRAPHICS;
+ address->grph.addr.low_part = lower_32_bits(afb->address);
+ address->grph.addr.high_part = upper_32_bits(afb->address);
+- } else {
++ } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
+ uint64_t chroma_addr = afb->address + fb->offsets[1];
+
+ plane_size->video.luma_size.x = 0;
+@@ -4627,6 +4627,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ {
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
++ /*
++ * Some of the properties below require access to state, like bpc.
++ * Allocate some default initial connector state with our reset helper.
++ */
++ if (aconnector->base.funcs->reset)
++ aconnector->base.funcs->reset(&aconnector->base);
++
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+@@ -4809,9 +4816,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+- if (aconnector->base.funcs->reset)
+- aconnector->base.funcs->reset(&aconnector->base);
+-
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+@@ -4952,12 +4956,12 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+- if (!crtc || !plane->state->fb) {
+- position->enable = false;
+- position->x = 0;
+- position->y = 0;
++ position->enable = false;
++ position->x = 0;
++ position->y = 0;
++
++ if (!crtc || !plane->state->fb)
+ return 0;
+- }
+
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+@@ -4971,6 +4975,10 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+
++ if (x <= -amdgpu_crtc->max_cursor_width ||
++ y <= -amdgpu_crtc->max_cursor_height)
++ return 0;
++
+ if (crtc->primary->state) {
+ /* avivo cursor are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+@@ -6327,6 +6335,10 @@ static bool should_reset_plane(struct drm_atomic_state *state,
+ if (!new_crtc_state)
+ return true;
+
++ /* CRTC Degamma changes currently require us to recreate planes. */
++ if (new_crtc_state->color_mgmt_changed)
++ return true;
++
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 18c775a950cc..ee6b646180b6 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1138,9 +1138,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
+ const struct dc_link *link = context->streams[i]->link;
+ struct dc_stream_status *status;
+
+- if (context->streams[i]->apply_seamless_boot_optimization)
+- context->streams[i]->apply_seamless_boot_optimization = false;
+-
+ if (!context->streams[i]->mode_changed)
+ continue;
+
+@@ -1792,10 +1789,15 @@ static void commit_planes_for_stream(struct dc *dc,
+ if (dc->optimize_seamless_boot && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+- * bandwidth.
++ * bandwidth. Important to note that it is expected UEFI will
++ * only light up a single display on POST, therefore we only expect
++ * one stream with seamless boot flag set.
+ */
+- dc->optimize_seamless_boot = false;
+- dc->optimized_required = true;
++ if (stream->apply_seamless_boot_optimization) {
++ stream->apply_seamless_boot_optimization = false;
++ dc->optimize_seamless_boot = false;
++ dc->optimized_required = true;
++ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index b37ecc3ede61..a3ff33ff6da1 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -960,6 +960,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
++ /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
++ * is not cleared. If we emulate a DP signal on this connection, it thinks
++ * the dongle is still there and limits the number of modes we can emulate.
++ * Clear dongle_max_pix_clk on disconnect to fix this
++ */
++ link->dongle_max_pix_clk = 0;
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 1ee544a32ebb..253311864cdd 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -1624,8 +1624,7 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin
+ uint32_t link_bw;
+
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+- link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+- link->dc->config.optimize_edp_link_rate == false) {
++ link->dpcd_caps.edp_supported_link_rates_count == 0) {
+ *link_setting = link->verified_link_cap;
+ return true;
+ }
+@@ -2597,7 +2596,8 @@ void detect_edp_sink_caps(struct dc_link *link)
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+- link->dc->config.optimize_edp_link_rate) {
++ (link->dc->config.optimize_edp_link_rate ||
++ link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+@@ -2612,6 +2612,9 @@ void detect_edp_sink_caps(struct dc_link *link)
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
++
++ if (link->reported_link_cap.link_rate < link_rate)
++ link->reported_link_cap.link_rate = link_rate;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index da96229db53a..2959c3c9390b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -473,6 +473,8 @@ void dce_abm_destroy(struct abm **abm)
+ {
+ struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+
++ abm_dce->base.funcs->set_abm_immediate_disable(*abm);
++
+ kfree(abm_dce);
+ *abm = NULL;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+index 818536eea00a..c6a607cd0e4b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+@@ -388,6 +388,9 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
+ /* Set initialized ramping boundary value */
+ REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
+
++ /* Set backlight ramping stepsize */
++ REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
++
+ /* Set command to initialize microcontroller */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_INIT_DMCU);
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+index 60ce56f60ae3..5bd0df55aa5d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+@@ -263,4 +263,6 @@ struct dmcu *dcn10_dmcu_create(
+
+ void dce_dmcu_destroy(struct dmcu **dmcu);
+
++static const uint32_t abm_gain_stepsize = 0x0060;
++
+ #endif /* _DCE_ABM_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index 7ac50ab1b762..7d7e93c87c28 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+ prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
+
+ switch (plane_state->format) {
++ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
++ prescale_params->scale = 0x2082;
++ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ prescale_params->scale = 0x2020;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 33d311cea28c..9e4d70a0055e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -23,6 +23,7 @@
+ *
+ */
+
++#include <linux/delay.h>
+ #include "dm_services.h"
+ #include "core_types.h"
+ #include "resource.h"
+diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+index a1055413bade..31f867bb5afe 100644
+--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+@@ -1564,7 +1564,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
+
+ output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+
+- if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
++ if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
++ (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
+ sizeof(*rgb_user),
+ GFP_KERNEL);
+diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
+index b86cc705138c..d8b945596b09 100644
+--- a/drivers/gpu/drm/bochs/bochs_drv.c
++++ b/drivers/gpu/drm/bochs/bochs_drv.c
+@@ -7,6 +7,7 @@
+ #include <linux/slab.h>
+ #include <drm/drm_fb_helper.h>
+ #include <drm/drm_probe_helper.h>
++#include <drm/drm_atomic_helper.h>
+
+ #include "bochs.h"
+
+@@ -171,6 +172,7 @@ static void bochs_pci_remove(struct pci_dev *pdev)
+ {
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
++ drm_atomic_helper_shutdown(dev);
+ drm_dev_unregister(dev);
+ bochs_unload(dev);
+ drm_dev_put(dev);
+diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
+index 1211b5379df1..8e3c5e599eba 100644
+--- a/drivers/gpu/drm/bridge/sii902x.c
++++ b/drivers/gpu/drm/bridge/sii902x.c
+@@ -229,10 +229,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
+ struct regmap *regmap = sii902x->regmap;
+ u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ struct hdmi_avi_infoframe frame;
++ u16 pixel_clock_10kHz = adj->clock / 10;
+ int ret;
+
+- buf[0] = adj->clock;
+- buf[1] = adj->clock >> 8;
++ buf[0] = pixel_clock_10kHz & 0xff;
++ buf[1] = pixel_clock_10kHz >> 8;
+ buf[2] = adj->vrefresh;
+ buf[3] = 0x00;
+ buf[4] = adj->hdisplay;
+diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
+index 4655bb1eb88f..f59a51e19dab 100644
+--- a/drivers/gpu/drm/bridge/tc358767.c
++++ b/drivers/gpu/drm/bridge/tc358767.c
+@@ -1141,6 +1141,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
+ struct tc_data *tc = connector_to_tc(connector);
+ struct edid *edid;
+ unsigned int count;
++ int ret;
++
++ ret = tc_get_display_props(tc);
++ if (ret < 0) {
++ dev_err(tc->dev, "failed to read display props: %d\n", ret);
++ return 0;
++ }
+
+ if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
+ count = tc->panel->funcs->get_modes(tc->panel);
+diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
+index a879aac21246..3a8af9978ebd 100644
+--- a/drivers/gpu/drm/bridge/ti-tfp410.c
++++ b/drivers/gpu/drm/bridge/ti-tfp410.c
+@@ -372,7 +372,8 @@ static int tfp410_fini(struct device *dev)
+ {
+ struct tfp410 *dvi = dev_get_drvdata(dev);
+
+- cancel_delayed_work_sync(&dvi->hpd_work);
++ if (dvi->hpd_irq >= 0)
++ cancel_delayed_work_sync(&dvi->hpd_work);
+
+ drm_bridge_remove(&dvi->bridge);
+
+diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
+index 00e743153e94..fde298d9f510 100644
+--- a/drivers/gpu/drm/drm_debugfs_crc.c
++++ b/drivers/gpu/drm/drm_debugfs_crc.c
+@@ -389,12 +389,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ struct drm_crtc_crc *crc = &crtc->crc;
+ struct drm_crtc_crc_entry *entry;
+ int head, tail;
++ unsigned long flags;
+
+- spin_lock(&crc->lock);
++ spin_lock_irqsave(&crc->lock, flags);
+
+ /* Caller may not have noticed yet that userspace has stopped reading */
+ if (!crc->entries) {
+- spin_unlock(&crc->lock);
++ spin_unlock_irqrestore(&crc->lock, flags);
+ return -EINVAL;
+ }
+
+@@ -405,7 +406,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ bool was_overflow = crc->overflow;
+
+ crc->overflow = true;
+- spin_unlock(&crc->lock);
++ spin_unlock_irqrestore(&crc->lock, flags);
+
+ if (!was_overflow)
+ DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
+@@ -421,7 +422,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
+ head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
+ crc->head = head;
+
+- spin_unlock(&crc->lock);
++ spin_unlock_irqrestore(&crc->lock, flags);
+
+ wake_up_interruptible(&crc->wq);
+
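
drm_crtc_add_crc_entry() can be called from interrupt context, which is why the plain spin_lock() calls above become the irqsave variants: they additionally disable local interrupts so the same CPU cannot re-enter the lock from an IRQ while holding it. The idiom, for reference:

    unsigned long flags;

    spin_lock_irqsave(&crc->lock, flags);
    /* ... touch the CRC ring buffer ... */
    spin_unlock_irqrestore(&crc->lock, flags);
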
+diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
+index 1e5593575d23..6192b7b20d84 100644
+--- a/drivers/gpu/drm/drm_edid_load.c
++++ b/drivers/gpu/drm/drm_edid_load.c
+@@ -278,6 +278,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
+ * the last one found one as a fallback.
+ */
+ fwstr = kstrdup(edid_firmware, GFP_KERNEL);
++ if (!fwstr)
++ return ERR_PTR(-ENOMEM);
+ edidstr = fwstr;
+
+ while ((edidname = strsep(&edidstr, ","))) {
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index c88e538b2ef4..81b48e273cbd 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -443,7 +443,7 @@ void __i915_request_submit(struct i915_request *request)
+ */
+ if (request->sched.semaphores &&
+ i915_sw_fence_signaled(&request->semaphore))
+- request->hw_context->saturated |= request->sched.semaphores;
++ engine->saturated |= request->sched.semaphores;
+
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+@@ -829,7 +829,7 @@ already_busywaiting(struct i915_request *rq)
+ *
+ * See the are-we-too-late? check in __i915_request_submit().
+ */
+- return rq->sched.semaphores | rq->hw_context->saturated;
++ return rq->sched.semaphores | rq->engine->saturated;
+ }
+
+ static int
+diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
+index 924cc556223a..8931e0fee873 100644
+--- a/drivers/gpu/drm/i915/intel_context.c
++++ b/drivers/gpu/drm/i915/intel_context.c
+@@ -230,7 +230,6 @@ intel_context_init(struct intel_context *ce,
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+- ce->saturated = 0;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
+index 339c7437fe82..fd47b9d49e09 100644
+--- a/drivers/gpu/drm/i915/intel_context_types.h
++++ b/drivers/gpu/drm/i915/intel_context_types.h
+@@ -59,8 +59,6 @@ struct intel_context {
+ atomic_t pin_count;
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+
+- intel_engine_mask_t saturated; /* submitting semaphores too late? */
+-
+ /**
+ * active_tracker: Active tracker for the external rq activity
+ * on this intel_context object.
+diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
+index eea9bec04f1b..9d4f12e982c3 100644
+--- a/drivers/gpu/drm/i915/intel_engine_cs.c
++++ b/drivers/gpu/drm/i915/intel_engine_cs.c
+@@ -1200,6 +1200,7 @@ void intel_engines_park(struct drm_i915_private *i915)
+
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+ engine->execlists.no_priolist = false;
++ engine->saturated = 0;
+ }
+
+ i915->gt.active_engines = 0;
+diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
+index 1f970c76b6a6..4270ddb45f41 100644
+--- a/drivers/gpu/drm/i915/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/intel_engine_types.h
+@@ -285,6 +285,8 @@ struct intel_engine_cs {
+ struct intel_context *kernel_context; /* pinned */
+ struct intel_context *preempt_context; /* pinned; optional */
+
++ intel_engine_mask_t saturated; /* submitting semaphores too late? */
++
+ struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
+
+diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
+index d29721e177bf..8fef224b93c8 100644
+--- a/drivers/gpu/drm/lima/lima_pp.c
++++ b/drivers/gpu/drm/lima/lima_pp.c
+@@ -64,7 +64,13 @@ static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
+ struct lima_ip *pp_bcast = data;
+ struct lima_device *dev = pp_bcast->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+- struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
++ struct drm_lima_m450_pp_frame *frame;
++
++ /* for shared irq case */
++ if (!pipe->current_task)
++ return IRQ_NONE;
++
++ frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
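
Returning IRQ_NONE above follows the contract for handlers on a shared interrupt line: when this device has no work pending (no current_task), report that the interrupt was not ours so the other handlers sharing the line still run. Schematic handler (struct my_state and its field are placeholders):

    static irqreturn_t my_handler(int irq, void *data)
    {
            struct my_state *st = data;

            if (!st->current_task)
                    return IRQ_NONE;   /* not our interrupt */

            /* ... process the completed frame ... */
            return IRQ_HANDLED;
    }
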
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+index 38e2cfa9cec7..6910d0468e3c 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+@@ -74,7 +74,7 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+- if (IS_ERR_OR_NULL(gmu->mmio))
++ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+@@ -90,7 +90,7 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+- if (IS_ERR_OR_NULL(gmu->mmio))
++ if (!gmu->initialized)
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+@@ -504,8 +504,10 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+ wmb();
+
+ err:
+- devm_iounmap(gmu->dev, pdcptr);
+- devm_iounmap(gmu->dev, seqptr);
++ if (!IS_ERR_OR_NULL(pdcptr))
++ devm_iounmap(gmu->dev, pdcptr);
++ if (!IS_ERR_OR_NULL(seqptr))
++ devm_iounmap(gmu->dev, seqptr);
+ }
+
+ /*
+@@ -695,7 +697,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+- if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
++ if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
+ return 0;
+
+ gmu->hung = false;
+@@ -765,7 +767,7 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+ {
+ u32 reg;
+
+- if (!gmu->mmio)
++ if (!gmu->initialized)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+@@ -1227,7 +1229,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+ {
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+- if (IS_ERR_OR_NULL(gmu->mmio))
++ if (!gmu->initialized)
+ return;
+
+ a6xx_gmu_stop(a6xx_gpu);
+@@ -1245,6 +1247,8 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
++
++ gmu->initialized = false;
+ }
+
+ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+@@ -1309,6 +1313,8 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
++ gmu->initialized = true;
++
+ return 0;
+ err:
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+index bedd8e6a63aa..39a26dd63674 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+@@ -75,6 +75,7 @@ struct a6xx_gmu {
+
+ struct a6xx_hfi_queue queues[2];
+
++ bool initialized;
+ bool hung;
+ };
+
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index a9c0ac937b00..9acbbc0f3232 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -56,7 +56,6 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ return ret;
+
+ mem_phys = r.start;
+- mem_size = resource_size(&r);
+
+ /* Request the MDT file for the firmware */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+@@ -72,6 +71,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ goto out;
+ }
+
++ if (mem_size > resource_size(&r)) {
++ DRM_DEV_ERROR(dev,
++ "memory region is too small to load the MDT\n");
++ ret = -E2BIG;
++ goto out;
++ }
++
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index f38d7367bd3b..4a0fe8a25ad7 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -1306,16 +1306,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
+
+ ret = add_gpu_components(&pdev->dev, &match);
+ if (ret)
+- return ret;
++ goto fail;
+
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+- return ret;
++ goto fail;
++
++ ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
++ if (ret)
++ goto fail;
+
+- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
++ return 0;
++
++fail:
++ of_platform_depopulate(&pdev->dev);
++ return ret;
+ }
+
+ static int msm_pdev_remove(struct platform_device *pdev)
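The msm_pdev_probe() hunk converts early returns into a goto so that of_platform_depopulate() runs on every failure path; the matching populate call happens earlier, in component setup not shown in this hunk. A generic sketch of the single-exit unwind, assuming hypothetical helpers:

	static int foo_pdev_probe(struct platform_device *pdev)
	{
		int ret;

		ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
					   &pdev->dev);
		if (ret)
			return ret;		/* nothing to undo yet */

		ret = foo_add_components(&pdev->dev);	/* hypothetical */
		if (ret)
			goto fail;

		ret = foo_register_master(&pdev->dev);	/* hypothetical */
		if (ret)
			goto fail;

		return 0;

	fail:
		of_platform_depopulate(&pdev->dev);	/* undo the populate */
		return ret;
	}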
+diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
+index 8712af79a49c..4c43dd282acc 100644
+--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
++++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
+@@ -384,10 +384,20 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
+ int r;
+
+ drm_display_mode_to_videomode(mode, &vm);
+- r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel,
+- &vm);
+- if (r)
+- return r;
++
++ /*
++	 * DSI might not use this mode: the supplied mode is not
++	 * necessarily a valid DISPC mode. DSI will calculate and
++	 * configure the proper DISPC mode itself later, so skip the
++	 * timings check for DSI outputs here.
++ */
++ if (omap_crtc->pipe->output->next == NULL ||
++ omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) {
++ r = priv->dispc_ops->mgr_check_timings(priv->dispc,
++ omap_crtc->channel,
++ &vm);
++ if (r)
++ return r;
++ }
+
+ /* Check for bandwidth limit */
+ if (priv->max_bandwidth) {
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 569be4efd8d1..397a3086eac8 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -446,6 +446,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ };
+
++static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
++ .pixelclock = { 26400000, 33300000, 46800000 },
++ .hactive = { 800, 800, 800 },
++ .hfront_porch = { 16, 210, 354 },
++ .hback_porch = { 45, 36, 6 },
++ .hsync_len = { 1, 10, 40 },
++ .vactive = { 480, 480, 480 },
++ .vfront_porch = { 7, 22, 147 },
++ .vback_porch = { 22, 13, 3 },
++ .vsync_len = { 1, 10, 20 },
++ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
++ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
++};
++
++static const struct panel_desc armadeus_st0700_adapt = {
++ .timings = &santek_st0700i5y_rbslw_f_timing,
++ .num_timings = 1,
++ .bpc = 6,
++ .size = {
++ .width = 154,
++ .height = 86,
++ },
++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
++ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
++};
++
+ static const struct drm_display_mode auo_b101aw03_mode = {
+ .clock = 51450,
+ .hdisplay = 1024,
+@@ -2570,6 +2596,9 @@ static const struct of_device_id platform_of_match[] = {
+ }, {
+ .compatible = "arm,rtsm-display",
+ .data = &arm_rtsm,
++ }, {
++ .compatible = "armadeus,st0700-adapt",
++ .data = &armadeus_st0700_adapt,
+ }, {
+ .compatible = "auo,b101aw03",
+ .data = &auo_b101aw03,
+@@ -3098,7 +3127,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+
+- return mipi_dsi_attach(dsi);
++ err = mipi_dsi_attach(dsi);
++ if (err) {
++ struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
++
++ drm_panel_remove(&panel->base);
++ }
++
++ return err;
+ }
+
+ static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+index 12ed5265a90b..09046135e720 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+@@ -1011,7 +1011,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct vop *vop = to_vop(crtc);
+
+ adjusted_mode->clock =
+- clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
++ DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
++ 1000);
+
+ return true;
+ }
+diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
+index b69ae10ca238..d724fb3de44e 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
++++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
+@@ -102,7 +102,6 @@ struct virtio_gpu_fence {
+ struct dma_fence f;
+ struct virtio_gpu_fence_driver *drv;
+ struct list_head node;
+- uint64_t seq;
+ };
+ #define to_virtio_fence(x) \
+ container_of(x, struct virtio_gpu_fence, f)
+diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
+index 87d1966192f4..72b4f7561432 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
++++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
+@@ -40,16 +40,14 @@ bool virtio_fence_signaled(struct dma_fence *f)
+ {
+ struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+- if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
++ if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
+ return true;
+ return false;
+ }
+
+ static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
+ {
+- struct virtio_gpu_fence *fence = to_virtio_fence(f);
+-
+- snprintf(str, size, "%llu", fence->seq);
++ snprintf(str, size, "%llu", f->seqno);
+ }
+
+ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
+@@ -76,6 +74,11 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+ return fence;
+
+ fence->drv = drv;
++
++	/* This only partially initializes the fence because the seqno
++	 * is not yet known. The fence must not be used outside the
++	 * driver until virtio_gpu_fence_emit is called.
++ */
+ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+ return fence;
+@@ -89,13 +92,13 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&drv->lock, irq_flags);
+- fence->seq = ++drv->sync_seq;
++ fence->f.seqno = ++drv->sync_seq;
+ dma_fence_get(&fence->f);
+ list_add_tail(&fence->node, &drv->fences);
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+
+ cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
+- cmd_hdr->fence_id = cpu_to_le64(fence->seq);
++ cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
+ return 0;
+ }
+
+@@ -109,7 +112,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
+ list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
+- if (last_seq < fence->seq)
++ if (last_seq < fence->f.seqno)
+ continue;
+ dma_fence_signal_locked(&fence->f);
+ list_del(&fence->node);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index 949a264985fc..19fbffd0f7a3 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -542,6 +542,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
+ if (!ret)
+ return -EBUSY;
+
++	/* The is_valid check must precede the copy of the cache entry. */
++ smp_rmb();
++
+ ptr = cache_ent->caps_cache;
+
+ copy_exit:
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index 5bb0f0a084e9..a7684f9c80db 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -583,6 +583,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
+ cache_ent->id == le32_to_cpu(cmd->capset_id)) {
+ memcpy(cache_ent->caps_cache, resp->capset_data,
+ cache_ent->size);
++ /* Copy must occur before is_valid is signalled. */
++ smp_wmb();
+ atomic_set(&cache_ent->is_valid, 1);
+ break;
+ }
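The smp_rmb() added to the ioctl reader above and the smp_wmb() added here form a classic publish/consume barrier pair around the capset cache. Reduced to its essentials (field names taken from the hunks themselves):

	/* Writer (response handler): fill the payload, then publish. */
	memcpy(cache_ent->caps_cache, resp->capset_data, cache_ent->size);
	smp_wmb();			/* order the copy before the flag */
	atomic_set(&cache_ent->is_valid, 1);

	/* Reader (ioctl): observe the flag, then consume the payload. */
	if (atomic_read(&cache_ent->is_valid)) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		ptr = cache_ent->caps_cache;
	}

Without the pair, a reader could see is_valid == 1 while the copy into caps_cache is still in flight on a weakly ordered machine.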
+diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
+index bb66dbcd5e3f..e447b7588d06 100644
+--- a/drivers/gpu/drm/vkms/vkms_crtc.c
++++ b/drivers/gpu/drm/vkms/vkms_crtc.c
+@@ -15,6 +15,10 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+
+ spin_lock(&output->lock);
+
++ ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
++ output->period_ns);
++ WARN_ON(ret_overrun != 1);
++
+ ret = drm_crtc_handle_vblank(crtc);
+ if (!ret)
+ DRM_ERROR("vkms failure on handling vblank");
+@@ -35,10 +39,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+ DRM_WARN("failed to queue vkms_crc_work_handle");
+ }
+
+- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+- output->period_ns);
+- WARN_ON(ret_overrun != 1);
+-
+ spin_unlock(&output->lock);
+
+ return HRTIMER_RESTART;
+@@ -74,11 +74,21 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
+ {
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_output *output = &vkmsdev->output;
++ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+ *vblank_time = output->vblank_hrtimer.node.expires;
+
+- if (!in_vblank_irq)
+- *vblank_time -= output->period_ns;
++ if (WARN_ON(*vblank_time == vblank->time))
++ return true;
++
++ /*
++ * To prevent races we roll the hrtimer forward before we do any
++ * interrupt processing - this is how real hw works (the interrupt is
++ * only generated after all the vblank registers are updated) and what
++ * the vblank core expects. Therefore we need to always correct the
++	 * timestamp by one frame.
++ */
++ *vblank_time -= output->period_ns;
+
+ return true;
+ }
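Both vkms hunks enforce one ordering rule: advance the vblank timer before any interrupt-side processing, the way real hardware latches its registers before raising the interrupt, then compensate by one period when reporting the timestamp. A condensed sketch (structure names are hypothetical):

	static enum hrtimer_restart foo_vblank_simulate(struct hrtimer *timer)
	{
		struct foo_output *out = container_of(timer, struct foo_output,
						      vblank_hrtimer);

		/* Roll the timer forward first; the expiry now points at
		 * the *next* vblank, which is what the vblank core expects. */
		hrtimer_forward_now(&out->vblank_hrtimer, out->period_ns);

		drm_crtc_handle_vblank(&out->crtc);	/* interrupt processing */

		return HRTIMER_RESTART;
	}

	/* ...and the timestamp hook subtracts one period to compensate:
	 *	*vblank_time = out->vblank_hrtimer.node.expires - out->period_ns;
	 */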
+diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
+index 9797ccb0a073..6387302c1245 100644
+--- a/drivers/gpu/host1x/bus.c
++++ b/drivers/gpu/host1x/bus.c
+@@ -414,6 +414,9 @@ static int host1x_device_add(struct host1x *host1x,
+
+ of_dma_configure(&device->dev, host1x->dev->of_node, true);
+
++ device->dev.dma_parms = &device->dma_parms;
++ dma_set_max_seg_size(&device->dev, SZ_4M);
++
+ err = host1x_device_parse_dt(device, driver);
+ if (err < 0) {
+ kfree(device);
+diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
+index 1c8f708f212b..ee2412b7459c 100644
+--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
++++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
+@@ -51,6 +51,7 @@ struct gpu_i2c_dev {
+ void __iomem *regs;
+ struct i2c_adapter adapter;
+ struct i2c_board_info *gpu_ccgx_ucsi;
++ struct i2c_client *ccgx_client;
+ };
+
+ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+@@ -261,8 +262,6 @@ static const struct property_entry ccgx_props[] = {
+
+ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+ {
+- struct i2c_client *ccgx_client;
+-
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+ sizeof(*i2cd->gpu_ccgx_ucsi),
+ GFP_KERNEL);
+@@ -274,8 +273,8 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+ i2cd->gpu_ccgx_ucsi->addr = 0x8;
+ i2cd->gpu_ccgx_ucsi->irq = irq;
+ i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
+- ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+- if (!ccgx_client)
++ i2cd->ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
++ if (!i2cd->ccgx_client)
+ return -ENODEV;
+
+ return 0;
+@@ -354,6 +353,13 @@ static __maybe_unused int gpu_i2c_resume(struct device *dev)
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+ gpu_enable_i2c_bus(i2cd);
++ /*
++	 * Runtime-resume the ccgx client so that it can check for
++	 * any connector change events. Old ccg firmware has a known
++	 * issue of not triggering an interrupt when a device is
++	 * connected, so force a runtime resume of the client here.
++ */
++ pm_request_resume(&i2cd->ccgx_client->dev);
+ return 0;
+ }
+
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index 48337bef5b87..3d90c0bb049e 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -25,7 +25,6 @@
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+-#include <linux/of_irq.h>
+ #include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+ #include <linux/pinctrl/consumer.h>
+@@ -1816,15 +1815,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = {
+
+ static int stm32f7_i2c_probe(struct platform_device *pdev)
+ {
+- struct device_node *np = pdev->dev.of_node;
+ struct stm32f7_i2c_dev *i2c_dev;
+ const struct stm32f7_i2c_setup *setup;
+ struct resource *res;
+- u32 irq_error, irq_event, clk_rate, rise_time, fall_time;
++ u32 clk_rate, rise_time, fall_time;
+ struct i2c_adapter *adap;
+ struct reset_control *rst;
+ dma_addr_t phy_addr;
+- int ret;
++ int irq_error, irq_event, ret;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+@@ -1836,16 +1834,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
+ return PTR_ERR(i2c_dev->base);
+ phy_addr = (dma_addr_t)res->start;
+
+- irq_event = irq_of_parse_and_map(np, 0);
+- if (!irq_event) {
+- dev_err(&pdev->dev, "IRQ event missing or invalid\n");
+- return -EINVAL;
++ irq_event = platform_get_irq(pdev, 0);
++ if (irq_event <= 0) {
++ if (irq_event != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
++ irq_event);
++ return irq_event ? : -ENOENT;
+ }
+
+- irq_error = irq_of_parse_and_map(np, 1);
+- if (!irq_error) {
+- dev_err(&pdev->dev, "IRQ error missing or invalid\n");
+- return -EINVAL;
++ irq_error = platform_get_irq(pdev, 1);
++ if (irq_error <= 0) {
++ if (irq_error != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
++ irq_error);
++ return irq_error ? : -ENOENT;
+ }
+
+ i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
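This hunk (and the stm32-dfsdm ones further below) migrates from irq_of_parse_and_map() to platform_get_irq(), whose negative return values let a driver distinguish probe deferral from a genuine error. The resulting idiom, roughly:

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		/* -EPROBE_DEFER is retried later; stay quiet for it. */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		/* map the (historical) zero return onto a real errno */
		return irq ? : -ENOENT;
	}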
+diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
+index 3b84cb243a87..055227cb3d43 100644
+--- a/drivers/iio/accel/adxl372.c
++++ b/drivers/iio/accel/adxl372.c
+@@ -782,10 +782,14 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
+ unsigned int mask;
+ int i, ret;
+
+- ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0);
++ ret = iio_triggered_buffer_postenable(indio_dev);
+ if (ret < 0)
+ return ret;
+
++ ret = adxl372_set_interrupts(st, ADXL372_INT1_MAP_FIFO_FULL_MSK, 0);
++ if (ret < 0)
++ goto err;
++
+ mask = *indio_dev->active_scan_mask;
+
+ for (i = 0; i < ARRAY_SIZE(adxl372_axis_lookup_table); i++) {
+@@ -793,8 +797,10 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
+ break;
+ }
+
+- if (i == ARRAY_SIZE(adxl372_axis_lookup_table))
+- return -EINVAL;
++ if (i == ARRAY_SIZE(adxl372_axis_lookup_table)) {
++ ret = -EINVAL;
++ goto err;
++ }
+
+ st->fifo_format = adxl372_axis_lookup_table[i].fifo_format;
+ st->fifo_set_size = bitmap_weight(indio_dev->active_scan_mask,
+@@ -814,26 +820,25 @@ static int adxl372_buffer_postenable(struct iio_dev *indio_dev)
+ if (ret < 0) {
+ st->fifo_mode = ADXL372_FIFO_BYPASSED;
+ adxl372_set_interrupts(st, 0, 0);
+- return ret;
++ goto err;
+ }
+
+- return iio_triggered_buffer_postenable(indio_dev);
++ return 0;
++
++err:
++ iio_triggered_buffer_predisable(indio_dev);
++ return ret;
+ }
+
+ static int adxl372_buffer_predisable(struct iio_dev *indio_dev)
+ {
+ struct adxl372_state *st = iio_priv(indio_dev);
+- int ret;
+-
+- ret = iio_triggered_buffer_predisable(indio_dev);
+- if (ret < 0)
+- return ret;
+
+ adxl372_set_interrupts(st, 0, 0);
+ st->fifo_mode = ADXL372_FIFO_BYPASSED;
+ adxl372_configure_fifo(st);
+
+- return 0;
++ return iio_triggered_buffer_predisable(indio_dev);
+ }
+
+ static const struct iio_buffer_setup_ops adxl372_buffer_ops = {
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 19adc2b23472..588907cc3b6b 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -1456,6 +1456,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+ * So IRQ associated to filter instance 0 is dedicated to the Filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ if (irq != -EPROBE_DEFER)
++ dev_err(dev, "Failed to get IRQ: %d\n", irq);
++ return irq;
++ }
++
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
+index 0a4d3746d21c..26e2011c5868 100644
+--- a/drivers/iio/adc/stm32-dfsdm-core.c
++++ b/drivers/iio/adc/stm32-dfsdm-core.c
+@@ -233,6 +233,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->dfsdm.base))
++ return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+@@ -242,8 +244,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+- dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+- return -EINVAL;
++ ret = PTR_ERR(priv->clk);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
++ return ret;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 2f7d14159841..9b76a8fcdd24 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -337,7 +337,7 @@ static int dst_fetch_ha(const struct dst_entry *dst,
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+- memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
++ neigh_ha_snapshot(dev_addr->dst_dev_addr, n, dst->dev);
+ }
+
+ neigh_release(n);
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+index 5689d742bafb..4c88d6f72574 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+@@ -772,6 +772,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
++ attr->qp_state = iwqp->ibqp_state;
++ attr->cur_qp_state = attr->qp_state;
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = qp->qp_uk.sq_size;
+ attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
+index 6c529e6f3a01..348c1df69cdc 100644
+--- a/drivers/infiniband/hw/mlx5/mad.c
++++ b/drivers/infiniband/hw/mlx5/mad.c
+@@ -200,19 +200,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ vl_15_dropped);
+ }
+
+-static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
++static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+ {
+- int err;
++ struct mlx5_core_dev *mdev;
++ bool native_port = true;
++ u8 mdev_port_num;
+ void *out_cnt;
++ int err;
+
++ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
++ if (!mdev) {
++		/* Failed to get the native port, likely because the 2nd
++		 * port is still unaffiliated. In that case default to the
++		 * 1st port and the attached PF device.
++ */
++ native_port = false;
++ mdev = dev->mdev;
++ mdev_port_num = 1;
++ }
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ goto done;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+@@ -221,11 +235,13 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+ int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+- if (!out_cnt)
+- return IB_MAD_RESULT_FAILURE;
++ if (!out_cnt) {
++ err = IB_MAD_RESULT_FAILURE;
++ goto done;
++ }
+
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
+- port_num, out_cnt, sz);
++ mdev_port_num, out_cnt, sz);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ } else {
+@@ -234,20 +250,23 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+- if (!out_cnt)
+- return IB_MAD_RESULT_FAILURE;
++ if (!out_cnt) {
++ err = IB_MAD_RESULT_FAILURE;
++ goto done;
++ }
+
+- err = mlx5_core_query_ib_ppcnt(mdev, port_num,
++ err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
+ out_cnt, sz);
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+- }
+-
++ }
+ kvfree(out_cnt);
+- if (err)
+- return IB_MAD_RESULT_FAILURE;
+-
+- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++ err = err ? IB_MAD_RESULT_FAILURE :
++ IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
++done:
++ if (native_port)
++ mlx5_ib_put_native_port_mdev(dev, port_num);
++ return err;
+ }
+
+ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+@@ -259,8 +278,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct ib_mad *in_mad = (const struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+- struct mlx5_core_dev *mdev;
+- u8 mdev_port_num;
+ int ret;
+
+ if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+@@ -269,19 +286,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+
+ memset(out_mad->data, 0, sizeof(out_mad->data));
+
+- mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+- if (!mdev)
+- return IB_MAD_RESULT_FAILURE;
+-
+- if (MLX5_CAP_GEN(mdev, vport_counters) &&
++ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+- ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
++ ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
+ } else {
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in_mad, out_mad);
+ }
+- mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
+ }
+
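The restructured process_pma_cmd() wraps the whole query in an acquire/fallback/release bracket, so the reference is put only when it was actually taken. Stripped of the MAD details, the shape is:

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* 2nd port not yet affiliated: use the PF device. */
		native_port = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	/* ... issue the query against (mdev, mdev_port_num);
	 * every exit funnels through one label ... */
done:
	if (native_port)	/* put only what was actually taken */
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;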
+diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
+index aca9f60f9b21..1cbfbd98eb22 100644
+--- a/drivers/infiniband/sw/rxe/rxe_resp.c
++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
+@@ -431,6 +431,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
+ qp->resp.va = reth_va(pkt);
+ qp->resp.rkey = reth_rkey(pkt);
+ qp->resp.resid = reth_len(pkt);
++ qp->resp.length = reth_len(pkt);
+ }
+ access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
+ : IB_ACCESS_REMOTE_WRITE;
+@@ -856,7 +857,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
+ pkt->mask & RXE_WRITE_MASK) ?
+ IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
+ wc->vendor_err = 0;
+- wc->byte_len = wqe->dma.length - wqe->dma.resid;
++ wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
++ pkt->mask & RXE_WRITE_MASK) ?
++ qp->resp.length : wqe->dma.length - wqe->dma.resid;
+
+ /* fields after byte_len are different between kernel and user
+ * space
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index e8be7f44e3be..28bfb3ece104 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -213,6 +213,7 @@ struct rxe_resp_info {
+ struct rxe_mem *mr;
+ u32 resid;
+ u32 rkey;
++ u32 length;
+ u64 atomic_orig;
+
+ /* SRQ only */
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 04ea7db08e87..ac0583ff280d 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1893,12 +1893,6 @@ static void ipoib_child_init(struct net_device *ndev)
+ struct ipoib_dev_priv *priv = ipoib_priv(ndev);
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+- dev_hold(priv->parent);
+-
+- down_write(&ppriv->vlan_rwsem);
+- list_add_tail(&priv->list, &ppriv->child_intfs);
+- up_write(&ppriv->vlan_rwsem);
+-
+ priv->max_ib_mtu = ppriv->max_ib_mtu;
+ set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
+ memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+@@ -1941,6 +1935,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
+ if (rc) {
+ pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
+ priv->ca->name, priv->dev->name, priv->port, rc);
++ return rc;
++ }
++
++ if (priv->parent) {
++ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
++
++ dev_hold(priv->parent);
++
++ down_write(&ppriv->vlan_rwsem);
++ list_add_tail(&priv->list, &ppriv->child_intfs);
++ up_write(&ppriv->vlan_rwsem);
+ }
+
+ return 0;
+@@ -1958,6 +1963,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
+ */
+ WARN_ON(!list_empty(&priv->child_intfs));
+
++ if (priv->parent) {
++ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
++
++ down_write(&ppriv->vlan_rwsem);
++ list_del(&priv->list);
++ up_write(&ppriv->vlan_rwsem);
++ }
++
+ ipoib_neigh_hash_uninit(dev);
+
+ ipoib_ib_dev_cleanup(dev);
+@@ -1969,15 +1982,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
+ priv->wq = NULL;
+ }
+
+- if (priv->parent) {
+- struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+-
+- down_write(&ppriv->vlan_rwsem);
+- list_del(&priv->list);
+- up_write(&ppriv->vlan_rwsem);
+-
++ if (priv->parent)
+ dev_put(priv->parent);
+- }
+ }
+
+ static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 162b3236e72c..2101601adf57 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3752,7 +3752,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
+
+ freelist = domain_unmap(domain, start_pfn, last_pfn);
+
+- if (intel_iommu_strict || (pdev && pdev->untrusted)) {
++ if (intel_iommu_strict || (pdev && pdev->untrusted) ||
++ !has_iova_flush_queue(&domain->iovad)) {
+ iommu_flush_iotlb_psi(iommu, domain, start_pfn,
+ nrpages, !freelist, 0);
+ /* free iova */
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index d499b2621239..3e1a8a675572 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ }
+ EXPORT_SYMBOL_GPL(init_iova_domain);
+
++bool has_iova_flush_queue(struct iova_domain *iovad)
++{
++ return !!iovad->fq;
++}
++
+ static void free_iova_flush_queue(struct iova_domain *iovad)
+ {
+- if (!iovad->fq)
++ if (!has_iova_flush_queue(iovad))
+ return;
+
+ if (timer_pending(&iovad->fq_timer))
+@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
+ int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
+ {
++ struct iova_fq __percpu *queue;
+ int cpu;
+
+ atomic64_set(&iovad->fq_flush_start_cnt, 0);
+ atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+
+- iovad->fq = alloc_percpu(struct iova_fq);
+- if (!iovad->fq)
++ queue = alloc_percpu(struct iova_fq);
++ if (!queue)
+ return -ENOMEM;
+
+ iovad->flush_cb = flush_cb;
+@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq;
+
+- fq = per_cpu_ptr(iovad->fq, cpu);
++ fq = per_cpu_ptr(queue, cpu);
+ fq->head = 0;
+ fq->tail = 0;
+
+ spin_lock_init(&fq->lock);
+ }
+
++ smp_wmb();
++
++ iovad->fq = queue;
++
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
+ atomic_set(&iovad->fq_timer_on, 0);
+
+@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
+ struct iova *cached_iova;
+
+ cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
+- if (free->pfn_hi < iovad->dma_32bit_pfn &&
+- free->pfn_lo >= cached_iova->pfn_lo) {
++ if (free == cached_iova ||
++ (free->pfn_hi < iovad->dma_32bit_pfn &&
++ free->pfn_lo >= cached_iova->pfn_lo)) {
+ iovad->cached32_node = rb_next(&free->node);
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+ }
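init_iova_flush_queue() now builds the per-CPU queue in a local variable and publishes it with smp_wmb(), so a concurrent has_iova_flush_queue() caller can never observe a half-initialised queue through iovad->fq. The essential ordering, condensed from the hunk:

	struct iova_fq __percpu *queue;
	int cpu;

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;
		spin_lock_init(&fq->lock);
	}

	smp_wmb();		/* init above visible before the pointer */
	iovad->fq = queue;	/* publication point for lock-free readers */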
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index f4b1950d35f3..0b821a5b2db8 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -418,11 +418,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
+
+ of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
+ if (!strncmp(name, mbox_name, strlen(name)))
+- break;
++ return mbox_request_channel(cl, index);
+ index++;
+ }
+
+- return mbox_request_channel(cl, index);
++ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
++ __func__, name);
++ return ERR_PTR(-EINVAL);
+ }
+ EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
+
+diff --git a/drivers/media/platform/coda/Makefile b/drivers/media/platform/coda/Makefile
+index f13adacd924e..cfe3ef8fad8a 100644
+--- a/drivers/media/platform/coda/Makefile
++++ b/drivers/media/platform/coda/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ ccflags-y += -I$(src)
+
+-coda-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
++coda-vpu-objs := coda-common.o coda-bit.o coda-gdi.o coda-h264.o coda-jpeg.o
+
+-obj-$(CONFIG_VIDEO_CODA) += coda.o
++obj-$(CONFIG_VIDEO_CODA) += coda-vpu.o
+ obj-$(CONFIG_VIDEO_IMX_VDOA) += imx-vdoa.o
+diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
+index 6cfb293396f2..693ee73eb291 100644
+--- a/drivers/memstick/core/memstick.c
++++ b/drivers/memstick/core/memstick.c
+@@ -625,13 +625,18 @@ static int __init memstick_init(void)
+ return -ENOMEM;
+
+ rc = bus_register(&memstick_bus_type);
+- if (!rc)
+- rc = class_register(&memstick_host_class);
++ if (rc)
++ goto error_destroy_workqueue;
+
+- if (!rc)
+- return 0;
++ rc = class_register(&memstick_host_class);
++ if (rc)
++ goto error_bus_unregister;
++
++ return 0;
+
++error_bus_unregister:
+ bus_unregister(&memstick_bus_type);
++error_destroy_workqueue:
+ destroy_workqueue(workqueue);
+
+ return rc;
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index 2bdc7b02157a..4a31907a4525 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -993,7 +993,7 @@ int arizona_dev_init(struct arizona *arizona)
+ unsigned int reg, val;
+ int (*apply_patch)(struct arizona *) = NULL;
+ const struct mfd_cell *subdevs = NULL;
+- int n_subdevs, ret, i;
++ int n_subdevs = 0, ret, i;
+
+ dev_set_drvdata(arizona->dev, arizona);
+ mutex_init(&arizona->clk_lock);
+diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
+index a5391f96eafd..607383b67cf1 100644
+--- a/drivers/mfd/cros_ec_dev.c
++++ b/drivers/mfd/cros_ec_dev.c
+@@ -285,13 +285,15 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+
+ resp = (struct ec_response_motion_sense *)msg->data;
+ sensor_num = resp->dump.sensor_count;
+- /* Allocate 1 extra sensors in FIFO are needed */
+- sensor_cells = kcalloc(sensor_num + 1, sizeof(struct mfd_cell),
++ /*
++	 * Allocate 2 extra sensors if the lid angle sensor and/or FIFO are needed.
++ */
++ sensor_cells = kcalloc(sensor_num + 2, sizeof(struct mfd_cell),
+ GFP_KERNEL);
+ if (sensor_cells == NULL)
+ goto error;
+
+- sensor_platforms = kcalloc(sensor_num + 1,
++ sensor_platforms = kcalloc(sensor_num,
+ sizeof(struct cros_ec_sensor_platform),
+ GFP_KERNEL);
+ if (sensor_platforms == NULL)
+@@ -351,6 +353,11 @@ static void cros_ec_sensors_register(struct cros_ec_dev *ec)
+ sensor_cells[id].name = "cros-ec-ring";
+ id++;
+ }
++ if (cros_ec_check_features(ec,
++ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS)) {
++ sensor_cells[id].name = "cros-ec-lid-angle";
++ id++;
++ }
+
+ ret = mfd_add_devices(ec->dev, 0, sensor_cells, id,
+ NULL, 0, NULL);
+diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
+index f1c51ce309fa..7e3959aaa285 100644
+--- a/drivers/mfd/hi655x-pmic.c
++++ b/drivers/mfd/hi655x-pmic.c
+@@ -109,6 +109,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
+
+ pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
+ &hi655x_regmap_config);
++ if (IS_ERR(pmic->regmap))
++ return PTR_ERR(pmic->regmap);
+
+ regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
+ if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
+diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
+index 2a77988d0462..826b971ccb86 100644
+--- a/drivers/mfd/madera-core.c
++++ b/drivers/mfd/madera-core.c
+@@ -286,6 +286,7 @@ const struct of_device_id madera_of_match[] = {
+ { .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
+ {}
+ };
++MODULE_DEVICE_TABLE(of, madera_of_match);
+ EXPORT_SYMBOL_GPL(madera_of_match);
+
+ static int madera_get_reset_gpio(struct madera *madera)
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index dbf684c4ebfb..23276a80e3b4 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -175,6 +175,7 @@ static int mfd_add_device(struct device *parent, int id,
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+ pdev->dev.of_node = np;
++ pdev->dev.fwnode = &np->fwnode;
+ break;
+ }
+ }
+diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
+index f88094719552..f2abe27010ef 100644
+--- a/drivers/misc/eeprom/Kconfig
++++ b/drivers/misc/eeprom/Kconfig
+@@ -5,6 +5,7 @@ config EEPROM_AT24
+ tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
+ depends on I2C && SYSFS
+ select NVMEM
++ select NVMEM_SYSFS
+ select REGMAP_I2C
+ help
+ Enable this driver to get read/write support to most I2C EEPROMs
+@@ -34,6 +35,7 @@ config EEPROM_AT25
+ tristate "SPI EEPROMs from most vendors"
+ depends on SPI && SYSFS
+ select NVMEM
++ select NVMEM_SYSFS
+ help
+ Enable this driver to get read/write support to most SPI EEPROMs,
+ after you configure the board init code to know about each eeprom
+@@ -80,6 +82,7 @@ config EEPROM_93XX46
+ depends on SPI && SYSFS
+ select REGMAP
+ select NVMEM
++ select NVMEM_SYSFS
+ help
+ Driver for the microwire EEPROM chipsets 93xx46x. The driver
+ supports both read and write commands and also the command to
+diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
+index d74b182e19f3..6c0173772162 100644
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -81,6 +81,9 @@
+
+ #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+
++#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
++#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
++
+ /*
+ * MEI HW Section
+ */
+diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
+index 7a2b3545a7f9..57cb68f5cc64 100644
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
++ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
++
+ /* required last entry */
+ {0, }
+ };
+diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
+index dd21315922c8..9dc4548271b4 100644
+--- a/drivers/mmc/host/sdhci-pci-o2micro.c
++++ b/drivers/mmc/host/sdhci-pci-o2micro.c
+@@ -395,11 +395,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
+ {
+ struct sdhci_pci_chip *chip;
+ struct sdhci_host *host;
+- u32 reg;
++ u32 reg, caps;
+ int ret;
+
+ chip = slot->chip;
+ host = slot->host;
++
++ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++
++ /*
++ * mmc_select_bus_width() will test the bus to determine the actual bus
++ * width.
++ */
++ if (caps & SDHCI_CAN_DO_8BIT)
++ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
++
+ switch (chip->pdev->device) {
+ case PCI_DEVICE_ID_O2_SDS0:
+ case PCI_DEVICE_ID_O2_SEABIRD0:
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+index a76529a7662d..c2e92786608b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -1054,14 +1054,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
+ }
+ }
+
+-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+- struct cudbg_buffer *dbg_buff,
+- struct cudbg_error *cudbg_err,
+- u8 mem_type)
++static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
++ struct cudbg_error *cudbg_err,
++ u8 mem_type)
+ {
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+- unsigned long size;
+ u8 mc_idx;
+ int rc;
+
+@@ -1075,7 +1073,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+ if (rc)
+ return rc;
+
+- size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
++ return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
++}
++
++static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
++ struct cudbg_buffer *dbg_buff,
++ struct cudbg_error *cudbg_err,
++ u8 mem_type)
++{
++ unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
++
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
+ }
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 2dca3034fee0..dfb93228d6a7 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
+ {
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ if (--nvdimm_bus->probe_active == 0)
+- wake_up(&nvdimm_bus->probe_wait);
++ wake_up(&nvdimm_bus->wait);
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+ }
+
+@@ -341,7 +341,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
+ return NULL;
+ INIT_LIST_HEAD(&nvdimm_bus->list);
+ INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
+- init_waitqueue_head(&nvdimm_bus->probe_wait);
++ init_waitqueue_head(&nvdimm_bus->wait);
+ nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
+ if (nvdimm_bus->id < 0) {
+ kfree(nvdimm_bus);
+@@ -426,6 +426,9 @@ static int nd_bus_remove(struct device *dev)
+ list_del_init(&nvdimm_bus->list);
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
++ wait_event(nvdimm_bus->wait,
++ atomic_read(&nvdimm_bus->ioctl_active) == 0);
++
+ nd_synchronize();
+ device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
+
+@@ -547,13 +550,38 @@ EXPORT_SYMBOL(nd_device_register);
+
+ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
+ {
++ bool killed;
++
+ switch (mode) {
+ case ND_ASYNC:
++ /*
++ * In the async case this is being triggered with the
++ * device lock held and the unregistration work needs to
++		 * be moved out of line iff this thread has won the
++ * race to schedule the deletion.
++ */
++ if (!kill_device(dev))
++ return;
++
+ get_device(dev);
+ async_schedule_domain(nd_async_device_unregister, dev,
+ &nd_async_domain);
+ break;
+ case ND_SYNC:
++ /*
++ * In the sync case the device is being unregistered due
++ * to a state change of the parent. Claim the kill state
++ * to synchronize against other unregistration requests,
++ * or otherwise let the async path handle it if the
++ * unregistration was already queued.
++ */
++ device_lock(dev);
++ killed = kill_device(dev);
++ device_unlock(dev);
++
++ if (!killed)
++ return;
++
+ nd_synchronize();
+ device_unregister(dev);
+ break;
+@@ -860,7 +888,7 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
+ if (nvdimm_bus->probe_active == 0)
+ break;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+- wait_event(nvdimm_bus->probe_wait,
++ wait_event(nvdimm_bus->wait,
+ nvdimm_bus->probe_active == 0);
+ nvdimm_bus_lock(&nvdimm_bus->dev);
+ } while (true);
+@@ -1090,24 +1118,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ return rc;
+ }
+
+-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+-{
+- long id = (long) file->private_data;
+- int rc = -ENXIO, ro;
+- struct nvdimm_bus *nvdimm_bus;
+-
+- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
+- mutex_lock(&nvdimm_bus_list_mutex);
+- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
+- if (nvdimm_bus->id == id) {
+- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
+- break;
+- }
+- }
+- mutex_unlock(&nvdimm_bus_list_mutex);
+-
+- return rc;
+-}
++enum nd_ioctl_mode {
++ BUS_IOCTL,
++ DIMM_IOCTL,
++};
+
+ static int match_dimm(struct device *dev, void *data)
+ {
+@@ -1122,31 +1136,62 @@ static int match_dimm(struct device *dev, void *data)
+ return 0;
+ }
+
+-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
++ enum nd_ioctl_mode mode)
++
+ {
+- int rc = -ENXIO, ro;
+- struct nvdimm_bus *nvdimm_bus;
++ struct nvdimm_bus *nvdimm_bus, *found = NULL;
++ long id = (long) file->private_data;
++ struct nvdimm *nvdimm = NULL;
++ int rc, ro;
+
+ ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
+ mutex_lock(&nvdimm_bus_list_mutex);
+ list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
+- struct device *dev = device_find_child(&nvdimm_bus->dev,
+- file->private_data, match_dimm);
+- struct nvdimm *nvdimm;
+-
+- if (!dev)
+- continue;
++ if (mode == DIMM_IOCTL) {
++ struct device *dev;
++
++ dev = device_find_child(&nvdimm_bus->dev,
++ file->private_data, match_dimm);
++ if (!dev)
++ continue;
++ nvdimm = to_nvdimm(dev);
++ found = nvdimm_bus;
++ } else if (nvdimm_bus->id == id) {
++ found = nvdimm_bus;
++ }
+
+- nvdimm = to_nvdimm(dev);
+- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+- put_device(dev);
+- break;
++ if (found) {
++ atomic_inc(&nvdimm_bus->ioctl_active);
++ break;
++ }
+ }
+ mutex_unlock(&nvdimm_bus_list_mutex);
+
++ if (!found)
++ return -ENXIO;
++
++ nvdimm_bus = found;
++ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
++
++ if (nvdimm)
++ put_device(&nvdimm->dev);
++ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
++ wake_up(&nvdimm_bus->wait);
++
+ return rc;
+ }
+
++static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
++}
++
++static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
++}
++
+ static int nd_open(struct inode *inode, struct file *file)
+ {
+ long minor = iminor(inode);
+@@ -1158,16 +1203,16 @@ static int nd_open(struct inode *inode, struct file *file)
+ static const struct file_operations nvdimm_bus_fops = {
+ .owner = THIS_MODULE,
+ .open = nd_open,
+- .unlocked_ioctl = nd_ioctl,
+- .compat_ioctl = nd_ioctl,
++ .unlocked_ioctl = bus_ioctl,
++ .compat_ioctl = bus_ioctl,
+ .llseek = noop_llseek,
+ };
+
+ static const struct file_operations nvdimm_fops = {
+ .owner = THIS_MODULE,
+ .open = nd_open,
+- .unlocked_ioctl = nvdimm_ioctl,
+- .compat_ioctl = nvdimm_ioctl,
++ .unlocked_ioctl = dimm_ioctl,
++ .compat_ioctl = dimm_ioctl,
+ .llseek = noop_llseek,
+ };
+
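The ioctl rework above pairs an active-count with the renamed bus wait queue so that nd_bus_remove() can drain in-flight ioctls after unlisting the bus. The lifecycle, condensed (locking and the list lookup elided):

	/* ioctl entry: count ourselves in while the bus is still listed */
	atomic_inc(&nvdimm_bus->ioctl_active);

	/* ... __nd_ioctl() runs ... */

	/* ioctl exit: the last one out wakes any waiting teardown */
	if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
		wake_up(&nvdimm_bus->wait);

	/* teardown: the bus is already off nvdimm_bus_list, so no new
	 * ioctl can find it and this wait is guaranteed to terminate */
	wait_event(nvdimm_bus->wait,
			atomic_read(&nvdimm_bus->ioctl_active) == 0);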
+diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
+index 391e88de3a29..6cd470547106 100644
+--- a/drivers/nvdimm/nd-core.h
++++ b/drivers/nvdimm/nd-core.h
+@@ -17,10 +17,11 @@ extern struct workqueue_struct *nvdimm_wq;
+
+ struct nvdimm_bus {
+ struct nvdimm_bus_descriptor *nd_desc;
+- wait_queue_head_t probe_wait;
++ wait_queue_head_t wait;
+ struct list_head list;
+ struct device dev;
+ int id, probe_active;
++ atomic_t ioctl_active;
+ struct list_head mapping_list;
+ struct mutex reconfig_mutex;
+ struct badrange badrange;
+diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
+index ef46cc3a71ae..488c47ac4c4a 100644
+--- a/drivers/nvdimm/region.c
++++ b/drivers/nvdimm/region.c
+@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
+ if (rc)
+ return rc;
+
+- rc = nd_region_register_namespaces(nd_region, &err);
+- if (rc < 0)
+- return rc;
+-
+- ndrd = dev_get_drvdata(dev);
+- ndrd->ns_active = rc;
+- ndrd->ns_count = rc + err;
+-
+- if (rc && err && rc == err)
+- return -ENODEV;
+-
+ if (is_nd_pmem(&nd_region->dev)) {
+ struct resource ndr_res;
+
+@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
+ nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
+ }
+
++ rc = nd_region_register_namespaces(nd_region, &err);
++ if (rc < 0)
++ return rc;
++
++ ndrd = dev_get_drvdata(dev);
++ ndrd->ns_active = rc;
++ ndrd->ns_count = rc + err;
++
++ if (rc && err && rc == err)
++ return -ENODEV;
++
+ nd_region->btt_seed = nd_btt_create(nd_region);
+ nd_region->pfn_seed = nd_pfn_create(nd_region);
+ nd_region->dax_seed = nd_dax_create(nd_region);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 22c68e3b71d5..4a1d2ab4d161 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -11,6 +11,7 @@
+ #include <linux/hdreg.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#include <linux/backing-dev.h>
+ #include <linux/list_sort.h>
+ #include <linux/slab.h>
+ #include <linux/types.h>
+@@ -3256,6 +3257,10 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+ goto out_free_ns;
+ }
+
++ if (ctrl->opts && ctrl->opts->data_digest)
++ ns->queue->backing_dev_info->capabilities
++ |= BDI_CAP_STABLE_WRITES;
++
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index f5bc1c30cef5..7fbcd72c438f 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1456,11 +1456,15 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+- nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+- nvmeq->sq_cmds);
+- if (nvmeq->sq_dma_addr) {
+- set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
+- return 0;
++ if (nvmeq->sq_cmds) {
++ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
++ nvmeq->sq_cmds);
++ if (nvmeq->sq_dma_addr) {
++ set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
++ return 0;
++ }
++
++ pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+ }
+ }
+
+@@ -2517,7 +2521,8 @@ static void nvme_reset_work(struct work_struct *work)
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+- dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
++ dev->ctrl.max_hw_sectors = min_t(u32,
++ NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9);
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
+index 08a2501b9357..606b13d35d16 100644
+--- a/drivers/nvme/host/tcp.c
++++ b/drivers/nvme/host/tcp.c
+@@ -860,7 +860,14 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+ else
+ flags |= MSG_MORE;
+
+- ret = kernel_sendpage(queue->sock, page, offset, len, flags);
++ /* can't zcopy slab pages */
++ if (unlikely(PageSlab(page))) {
++ ret = sock_no_sendpage(queue->sock, page, offset, len,
++ flags);
++ } else {
++ ret = kernel_sendpage(queue->sock, page, offset, len,
++ flags);
++ }
+ if (ret <= 0)
+ return ret;
+
+diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
+index 419451efd58c..4234ddb4722f 100644
+--- a/drivers/pci/controller/dwc/pci-dra7xx.c
++++ b/drivers/pci/controller/dwc/pci-dra7xx.c
+@@ -26,6 +26,7 @@
+ #include <linux/types.h>
+ #include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
++#include <linux/gpio/consumer.h>
+
+ #include "../../pci.h"
+ #include "pcie-designware.h"
+diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
+index 77052a0712d0..387a20f3c240 100644
+--- a/drivers/pci/controller/pcie-mobiveil.c
++++ b/drivers/pci/controller/pcie-mobiveil.c
+@@ -501,6 +501,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ return err;
+ }
+
++ /* setup bus numbers */
++ value = csr_readl(pcie, PCI_PRIMARY_BUS);
++ value &= 0xff000000;
++ value |= 0x00ff0100;
++ csr_writel(pcie, value, PCI_PRIMARY_BUS);
++
+ /*
+ * program Bus Master Enable Bit in Command Register in PAB Config
+ * Space
+@@ -540,7 +546,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ resource_size(pcie->ob_io_res));
+
+ /* memory inbound translation window */
+- program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
++ program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+
+ /* Get the I/O and memory ranges from DT */
+ resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
+@@ -552,11 +558,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
+ if (type) {
+ /* configure outbound translation window */
+ program_ob_windows(pcie, pcie->ob_wins_configured,
+- win->res->start, 0, type,
+- resource_size(win->res));
++ win->res->start,
++ win->res->start - win->offset,
++ type, resource_size(win->res));
+ }
+ }
+
++ /* fixup for PCIe class register */
++ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
++ value &= 0xff;
++ value |= (PCI_CLASS_BRIDGE_PCI << 16);
++ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
++
+ /* setup MSI hardware registers */
+ mobiveil_pcie_enable_msi(pcie);
+
+@@ -797,9 +810,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
+ goto error;
+ }
+
+- /* fixup for PCIe class register */
+- csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
+-
+ /* initialize the IRQ domains */
+ ret = mobiveil_pcie_init_irq_domain(pcie);
+ if (ret) {
+diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
+index 3b031f00a94a..45c0f344ccd1 100644
+--- a/drivers/pci/controller/pcie-xilinx-nwl.c
++++ b/drivers/pci/controller/pcie-xilinx-nwl.c
+@@ -482,15 +482,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ int i;
+
+ mutex_lock(&msi->lock);
+- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
+- nr_irqs, 0);
+- if (bit >= INT_PCI_MSI_NR) {
++ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
++ get_count_order(nr_irqs));
++ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+- bitmap_set(msi->bitmap, bit, nr_irqs);
+-
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
+ domain->host_data, handle_simple_irq,
+@@ -508,7 +506,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ struct nwl_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
++ bitmap_release_region(msi->bitmap, data->hwirq,
++ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+ }
+
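bitmap_find_free_region() hands out a naturally aligned, power-of-two block, which is what multi-MSI hardware decoding requires; the previous bitmap_find_next_zero_area(..., nr_irqs, 0) call only guaranteed contiguity. Allocation and release must agree on the same order:

	int order = get_count_order(nr_irqs);	/* round up to 2^order */

	bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, order);
	if (bit < 0)
		return -ENOSPC;			/* no aligned block left */

	/* ... hwirqs bit .. bit + nr_irqs - 1 handed to the domain ... */

	bitmap_release_region(msi->bitmap, bit, order);	/* on free */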
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 27806987e93b..7d41e6684b87 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -434,10 +434,16 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
+ int bar;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ const struct pci_epc_features *epc_features;
++ size_t test_reg_size;
+
+ epc_features = epf_test->epc_features;
+
+- base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg),
++ if (epc_features->bar_fixed_size[test_reg_bar])
++ test_reg_size = bar_size[test_reg_bar];
++ else
++ test_reg_size = sizeof(struct pci_epf_test_reg);
++
++ base = pci_epf_alloc_space(epf, test_reg_size,
+ test_reg_bar, epc_features->align);
+ if (!base) {
+ dev_err(dev, "Failed to allocated register space\n");
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index ca3793002e2f..74c3df250d9c 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -414,6 +414,9 @@ static int pci_device_probe(struct device *dev)
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = to_pci_driver(dev->driver);
+
++ if (!pci_device_can_probe(pci_dev))
++ return -ENODEV;
++
+ pci_assign_irq(pci_dev);
+
+ error = pcibios_alloc_irq(pci_dev);
+@@ -421,12 +424,10 @@ static int pci_device_probe(struct device *dev)
+ return error;
+
+ pci_dev_get(pci_dev);
+- if (pci_device_can_probe(pci_dev)) {
+- error = __pci_device_probe(drv, pci_dev);
+- if (error) {
+- pcibios_free_irq(pci_dev);
+- pci_dev_put(pci_dev);
+- }
++ error = __pci_device_probe(drv, pci_dev);
++ if (error) {
++ pcibios_free_irq(pci_dev);
++ pci_dev_put(pci_dev);
+ }
+
+ return error;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 6d27475e39b2..4e83c347de5d 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -477,7 +477,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
+ return count;
+ }
+-static struct device_attribute dev_remove_attr = __ATTR(remove,
++static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
+
+diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+index 6233a7979a93..ac322d643c7a 100644
+--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
++++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+@@ -188,7 +188,7 @@ static const struct regmap_config phy_g12a_usb3_pcie_cr_regmap_conf = {
+ .reg_read = phy_g12a_usb3_pcie_cr_bus_read,
+ .reg_write = phy_g12a_usb3_pcie_cr_bus_write,
+ .max_register = 0xffff,
+- .fast_io = true,
++ .disable_locking = true,
+ };
+
+ static int phy_g12a_usb3_init(struct phy *phy)
+diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
+index 8dc5710d9c98..2926e4937301 100644
+--- a/drivers/phy/renesas/phy-rcar-gen2.c
++++ b/drivers/phy/renesas/phy-rcar-gen2.c
+@@ -391,6 +391,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
+ error = of_property_read_u32(np, "reg", &channel_num);
+ if (error || channel_num > 2) {
+ dev_err(dev, "Invalid \"reg\" property\n");
++ of_node_put(np);
+ return error;
+ }
+ channel->select_mask = select_mask[channel_num];
+@@ -406,6 +407,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
+ data->gen2_phy_ops);
+ if (IS_ERR(phy->phy)) {
+ dev_err(dev, "Failed to create PHY\n");
++ of_node_put(np);
+ return PTR_ERR(phy->phy);
+ }
+ phy_set_drvdata(phy->phy, phy);
+diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+index 1322185a00a2..8ffba67568ec 100644
+--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+@@ -13,6 +13,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+@@ -106,6 +107,7 @@ struct rcar_gen3_chan {
+ struct rcar_gen3_phy rphys[NUM_OF_PHYS];
+ struct regulator *vbus;
+ struct work_struct work;
++ struct mutex lock; /* protects rphys[...].powered */
+ enum usb_dr_mode dr_mode;
+ bool extcon_host;
+ bool is_otg_channel;
+@@ -437,15 +439,16 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ void __iomem *usb2_base = channel->base;
+ u32 val;
+- int ret;
++ int ret = 0;
+
++ mutex_lock(&channel->lock);
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+- return 0;
++ goto out;
+
+ if (channel->vbus) {
+ ret = regulator_enable(channel->vbus);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ val = readl(usb2_base + USB2_USBCTR);
+@@ -454,7 +457,10 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
+ val &= ~USB2_USBCTR_PLL_RST;
+ writel(val, usb2_base + USB2_USBCTR);
+
++out:
++	/* Mark this PHY powered even when the power-up above was skipped */
+ rphy->powered = true;
++ mutex_unlock(&channel->lock);
+
+ return 0;
+ }
+@@ -465,14 +471,18 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+ struct rcar_gen3_chan *channel = rphy->ch;
+ int ret = 0;
+
++ mutex_lock(&channel->lock);
+ rphy->powered = false;
+
+ if (!rcar_gen3_are_all_rphys_power_off(channel))
+- return 0;
++ goto out;
+
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
++out:
++ mutex_unlock(&channel->lock);
++
+ return ret;
+ }
+
+@@ -639,6 +649,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
+ if (!phy_usb2_ops)
+ return -EINVAL;
+
++ mutex_init(&channel->lock);
+ for (i = 0; i < NUM_OF_PHYS; i++) {
+ channel->rphys[i].phy = devm_phy_create(dev, NULL,
+ phy_usb2_ops);
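The new channel->lock closes a race where two PHY instances sharing one VBUS regulator could each conclude they were "first on" or "last off" and double-enable or double-disable it. A sketch of the guarded reference pattern, with hypothetical names standing in for the driver's:

	static int foo_phy_power_on(struct foo_phy *p)
	{
		struct foo_chan *ch = p->ch;
		int ret = 0;

		mutex_lock(&ch->lock);
		if (foo_all_phys_are_off(ch) && ch->vbus)	/* first user */
			ret = regulator_enable(ch->vbus);
		if (!ret)
			p->powered = true;	/* peers read this under the lock */
		mutex_unlock(&ch->lock);

		return ret;
	}

power_off mirrors this: clear powered, and only if every peer is now off disable the regulator, all under the same mutex.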
+diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
+index 807a3263d849..62a622159006 100644
+--- a/drivers/pinctrl/pinctrl-rockchip.c
++++ b/drivers/pinctrl/pinctrl-rockchip.c
+@@ -3204,6 +3204,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
+ base,
+ &rockchip_regmap_config);
+ }
++ of_node_put(node);
+ }
+
+ bank->irq = irq_of_parse_and_map(bank->of_node, 0);
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 5d5cc6111081..7c2fd1d72e18 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -1317,7 +1317,7 @@ config HUAWEI_WMI
+
+ config PCENGINES_APU2
+ tristate "PC Engines APUv2/3 front button and LEDs driver"
+- depends on INPUT && INPUT_KEYBOARD
++ depends on INPUT && INPUT_KEYBOARD && GPIOLIB
+ depends on LEDS_CLASS
+ select GPIO_AMD_FCH
+ select KEYBOARD_GPIO_POLLED
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 9b18a184e0aa..abfa99d18fea 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -85,6 +85,7 @@ static bool ashs_present(void)
+ struct bios_args {
+ u32 arg0;
+ u32 arg1;
++ u32 arg2; /* At least the TUF Gaming series uses a 3-dword input buffer. */
+ } __packed;
+
+ /*
+@@ -211,11 +212,13 @@ static void asus_wmi_input_exit(struct asus_wmi *asus)
+ asus->inputdev = NULL;
+ }
+
+-int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
++static int asus_wmi_evaluate_method3(u32 method_id,
++ u32 arg0, u32 arg1, u32 arg2, u32 *retval)
+ {
+ struct bios_args args = {
+ .arg0 = arg0,
+ .arg1 = arg1,
++ .arg2 = arg2,
+ };
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+@@ -247,6 +250,11 @@ exit:
+
+ return 0;
+ }
++
++int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
++{
++ return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
++}
+ EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
+
+ static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args)
+diff --git a/drivers/regulator/88pm800-regulator.c b/drivers/regulator/88pm800-regulator.c
+new file mode 100644
+index 000000000000..69ae25886181
+--- /dev/null
++++ b/drivers/regulator/88pm800-regulator.c
+@@ -0,0 +1,286 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Regulators driver for Marvell 88PM800
++ *
++ * Copyright (C) 2012 Marvell International Ltd.
++ * Joseph(Yossi) Hanin <yhanin@marvell.com>
++ * Yi Zhang <yizhang@marvell.com>
++ */
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/regmap.h>
++#include <linux/regulator/driver.h>
++#include <linux/regulator/machine.h>
++#include <linux/mfd/88pm80x.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/regulator/of_regulator.h>
++
++/* LDO1 with DVC[0..3] */
++#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
++#define PM800_LDO1_VOUT_2 (0x09)
++#define PM800_LDO1_VOUT_3 (0x0A)
++#define PM800_LDO2_VOUT (0x0B)
++#define PM800_LDO3_VOUT (0x0C)
++#define PM800_LDO4_VOUT (0x0D)
++#define PM800_LDO5_VOUT (0x0E)
++#define PM800_LDO6_VOUT (0x0F)
++#define PM800_LDO7_VOUT (0x10)
++#define PM800_LDO8_VOUT (0x11)
++#define PM800_LDO9_VOUT (0x12)
++#define PM800_LDO10_VOUT (0x13)
++#define PM800_LDO11_VOUT (0x14)
++#define PM800_LDO12_VOUT (0x15)
++#define PM800_LDO13_VOUT (0x16)
++#define PM800_LDO14_VOUT (0x17)
++#define PM800_LDO15_VOUT (0x18)
++#define PM800_LDO16_VOUT (0x19)
++#define PM800_LDO17_VOUT (0x1A)
++#define PM800_LDO18_VOUT (0x1B)
++#define PM800_LDO19_VOUT (0x1C)
++
++/* BUCK1 with DVC[0..3] */
++#define PM800_BUCK1 (0x3C)
++#define PM800_BUCK1_1 (0x3D)
++#define PM800_BUCK1_2 (0x3E)
++#define PM800_BUCK1_3 (0x3F)
++#define PM800_BUCK2 (0x40)
++#define PM800_BUCK3 (0x41)
++#define PM800_BUCK4 (0x42)
++#define PM800_BUCK4_1 (0x43)
++#define PM800_BUCK4_2 (0x44)
++#define PM800_BUCK4_3 (0x45)
++#define PM800_BUCK5 (0x46)
++
++#define PM800_BUCK_ENA (0x50)
++#define PM800_LDO_ENA1_1 (0x51)
++#define PM800_LDO_ENA1_2 (0x52)
++#define PM800_LDO_ENA1_3 (0x53)
++
++#define PM800_LDO_ENA2_1 (0x56)
++#define PM800_LDO_ENA2_2 (0x57)
++#define PM800_LDO_ENA2_3 (0x58)
++
++#define PM800_BUCK1_MISC1 (0x78)
++#define PM800_BUCK3_MISC1 (0x7E)
++#define PM800_BUCK4_MISC1 (0x81)
++#define PM800_BUCK5_MISC1 (0x84)
++
++struct pm800_regulator_info {
++ struct regulator_desc desc;
++ int max_ua;
++};
++
++/*
++ * vreg - the buck regs string.
++ * ereg - the string for the enable register.
++ * ebit - the bit number in the enable register.
++ * amax - the maximum current, in microamps
++ * The bucks have two voltage step sizes, so the voltage is easier to
++ * describe with linear ranges than with a constant voltage table.
++ * n_volt - Number of available selectors
++ */
++#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
++{ \
++ .desc = { \
++ .name = #vreg, \
++ .of_match = of_match_ptr(#match), \
++ .regulators_node = of_match_ptr("regulators"), \
++ .ops = &pm800_volt_range_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .id = PM800_ID_##vreg, \
++ .owner = THIS_MODULE, \
++ .n_voltages = n_volt, \
++ .linear_ranges = volt_ranges, \
++ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
++ .vsel_reg = PM800_##vreg, \
++ .vsel_mask = 0x7f, \
++ .enable_reg = PM800_##ereg, \
++ .enable_mask = 1 << (ebit), \
++ }, \
++ .max_ua = (amax), \
++}
++
++/*
++ * vreg - the LDO regs string
++ * ereg - the string for the enable register.
++ * ebit - the bit number in the enable register.
++ * amax - the maximum current, in microamps
++ * volt_table - the LDO voltage table
++ * The LDOs would need too many linear ranges, so a voltage table is
++ * simpler and faster.
++ */
++#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
++{ \
++ .desc = { \
++ .name = #vreg, \
++ .of_match = of_match_ptr(#match), \
++ .regulators_node = of_match_ptr("regulators"), \
++ .ops = &pm800_volt_table_ops, \
++ .type = REGULATOR_VOLTAGE, \
++ .id = PM800_ID_##vreg, \
++ .owner = THIS_MODULE, \
++ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
++ .vsel_reg = PM800_##vreg##_VOUT, \
++ .vsel_mask = 0xf, \
++ .enable_reg = PM800_##ereg, \
++ .enable_mask = 1 << (ebit), \
++ .volt_table = ldo_volt_table, \
++ }, \
++ .max_ua = (amax), \
++}
++
++/* Ranges are sorted in ascending order. */
++static const struct regulator_linear_range buck1_volt_range[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
++ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
++};
++
++/* BUCK 2~5 have same ranges. */
++static const struct regulator_linear_range buck2_5_volt_range[] = {
++ REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
++ REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
++};
++
++static const unsigned int ldo1_volt_table[] = {
++ 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
++ 1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
++};
++
++static const unsigned int ldo2_volt_table[] = {
++ 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
++};
++
++/* LDO 3~17 have same voltage table. */
++static const unsigned int ldo3_17_volt_table[] = {
++ 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
++ 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
++};
++
++/* LDO 18~19 have same voltage table. */
++static const unsigned int ldo18_19_volt_table[] = {
++ 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
++};
++
++static int pm800_get_current_limit(struct regulator_dev *rdev)
++{
++ struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
++
++ return info->max_ua;
++}
++
++static const struct regulator_ops pm800_volt_range_ops = {
++ .list_voltage = regulator_list_voltage_linear_range,
++ .map_voltage = regulator_map_voltage_linear_range,
++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
++ .enable = regulator_enable_regmap,
++ .disable = regulator_disable_regmap,
++ .is_enabled = regulator_is_enabled_regmap,
++ .get_current_limit = pm800_get_current_limit,
++};
++
++static const struct regulator_ops pm800_volt_table_ops = {
++ .list_voltage = regulator_list_voltage_table,
++ .map_voltage = regulator_map_voltage_iterate,
++ .set_voltage_sel = regulator_set_voltage_sel_regmap,
++ .get_voltage_sel = regulator_get_voltage_sel_regmap,
++ .enable = regulator_enable_regmap,
++ .disable = regulator_disable_regmap,
++ .is_enabled = regulator_is_enabled_regmap,
++ .get_current_limit = pm800_get_current_limit,
++};
++
++/* The array is indexed by id(PM800_ID_XXX) */
++static struct pm800_regulator_info pm800_regulator_info[] = {
++ PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
++ PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
++ PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
++
++ PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
++ PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
++ PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
++ PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
++ PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
++};
++
++static int pm800_regulator_probe(struct platform_device *pdev)
++{
++ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
++ struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
++ struct regulator_config config = { };
++ struct regulator_init_data *init_data;
++ int i, ret;
++
++ if (pdata && pdata->num_regulators) {
++ unsigned int count = 0;
++
++ /* Check whether num_regulators is valid. */
++ for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
++ if (pdata->regulators[i])
++ count++;
++ }
++ if (count != pdata->num_regulators)
++ return -EINVAL;
++ }
++
++ config.dev = chip->dev;
++ config.regmap = chip->subchip->regmap_power;
++ for (i = 0; i < PM800_ID_RG_MAX; i++) {
++ struct regulator_dev *regulator;
++
++ if (pdata && pdata->num_regulators) {
++ init_data = pdata->regulators[i];
++ if (!init_data)
++ continue;
++
++ config.init_data = init_data;
++ }
++
++ config.driver_data = &pm800_regulator_info[i];
++
++ regulator = devm_regulator_register(&pdev->dev,
++ &pm800_regulator_info[i].desc, &config);
++ if (IS_ERR(regulator)) {
++ ret = PTR_ERR(regulator);
++ dev_err(&pdev->dev, "Failed to register %s\n",
++ pm800_regulator_info[i].desc.name);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
++static struct platform_driver pm800_regulator_driver = {
++ .driver = {
++ .name = "88pm80x-regulator",
++ },
++ .probe = pm800_regulator_probe,
++};
++
++module_platform_driver(pm800_regulator_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@marvell.com>");
++MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
++MODULE_ALIAS("platform:88pm800-regulator");
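The PM800_BUCK descriptors in the file above encode each regulator's voltage map as linear ranges (a start voltage, a selector span, and a step size) instead of a full table. A standalone sketch of the selector-to-voltage arithmetic those tables imply; the range values are copied from buck1_volt_range, everything else is illustrative:

	#include <stdio.h>

	struct linear_range {
		unsigned int min_uv;   /* voltage at min_sel */
		unsigned int min_sel;  /* first selector of the range */
		unsigned int max_sel;  /* last selector of the range */
		unsigned int step_uv;  /* microvolts per selector step */
	};

	/* Same numbers as buck1_volt_range: 0.6V..1.5875V in 12.5mV steps,
	 * then 1.6V..1.8V in 50mV steps. */
	static const struct linear_range buck1[] = {
		{  600000, 0x00, 0x4f, 12500 },
		{ 1600000, 0x50, 0x54, 50000 },
	};

	static int sel_to_uv(const struct linear_range *r, int nr, unsigned int sel)
	{
		int i;

		for (i = 0; i < nr; i++)
			if (sel >= r[i].min_sel && sel <= r[i].max_sel)
				return r[i].min_uv + (sel - r[i].min_sel) * r[i].step_uv;
		return -1; /* selector out of range */
	}

	int main(void)
	{
		/* 0x55 selectors in total, matching n_volt in PM800_BUCK(buck1, ...) */
		printf("sel 0x00 -> %d uV\n", sel_to_uv(buck1, 2, 0x00)); /* 600000 */
		printf("sel 0x4f -> %d uV\n", sel_to_uv(buck1, 2, 0x4f)); /* 1587500 */
		printf("sel 0x54 -> %d uV\n", sel_to_uv(buck1, 2, 0x54)); /* 1800000 */
		return 0;
	}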
+diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
+deleted file mode 100644
+index 69ae25886181..000000000000
+--- a/drivers/regulator/88pm800.c
++++ /dev/null
+@@ -1,286 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Regulators driver for Marvell 88PM800
+- *
+- * Copyright (C) 2012 Marvell International Ltd.
+- * Joseph(Yossi) Hanin <yhanin@marvell.com>
+- * Yi Zhang <yizhang@marvell.com>
+- */
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/init.h>
+-#include <linux/err.h>
+-#include <linux/regmap.h>
+-#include <linux/regulator/driver.h>
+-#include <linux/regulator/machine.h>
+-#include <linux/mfd/88pm80x.h>
+-#include <linux/delay.h>
+-#include <linux/io.h>
+-#include <linux/of.h>
+-#include <linux/regulator/of_regulator.h>
+-
+-/* LDO1 with DVC[0..3] */
+-#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
+-#define PM800_LDO1_VOUT_2 (0x09)
+-#define PM800_LDO1_VOUT_3 (0x0A)
+-#define PM800_LDO2_VOUT (0x0B)
+-#define PM800_LDO3_VOUT (0x0C)
+-#define PM800_LDO4_VOUT (0x0D)
+-#define PM800_LDO5_VOUT (0x0E)
+-#define PM800_LDO6_VOUT (0x0F)
+-#define PM800_LDO7_VOUT (0x10)
+-#define PM800_LDO8_VOUT (0x11)
+-#define PM800_LDO9_VOUT (0x12)
+-#define PM800_LDO10_VOUT (0x13)
+-#define PM800_LDO11_VOUT (0x14)
+-#define PM800_LDO12_VOUT (0x15)
+-#define PM800_LDO13_VOUT (0x16)
+-#define PM800_LDO14_VOUT (0x17)
+-#define PM800_LDO15_VOUT (0x18)
+-#define PM800_LDO16_VOUT (0x19)
+-#define PM800_LDO17_VOUT (0x1A)
+-#define PM800_LDO18_VOUT (0x1B)
+-#define PM800_LDO19_VOUT (0x1C)
+-
+-/* BUCK1 with DVC[0..3] */
+-#define PM800_BUCK1 (0x3C)
+-#define PM800_BUCK1_1 (0x3D)
+-#define PM800_BUCK1_2 (0x3E)
+-#define PM800_BUCK1_3 (0x3F)
+-#define PM800_BUCK2 (0x40)
+-#define PM800_BUCK3 (0x41)
+-#define PM800_BUCK4 (0x42)
+-#define PM800_BUCK4_1 (0x43)
+-#define PM800_BUCK4_2 (0x44)
+-#define PM800_BUCK4_3 (0x45)
+-#define PM800_BUCK5 (0x46)
+-
+-#define PM800_BUCK_ENA (0x50)
+-#define PM800_LDO_ENA1_1 (0x51)
+-#define PM800_LDO_ENA1_2 (0x52)
+-#define PM800_LDO_ENA1_3 (0x53)
+-
+-#define PM800_LDO_ENA2_1 (0x56)
+-#define PM800_LDO_ENA2_2 (0x57)
+-#define PM800_LDO_ENA2_3 (0x58)
+-
+-#define PM800_BUCK1_MISC1 (0x78)
+-#define PM800_BUCK3_MISC1 (0x7E)
+-#define PM800_BUCK4_MISC1 (0x81)
+-#define PM800_BUCK5_MISC1 (0x84)
+-
+-struct pm800_regulator_info {
+- struct regulator_desc desc;
+- int max_ua;
+-};
+-
+-/*
+- * vreg - the buck regs string.
+- * ereg - the string for the enable register.
+- * ebit - the bit number in the enable register.
+- * amax - the current
+- * Buck has 2 kinds of voltage steps. It is easy to find voltage by ranges,
+- * not the constant voltage table.
+- * n_volt - Number of available selectors
+- */
+-#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt) \
+-{ \
+- .desc = { \
+- .name = #vreg, \
+- .of_match = of_match_ptr(#match), \
+- .regulators_node = of_match_ptr("regulators"), \
+- .ops = &pm800_volt_range_ops, \
+- .type = REGULATOR_VOLTAGE, \
+- .id = PM800_ID_##vreg, \
+- .owner = THIS_MODULE, \
+- .n_voltages = n_volt, \
+- .linear_ranges = volt_ranges, \
+- .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+- .vsel_reg = PM800_##vreg, \
+- .vsel_mask = 0x7f, \
+- .enable_reg = PM800_##ereg, \
+- .enable_mask = 1 << (ebit), \
+- }, \
+- .max_ua = (amax), \
+-}
+-
+-/*
+- * vreg - the LDO regs string
+- * ereg - the string for the enable register.
+- * ebit - the bit number in the enable register.
+- * amax - the current
+- * volt_table - the LDO voltage table
+- * For all the LDOes, there are too many ranges. Using volt_table will be
+- * simpler and faster.
+- */
+-#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table) \
+-{ \
+- .desc = { \
+- .name = #vreg, \
+- .of_match = of_match_ptr(#match), \
+- .regulators_node = of_match_ptr("regulators"), \
+- .ops = &pm800_volt_table_ops, \
+- .type = REGULATOR_VOLTAGE, \
+- .id = PM800_ID_##vreg, \
+- .owner = THIS_MODULE, \
+- .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+- .vsel_reg = PM800_##vreg##_VOUT, \
+- .vsel_mask = 0xf, \
+- .enable_reg = PM800_##ereg, \
+- .enable_mask = 1 << (ebit), \
+- .volt_table = ldo_volt_table, \
+- }, \
+- .max_ua = (amax), \
+-}
+-
+-/* Ranges are sorted in ascending order. */
+-static const struct regulator_linear_range buck1_volt_range[] = {
+- REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+- REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
+-};
+-
+-/* BUCK 2~5 have same ranges. */
+-static const struct regulator_linear_range buck2_5_volt_range[] = {
+- REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
+- REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
+-};
+-
+-static const unsigned int ldo1_volt_table[] = {
+- 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
+- 1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
+-};
+-
+-static const unsigned int ldo2_volt_table[] = {
+- 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
+-};
+-
+-/* LDO 3~17 have same voltage table. */
+-static const unsigned int ldo3_17_volt_table[] = {
+- 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
+- 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+-};
+-
+-/* LDO 18~19 have same voltage table. */
+-static const unsigned int ldo18_19_volt_table[] = {
+- 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
+-};
+-
+-static int pm800_get_current_limit(struct regulator_dev *rdev)
+-{
+- struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
+-
+- return info->max_ua;
+-}
+-
+-static const struct regulator_ops pm800_volt_range_ops = {
+- .list_voltage = regulator_list_voltage_linear_range,
+- .map_voltage = regulator_map_voltage_linear_range,
+- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+- .get_voltage_sel = regulator_get_voltage_sel_regmap,
+- .enable = regulator_enable_regmap,
+- .disable = regulator_disable_regmap,
+- .is_enabled = regulator_is_enabled_regmap,
+- .get_current_limit = pm800_get_current_limit,
+-};
+-
+-static const struct regulator_ops pm800_volt_table_ops = {
+- .list_voltage = regulator_list_voltage_table,
+- .map_voltage = regulator_map_voltage_iterate,
+- .set_voltage_sel = regulator_set_voltage_sel_regmap,
+- .get_voltage_sel = regulator_get_voltage_sel_regmap,
+- .enable = regulator_enable_regmap,
+- .disable = regulator_disable_regmap,
+- .is_enabled = regulator_is_enabled_regmap,
+- .get_current_limit = pm800_get_current_limit,
+-};
+-
+-/* The array is indexed by id(PM800_ID_XXX) */
+-static struct pm800_regulator_info pm800_regulator_info[] = {
+- PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+- PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+- PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+- PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+- PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+-
+- PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+- PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+- PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+- PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+- PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
+-};
+-
+-static int pm800_regulator_probe(struct platform_device *pdev)
+-{
+- struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+- struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
+- struct regulator_config config = { };
+- struct regulator_init_data *init_data;
+- int i, ret;
+-
+- if (pdata && pdata->num_regulators) {
+- unsigned int count = 0;
+-
+- /* Check whether num_regulator is valid. */
+- for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
+- if (pdata->regulators[i])
+- count++;
+- }
+- if (count != pdata->num_regulators)
+- return -EINVAL;
+- }
+-
+- config.dev = chip->dev;
+- config.regmap = chip->subchip->regmap_power;
+- for (i = 0; i < PM800_ID_RG_MAX; i++) {
+- struct regulator_dev *regulator;
+-
+- if (pdata && pdata->num_regulators) {
+- init_data = pdata->regulators[i];
+- if (!init_data)
+- continue;
+-
+- config.init_data = init_data;
+- }
+-
+- config.driver_data = &pm800_regulator_info[i];
+-
+- regulator = devm_regulator_register(&pdev->dev,
+- &pm800_regulator_info[i].desc, &config);
+- if (IS_ERR(regulator)) {
+- ret = PTR_ERR(regulator);
+- dev_err(&pdev->dev, "Failed to register %s\n",
+- pm800_regulator_info[i].desc.name);
+- return ret;
+- }
+- }
+-
+- return 0;
+-}
+-
+-static struct platform_driver pm800_regulator_driver = {
+- .driver = {
+- .name = "88pm80x-regulator",
+- },
+- .probe = pm800_regulator_probe,
+-};
+-
+-module_platform_driver(pm800_regulator_driver);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@marvell.com>");
+-MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
+-MODULE_ALIAS("platform:88pm800-regulator");
+diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
+index 93f53840e8f1..486edf784c13 100644
+--- a/drivers/regulator/Makefile
++++ b/drivers/regulator/Makefile
+@@ -11,7 +11,7 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
+ obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+
+ obj-$(CONFIG_REGULATOR_88PG86X) += 88pg86x.o
+-obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
++obj-$(CONFIG_REGULATOR_88PM800) += 88pm800-regulator.o
+ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
+ obj-$(CONFIG_REGULATOR_CPCAP) += cpcap-regulator.o
+ obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
+diff --git a/drivers/staging/kpc2000/TODO b/drivers/staging/kpc2000/TODO
+index 8c7af29fefae..ed951acc829a 100644
+--- a/drivers/staging/kpc2000/TODO
++++ b/drivers/staging/kpc2000/TODO
+@@ -1,7 +1,6 @@
+ - the kpc_spi driver doesn't seem to let multiple transactions (to different instances of the core) happen in parallel...
+ - The kpc_i2c driver is a hot mess, it should probably be cleaned up a ton. It functions against current hardware though.
+ - pcard->card_num in kp2000_pcie_probe() is a global variable and needs atomic / locking / something better.
+-- probe_core_uio() probably needs error handling
+ - the loop in kp2000_probe_cores() that uses probe_core_uio() also probably needs error handling
+ - would be nice if the AIO fileops in kpc_dma could be made to work
+ - probably want to add a CONFIG_ option to control compilation of the AIO functions
+diff --git a/drivers/staging/kpc2000/kpc2000/cell_probe.c b/drivers/staging/kpc2000/kpc2000/cell_probe.c
+index e0dba91e7fa8..d6b57f550876 100644
+--- a/drivers/staging/kpc2000/kpc2000/cell_probe.c
++++ b/drivers/staging/kpc2000/kpc2000/cell_probe.c
+@@ -295,6 +295,7 @@ int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard, char *na
+ kudev->dev = device_create(kpc_uio_class, &pcard->pdev->dev, MKDEV(0,0), kudev, "%s.%d.%d.%d", kudev->uioinfo.name, pcard->card_num, cte.type, kudev->core_num);
+ if (IS_ERR(kudev->dev)) {
+ dev_err(&pcard->pdev->dev, "probe_core_uio device_create failed!\n");
++ kfree(kudev);
+ return -ENODEV;
+ }
+ dev_set_drvdata(kudev->dev, kudev);
+@@ -302,6 +303,8 @@ int probe_core_uio(unsigned int core_num, struct kp2000_device *pcard, char *na
+ rv = uio_register_device(kudev->dev, &kudev->uioinfo);
+ if (rv){
+ dev_err(&pcard->pdev->dev, "probe_core_uio failed uio_register_device: %d\n", rv);
++ put_device(kudev->dev);
++ kfree(kudev);
+ return rv;
+ }
+
+diff --git a/drivers/staging/kpc2000/kpc_spi/spi_driver.c b/drivers/staging/kpc2000/kpc_spi/spi_driver.c
+index 86df16547a92..2f535022dc03 100644
+--- a/drivers/staging/kpc2000/kpc_spi/spi_driver.c
++++ b/drivers/staging/kpc2000/kpc_spi/spi_driver.c
+@@ -333,7 +333,7 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
+ list_for_each_entry(transfer, &m->transfers, transfer_list) {
+ if (transfer->tx_buf == NULL && transfer->rx_buf == NULL && transfer->len) {
+ status = -EINVAL;
+- break;
++ goto error;
+ }
+
+ /* transfer */
+@@ -371,7 +371,7 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
+
+ if (count != transfer->len) {
+ status = -EIO;
+- break;
++ goto error;
+ }
+ }
+
+@@ -389,6 +389,10 @@ kp_spi_transfer_one_message(struct spi_master *master, struct spi_message *m)
+ /* done work */
+ spi_finalize_current_message(master);
+ return 0;
++
++ error:
++ m->status = status;
++ return status;
+ }
+
+ static void
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index ccafcc2c87ac..70433f756d8e 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv)
+ kfree(priv->int_buf.data_buf);
+ }
+
+-static bool vnt_alloc_bufs(struct vnt_private *priv)
++static int vnt_alloc_bufs(struct vnt_private *priv)
+ {
++ int ret = 0;
+ struct vnt_usb_send_context *tx_context;
+ struct vnt_rcb *rcb;
+ int ii;
+
+ for (ii = 0; ii < priv->num_tx_context; ii++) {
+ tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
+- if (!tx_context)
++ if (!tx_context) {
++ ret = -ENOMEM;
+ goto free_tx;
++ }
+
+ priv->tx_context[ii] = tx_context;
+ tx_context->priv = priv;
+@@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
+
+ /* allocate URBs */
+ tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (!tx_context->urb)
++ if (!tx_context->urb) {
++ ret = -ENOMEM;
+ goto free_tx;
++ }
+
+ tx_context->in_use = false;
+ }
+
+ for (ii = 0; ii < priv->num_rcb; ii++) {
+ priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
+- if (!priv->rcb[ii])
++ if (!priv->rcb[ii]) {
++ ret = -ENOMEM;
+ goto free_rx_tx;
++ }
+
+ rcb = priv->rcb[ii];
+
+@@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
+
+ /* allocate URBs */
+ rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (!rcb->urb)
++ if (!rcb->urb) {
++ ret = -ENOMEM;
+ goto free_rx_tx;
++ }
+
+ rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
+- if (!rcb->skb)
++ if (!rcb->skb) {
++ ret = -ENOMEM;
+ goto free_rx_tx;
++ }
+
+ rcb->in_use = false;
+
+ /* submit rx urb */
+- if (vnt_submit_rx_urb(priv, rcb))
++ ret = vnt_submit_rx_urb(priv, rcb);
++ if (ret)
+ goto free_rx_tx;
+ }
+
+ priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
+- if (!priv->interrupt_urb)
++ if (!priv->interrupt_urb) {
++ ret = -ENOMEM;
+ goto free_rx_tx;
++ }
+
+ priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
+ if (!priv->int_buf.data_buf) {
+- usb_free_urb(priv->interrupt_urb);
+- goto free_rx_tx;
++ ret = -ENOMEM;
++ goto free_rx_tx_urb;
+ }
+
+- return true;
++ return 0;
+
++free_rx_tx_urb:
++ usb_free_urb(priv->interrupt_urb);
+ free_rx_tx:
+ vnt_free_rx_bufs(priv);
+-
+ free_tx:
+ vnt_free_tx_bufs(priv);
+-
+- return false;
++ return ret;
+ }
+
+ static void vnt_tx_80211(struct ieee80211_hw *hw,
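The vt6656 rewrite above converts vnt_alloc_bufs() from a bool return into a negative errno, unwinding partially allocated state through labeled cleanup. A compact sketch of that idiom with hypothetical resources (plain malloc, not the driver's structures):

	#include <errno.h>
	#include <stdlib.h>

	struct ctx { void *a, *b, *c; };

	static int ctx_alloc(struct ctx *ctx)
	{
		int ret = 0;

		ctx->a = malloc(64);
		if (!ctx->a) {
			ret = -ENOMEM;
			goto err;           /* nothing to undo yet */
		}
		ctx->b = malloc(64);
		if (!ctx->b) {
			ret = -ENOMEM;
			goto free_a;        /* undo in reverse order of setup */
		}
		ctx->c = malloc(64);
		if (!ctx->c) {
			ret = -ENOMEM;
			goto free_b;
		}
		return 0;

	free_b:
		free(ctx->b);
	free_a:
		free(ctx->a);
	err:
		return ret;
	}

	int main(void)
	{
		struct ctx ctx;

		if (ctx_alloc(&ctx) == 0) {
			free(ctx.c);
			free(ctx.b);
			free(ctx.a);
		}
		return 0;
	}

The benefit the hunk is after: the caller learns *why* allocation failed (-ENOMEM versus a URB submission error), and each label frees exactly what was set up before the failing step.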
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 682300713be4..eb2e2d141c01 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ status = serial8250_rx_chars(up, status);
+ }
+ serial8250_modem_status(up);
+- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
++ if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
++ (up->ier & UART_IER_THRI))
+ serial8250_tx_chars(up);
+
+ uart_unlock_and_check_sysrq(port, flags);
+diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+index b929c7ae3a27..7bab9a3eda92 100644
+--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
++++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+@@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port)
+ clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
+ }
+ cpm_uart_initbd(pinfo);
+- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
++ if (IS_SMC(pinfo)) {
++ out_be32(&pinfo->smcup->smc_rstate, 0);
++ out_be32(&pinfo->smcup->smc_tstate, 0);
++ out_be16(&pinfo->smcup->smc_rbptr,
++ in_be16(&pinfo->smcup->smc_rbase));
++ out_be16(&pinfo->smcup->smc_tbptr,
++ in_be16(&pinfo->smcup->smc_tbase));
++ } else {
++ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
++ }
+ }
+ /* Install interrupt handler. */
+ retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
+@@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
+ (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
+
+ /*
+- * In case SMC1 is being relocated...
++ * In case SMC is being relocated...
+ */
+-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
+ out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
+ out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
+ out_be32(&up->smc_rstate, 0);
+ out_be32(&up->smc_tstate, 0);
+ out_be16(&up->smc_brkcr, 1); /* number of break chars */
+ out_be16(&up->smc_brkec, 0);
+-#endif
+
+ /* Set up the uart parameters in the
+ * parameter ram.
+@@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
+ out_be16(&up->smc_brkec, 0);
+ out_be16(&up->smc_brkcr, 1);
+
+- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+-
+ /* Set UART mode, 8 bit, no parity, one stop.
+ * Enable receive and transmit.
+ */
+diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
+index f460cca139e2..13ac36e2da4f 100644
+--- a/drivers/tty/serial/digicolor-usart.c
++++ b/drivers/tty/serial/digicolor-usart.c
+@@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void)
+ if (ret)
+ return ret;
+
+- return platform_driver_register(&digicolor_uart_platform);
++ ret = platform_driver_register(&digicolor_uart_platform);
++ if (ret)
++ uart_unregister_driver(&digicolor_uart);
++
++ return ret;
+ }
+ module_init(digicolor_uart_init);
+
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 8b752e895053..10db3e54ac9e 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -383,6 +383,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport,
+ }
+ #endif
+
++/* called with port.lock taken and irqs caller dependent */
+ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
+ {
+ *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
+@@ -391,6 +392,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ }
+
++/* called with port.lock taken and irqs caller dependent */
+ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
+ {
+ *ucr2 &= ~UCR2_CTSC;
+@@ -400,6 +402,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
+ mctrl_gpio_set(sport->gpios, sport->port.mctrl);
+ }
+
++/* called with port.lock taken and irqs caller dependent */
+ static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2)
+ {
+ *ucr2 |= UCR2_CTSC;
+@@ -1549,6 +1552,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
+ old_csize = CS8;
+ }
+
++ del_timer_sync(&sport->timer);
++
++ /*
++ * Ask the core to calculate the divisor for us.
++ */
++ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
++ quot = uart_get_divisor(port, baud);
++
++ spin_lock_irqsave(&sport->port.lock, flags);
++
+ if ((termios->c_cflag & CSIZE) == CS8)
+ ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
+ else
+@@ -1592,16 +1605,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
+ ucr2 |= UCR2_PROE;
+ }
+
+- del_timer_sync(&sport->timer);
+-
+- /*
+- * Ask the core to calculate the divisor for us.
+- */
+- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+- quot = uart_get_divisor(port, baud);
+-
+- spin_lock_irqsave(&sport->port.lock, flags);
+-
+ sport->port.read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
+diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
+index e5aebbf5f302..c3afd128b8fc 100644
+--- a/drivers/tty/serial/max310x.c
++++ b/drivers/tty/serial/max310x.c
+@@ -496,37 +496,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
+
+ static int max310x_set_baud(struct uart_port *port, int baud)
+ {
+- unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
++ unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
+
+- /* Check for minimal value for divider */
+- if (div < 16)
+- div = 16;
+-
+- if (clk % baud && (div / 16) < 0x8000) {
++ /*
++ * Calculate the integer divisor first. Select a proper mode
++ * in case the requested baud rate is too high for the predefined
++ * clock frequency.
++ */
++ div = port->uartclk / baud;
++ if (div < 8) {
++ /* Mode x4 */
++ c = 4;
++ mode = MAX310X_BRGCFG_4XMODE_BIT;
++ } else if (div < 16) {
+ /* Mode x2 */
++ c = 8;
+ mode = MAX310X_BRGCFG_2XMODE_BIT;
+- clk = port->uartclk * 2;
+- div = clk / baud;
+-
+- if (clk % baud && (div / 16) < 0x8000) {
+- /* Mode x4 */
+- mode = MAX310X_BRGCFG_4XMODE_BIT;
+- clk = port->uartclk * 4;
+- div = clk / baud;
+- }
++ } else {
++ c = 16;
+ }
+
+- max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
+- max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
+- max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
++ /* Calculate the divisor in accordance with the fraction coefficient */
++ div /= c;
++ F = c*baud;
++
++ /* Calculate the baud rate fraction */
++ if (div > 0)
++ frac = (16*(port->uartclk % F)) / F;
++ else
++ div = 1;
++
++ max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
++ max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
++ max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
+
+- return DIV_ROUND_CLOSEST(clk, div);
++ /* Return the actual baud rate we just programmed */
++ return (16*port->uartclk) / (c*(16*div + frac));
+ }
+
+ static int max310x_update_best_err(unsigned long f, long *besterr)
+ {
+ /* Use baudrate 115200 for calculate error */
+- long err = f % (115200 * 16);
++ long err = f % (460800 * 16);
+
+ if ((*besterr < 0) || (*besterr > err)) {
+ *besterr = err;
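The reworked max310x_set_baud() above first picks a clock mode (x4, x2 or x1), then splits uartclk/baud into an integer divisor plus a 4-bit fraction, and reports the baud rate actually programmed. A standalone re-derivation of that arithmetic, mirroring the patched code with an illustrative crystal frequency:

	#include <stdio.h>

	/* Returns the baud actually programmed; *divp/*fracp receive the
	 * register values, *cp the mode multiplier (4, 8 or 16). */
	static unsigned int max310x_calc(unsigned int uartclk, unsigned int baud,
					 unsigned int *divp, unsigned int *fracp,
					 unsigned int *cp)
	{
		unsigned int div = uartclk / baud, frac = 0, c, F;

		if (div < 8)
			c = 4;   /* x4 mode */
		else if (div < 16)
			c = 8;   /* x2 mode */
		else
			c = 16;  /* x1 mode */

		div /= c;
		F = c * baud;

		if (div > 0)
			frac = (16 * (uartclk % F)) / F;
		else
			div = 1;

		*divp = div;
		*fracp = frac;
		*cp = c;
		return (16 * uartclk) / (c * (16 * div + frac));
	}

	int main(void)
	{
		unsigned int div, frac, c;
		unsigned int got = max310x_calc(3686400, 115200, &div, &frac, &c);

		/* 3686400 / 115200 = 32 -> x1 mode, div = 2, frac = 0: exact */
		printf("div=%u frac=%u c=%u actual=%u\n", div, frac, c, got);
		return 0;
	}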
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 23833ad952ba..3657a24913fc 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -383,10 +383,14 @@ no_rx:
+
+ static inline void msm_wait_for_xmitr(struct uart_port *port)
+ {
++ unsigned int timeout = 500000;
++
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
+ if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ break;
+ udelay(1);
++ if (!timeout--)
++ break;
+ }
+ msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
+ }
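The added counter above bounds a previously unbounded busy-wait on the transmitter-empty bit. A toy sketch of the bounded-poll idiom; the fake tx_empty() stands in for the UART_SR read, and the driver pairs each poll with udelay(1):

	#include <stdbool.h>
	#include <stdio.h>

	static int polls; /* pretend hardware: ready after a few polls */
	static bool tx_empty(void) { return ++polls > 3; }

	/* Poll with an iteration cap so a wedged transmitter cannot hang
	 * the caller forever. */
	static bool wait_for_xmitr(unsigned int timeout)
	{
		while (!tx_empty()) {
			if (!timeout--)
				return false; /* gave up */
		}
		return true;
	}

	int main(void)
	{
		printf("%s after %d polls\n",
		       wait_for_xmitr(500000) ? "ready" : "timed out", polls);
		return 0;
	}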
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 83f4dd0bfd74..4223cb496764 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1777,6 +1777,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+ {
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport;
++ int ret;
+
+ uport = uart_port_check(state);
+ if (!uport || uport->flags & UPF_DEAD)
+@@ -1787,7 +1788,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+ /*
+ * Start up the serial port.
+ */
+- return uart_startup(tty, state, 0);
++ ret = uart_startup(tty, state, 0);
++ if (ret > 0)
++ tty_port_set_active(port, 1);
++
++ return ret;
+ }
+
+ static const char *uart_type(struct uart_port *port)
+diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
+index 39ed56214cd3..2b400189be91 100644
+--- a/drivers/tty/serial/serial_mctrl_gpio.c
++++ b/drivers/tty/serial/serial_mctrl_gpio.c
+@@ -12,6 +12,7 @@
+ #include <linux/termios.h>
+ #include <linux/serial_core.h>
+ #include <linux/module.h>
++#include <linux/property.h>
+
+ #include "serial_mctrl_gpio.h"
+
+@@ -116,6 +117,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
+
+ for (i = 0; i < UART_GPIO_MAX; i++) {
+ enum gpiod_flags flags;
++ char *gpio_str;
++ bool present;
++
++ /* Skip this signal if its GPIO property does not exist */
++ gpio_str = kasprintf(GFP_KERNEL, "%s-gpios",
++ mctrl_gpios_desc[i].name);
++ if (!gpio_str)
++ continue;
++
++ present = device_property_present(dev, gpio_str);
++ kfree(gpio_str);
++ if (!present)
++ continue;
+
+ if (mctrl_gpios_desc[i].dir_out)
+ flags = GPIOD_OUT_LOW;
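The new check above builds the property name with kasprintf("%s-gpios", ...) and skips signals whose property is absent, instead of requesting every possible modem-control GPIO. A userspace sketch of the same name-building-and-lookup step, with GNU asprintf standing in for kasprintf and a string list standing in for device properties:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Stand-in for device_property_present(): the "device" here is
	 * just a NULL-terminated list of property names. */
	static int property_present(const char *const *props, const char *name)
	{
		for (; *props; props++)
			if (strcmp(*props, name) == 0)
				return 1;
		return 0;
	}

	int main(void)
	{
		const char *const props[] = { "cts-gpios", "rts-gpios", NULL };
		const char *signals[] = { "cts", "dsr", "dcd", "rts" };
		size_t i;

		for (i = 0; i < sizeof(signals) / sizeof(signals[0]); i++) {
			char *gpio_str = NULL;

			/* mirrors kasprintf(GFP_KERNEL, "%s-gpios", name) */
			if (asprintf(&gpio_str, "%s-gpios", signals[i]) < 0)
				continue;
			printf("%-10s %s\n", gpio_str,
			       property_present(props, gpio_str) ? "request" : "skip");
			free(gpio_str);
		}
		return 0;
	}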
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index abc705716aa0..d18c680aa64b 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -1398,6 +1398,7 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+ dma_addr_t buf;
++ int head, tail;
+
+ /*
+ * DMA is idle now.
+@@ -1407,16 +1408,23 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
+ * consistent xmit buffer state.
+ */
+ spin_lock_irq(&port->lock);
+- buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
++ head = xmit->head;
++ tail = xmit->tail;
++ buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
+ s->tx_dma_len = min_t(unsigned int,
+- CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
+- spin_unlock_irq(&port->lock);
++ CIRC_CNT(head, tail, UART_XMIT_SIZE),
++ CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
++ if (!s->tx_dma_len) {
++ /* Transmit buffer has been flushed */
++ spin_unlock_irq(&port->lock);
++ return;
++ }
+
+ desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
++ spin_unlock_irq(&port->lock);
+ dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
+ goto switch_to_pio;
+ }
+@@ -1424,18 +1432,18 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
+ dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
+ DMA_TO_DEVICE);
+
+- spin_lock_irq(&port->lock);
+ desc->callback = sci_dma_tx_complete;
+ desc->callback_param = s;
+- spin_unlock_irq(&port->lock);
+ s->cookie_tx = dmaengine_submit(desc);
+ if (dma_submit_error(s->cookie_tx)) {
++ spin_unlock_irq(&port->lock);
+ dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
+ goto switch_to_pio;
+ }
+
++ spin_unlock_irq(&port->lock);
+ dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
+- __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
++ __func__, xmit->buf, tail, head, s->cookie_tx);
+
+ dma_async_issue_pending(chan);
+ return;
+@@ -1648,11 +1656,18 @@ static void sci_free_dma(struct uart_port *port)
+
+ static void sci_flush_buffer(struct uart_port *port)
+ {
++ struct sci_port *s = to_sci_port(port);
++
+ /*
+ * In uart_flush_buffer(), the xmit circular buffer has just been
+- * cleared, so we have to reset tx_dma_len accordingly.
++ * cleared, so we have to reset tx_dma_len accordingly, and stop any
++ * pending transfers.
+ */
+- to_sci_port(port)->tx_dma_len = 0;
++ s->tx_dma_len = 0;
++ if (s->chan_tx) {
++ dmaengine_terminate_async(s->chan_tx);
++ s->cookie_tx = -EINVAL;
++ }
+ }
+ #else /* !CONFIG_SERIAL_SH_SCI_DMA */
+ static inline void sci_request_dma(struct uart_port *port)
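The sh-sci hunk above snapshots xmit->head and xmit->tail once under the port lock and sizes the DMA transfer with CIRC_CNT()/CIRC_CNT_TO_END(). A standalone example of that circular-buffer arithmetic, using the same macro definitions as include/linux/circ_buf.h (GNU C statement expressions, so GCC or Clang is assumed):

	#include <stdio.h>

	#define XMIT_SIZE 4096  /* power of two, like UART_XMIT_SIZE */

	/* Same definitions as include/linux/circ_buf.h */
	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
	#define CIRC_CNT_TO_END(head, tail, size) \
		({ int end = (size) - (tail); \
		   int n = ((head) + end) & ((size) - 1); \
		   n < end ? n : end; })

	int main(void)
	{
		int head = 100, tail = 4000;

		/* Pending bytes, and the contiguous run starting at tail:
		 * the DMA transfer length is the smaller of the two. */
		int cnt = CIRC_CNT(head, tail, XMIT_SIZE);
		int to_end = CIRC_CNT_TO_END(head, tail, XMIT_SIZE);

		printf("cnt=%d to_end=%d dma_len=%d\n",
		       cnt, to_end, cnt < to_end ? cnt : to_end);
		return 0;
	}

With the buffer wrapped (head 100, tail 4000) this prints cnt=196 to_end=96 dma_len=96: the transfer stops at the end of the ring, and a zero length means the buffer was flushed, which is exactly the early-return the hunk adds.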
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 63e34d868de8..f8503f8fc44e 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = {
+ static struct uart_driver sunhv_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "sunhv",
+- .dev_name = "ttyS",
++ .dev_name = "ttyHV",
+ .major = TTY_MAJOR,
+ };
+
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 605354fd60b1..9dcc4d855ddd 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -29,12 +29,12 @@
+
+ #define CDNS_UART_TTY_NAME "ttyPS"
+ #define CDNS_UART_NAME "xuartps"
+-#define CDNS_UART_MAJOR 0 /* use dynamic node allocation */
+ #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */
+ #define CDNS_UART_REGISTER_SPACE 0x1000
+
+ /* Rx Trigger level */
+ static int rx_trigger_level = 56;
++static int uartps_major;
+ module_param(rx_trigger_level, uint, S_IRUGO);
+ MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
+
+@@ -1517,7 +1517,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ cdns_uart_uart_driver->owner = THIS_MODULE;
+ cdns_uart_uart_driver->driver_name = driver_name;
+ cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME;
+- cdns_uart_uart_driver->major = CDNS_UART_MAJOR;
++ cdns_uart_uart_driver->major = uartps_major;
+ cdns_uart_uart_driver->minor = cdns_uart_data->id;
+ cdns_uart_uart_driver->nr = 1;
+
+@@ -1546,6 +1546,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
+ goto err_out_id;
+ }
+
++ uartps_major = cdns_uart_uart_driver->tty_driver->major;
+ cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
+
+ /*
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2c8e60c7dbd8..2844366dc173 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4002,6 +4002,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
+ * control transfers to set the hub timeout or enable device-initiated U1/U2
+ * will be successful.
+ *
++ * If the control transfer to enable device-initiated U1/U2 entry fails, then
++ * hub-initiated U1/U2 will be disabled.
++ *
+ * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
+ * driver know about it. If that call fails, it should be harmless, and just
+ * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
+@@ -4056,23 +4059,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
+ * host know that this link state won't be enabled.
+ */
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+- } else {
+- /* Only a configured device will accept the Set Feature
+- * U1/U2_ENABLE
+- */
+- if (udev->actconfig)
+- usb_set_device_initiated_lpm(udev, state, true);
++ return;
++ }
+
+- /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
+- * hub-initiated LPM is enabled. Thus, LPM is enabled no
+- * matter the result of usb_set_device_initiated_lpm().
+- * The only difference is whether device is able to initiate
+- * LPM.
+- */
++ /* Only a configured device will accept the Set Feature
++ * U1/U2_ENABLE
++ */
++ if (udev->actconfig &&
++ usb_set_device_initiated_lpm(udev, state, true) == 0) {
+ if (state == USB3_LPM_U1)
+ udev->usb3_lpm_u1_enabled = 1;
+ else if (state == USB3_LPM_U2)
+ udev->usb3_lpm_u2_enabled = 1;
++ } else {
++ /* Don't request U1/U2 entry if the device
++ * cannot transition to U1/U2.
++ */
++ usb_set_lpm_timeout(udev, state, 0);
++ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
+ }
+ }
+
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 4aff1d8dbc4f..6e9e172010fc 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1423,11 +1423,6 @@ static int dwc3_probe(struct platform_device *pdev)
+ dwc->regs = regs;
+ dwc->regs_size = resource_size(&dwc_res);
+
+- if (!dwc3_core_is_valid(dwc)) {
+- dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+- return -ENODEV;
+- }
+-
+ dwc3_get_properties(dwc);
+
+ dwc->reset = devm_reset_control_get_optional_shared(dev, NULL);
+@@ -1460,6 +1455,12 @@ static int dwc3_probe(struct platform_device *pdev)
+ if (ret)
+ goto unprepare_clks;
+
++ if (!dwc3_core_is_valid(dwc)) {
++ dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
++ ret = -ENODEV;
++ goto disable_clks;
++ }
++
+ platform_set_drvdata(pdev, dwc);
+ dwc3_cache_hwparams(dwc);
+
+@@ -1525,6 +1526,7 @@ err1:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
++disable_clks:
+ clk_bulk_disable(dwc->num_clks, dwc->clks);
+ unprepare_clks:
+ clk_bulk_unprepare(dwc->num_clks, dwc->clks);
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index c7ed90084d1a..213ff03c8a9f 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1183,11 +1183,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ ENTER();
+
+ if (!is_sync_kiocb(kiocb)) {
+- p = kmalloc(sizeof(io_data), GFP_KERNEL);
++ p = kzalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
++ memset(p, 0, sizeof(*p));
+ p->aio = false;
+ }
+
+@@ -1219,11 +1220,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ ENTER();
+
+ if (!is_sync_kiocb(kiocb)) {
+- p = kmalloc(sizeof(io_data), GFP_KERNEL);
++ p = kzalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
++ memset(p, 0, sizeof(*p));
+ p->aio = false;
+ }
+
+diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
+index 09a8ebd95588..6968b9f2b76b 100644
+--- a/drivers/usb/host/hwa-hc.c
++++ b/drivers/usb/host/hwa-hc.c
+@@ -159,7 +159,7 @@ out:
+ return result;
+
+ error_set_cluster_id:
+- wusb_cluster_id_put(wusbhc->cluster_id);
++ wusb_cluster_id_put(addr);
+ error_cluster_id_get:
+ goto out;
+
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 3ce71cbfbb58..ad05c27b3a7b 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void)
+ {
+ unsigned long flags;
+ struct amd_chipset_info info;
+- int ret;
++ int need_pll_quirk = 0;
+
+ spin_lock_irqsave(&amd_lock, flags);
+
+@@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void)
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (!amd_chipset_sb_type_init(&info)) {
+- ret = 0;
+ goto commit;
+ }
+
+- /* Below chipset generations needn't enable AMD PLL quirk */
+- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
+- info.sb_type.gen == AMD_CHIPSET_SB600 ||
+- info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+- (info.sb_type.gen == AMD_CHIPSET_SB700 &&
+- info.sb_type.rev > 0x3b)) {
++ switch (info.sb_type.gen) {
++ case AMD_CHIPSET_SB700:
++ need_pll_quirk = info.sb_type.rev <= 0x3B;
++ break;
++ case AMD_CHIPSET_SB800:
++ case AMD_CHIPSET_HUDSON2:
++ case AMD_CHIPSET_BOLTON:
++ need_pll_quirk = 1;
++ break;
++ default:
++ need_pll_quirk = 0;
++ break;
++ }
++
++ if (!need_pll_quirk) {
+ if (info.smbus_dev) {
+ pci_dev_put(info.smbus_dev);
+ info.smbus_dev = NULL;
+ }
+- ret = 0;
+ goto commit;
+ }
+
+@@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void)
+ }
+ }
+
+- ret = info.probe_result = 1;
++ need_pll_quirk = info.probe_result = 1;
+ printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
+
+ commit:
+@@ -263,7 +270,7 @@ commit:
+
+ /* Mark that we where here */
+ amd_chipset.probe_count++;
+- ret = amd_chipset.probe_result;
++ need_pll_quirk = amd_chipset.probe_result;
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+@@ -277,7 +284,7 @@ commit:
+ spin_unlock_irqrestore(&amd_lock, flags);
+ }
+
+- return ret;
++ return need_pll_quirk;
+ }
+ EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
+
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 92e764c54154..fabbce1c542a 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -2170,7 +2170,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
+ if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
+ usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
+ urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
+- !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
++ !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
++ !urb->num_sgs)
+ return true;
+
+ return false;
+diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
+index 4d6ae3795a88..6ca9111d150a 100644
+--- a/drivers/usb/misc/usb251xb.c
++++ b/drivers/usb/misc/usb251xb.c
+@@ -375,7 +375,8 @@ out_err:
+
+ #ifdef CONFIG_OF
+ static void usb251xb_get_ports_field(struct usb251xb *hub,
+- const char *prop_name, u8 port_cnt, u8 *fld)
++ const char *prop_name, u8 port_cnt,
++ bool ds_only, u8 *fld)
+ {
+ struct device *dev = hub->dev;
+ struct property *prop;
+@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
+ u32 port;
+
+ of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
+- if ((port >= 1) && (port <= port_cnt))
++ if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
+ *fld |= BIT(port);
+ else
+ dev_warn(dev, "port %u doesn't exist\n", port);
+@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
+
+ hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
+ usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
+- &hub->non_rem_dev);
++ true, &hub->non_rem_dev);
+
+ hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
+ usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
+- &hub->port_disable_sp);
++ true, &hub->port_disable_sp);
+
+ hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
+ usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
+- &hub->port_disable_bp);
++ true, &hub->port_disable_bp);
+
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+ if (!of_property_read_u32(np, "sp-max-total-current-microamp",
+@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
+ */
+ hub->port_swap = USB251XB_DEF_PORT_SWAP;
+ usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
+- &hub->port_swap);
+- if (of_get_property(np, "swap-us-lanes", NULL))
+- hub->port_swap |= BIT(0);
++ false, &hub->port_swap);
+
+ /* The following parameters are currently not exposed to devicetree, but
+ * may be as soon as needed.
+diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
+index 59190d88fa9f..556bb4fa0bee 100644
+--- a/drivers/usb/storage/scsiglue.c
++++ b/drivers/usb/storage/scsiglue.c
+@@ -28,6 +28,8 @@
+ * status of a command.
+ */
+
++#include <linux/blkdev.h>
++#include <linux/dma-mapping.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+
+@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev)
+ static int slave_configure(struct scsi_device *sdev)
+ {
+ struct us_data *us = host_to_us(sdev->host);
++ struct device *dev = us->pusb_dev->bus->sysdev;
+
+ /*
+ * Many devices have trouble transferring more than 32KB at a time,
+@@ -128,6 +131,14 @@ static int slave_configure(struct scsi_device *sdev)
+ blk_queue_max_hw_sectors(sdev->request_queue, 2048);
+ }
+
++ /*
++ * max_hw_sectors should be capped at the maximum mapping size for
++ * the device. Otherwise, the DMA API might fail in a swiotlb environment.
++ */
++ blk_queue_max_hw_sectors(sdev->request_queue,
++ min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
++ dma_max_mapping_size(dev) >> SECTOR_SHIFT));
++
+ /*
+ * Some USB host controllers can't do DMA; they have to use PIO.
+ * They indicate this by setting their dma_mask to NULL. For
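The clamp above caps a request's max_hw_sectors at the device's maximum DMA mapping size, so a single request can never exceed what swiotlb can bounce. The arithmetic in miniature, with an assumed 256 KiB mapping limit standing in for dma_max_mapping_size():

	#include <stdio.h>
	#include <stddef.h>

	#define SECTOR_SHIFT 9  /* 512-byte sectors */

	static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

	int main(void)
	{
		size_t cur_max_sectors = 2048;        /* the existing 1 MiB cap */
		size_t dma_max_mapping = 256 * 1024;  /* assumed swiotlb limit */

		/* A 256 KiB mapping limit allows at most 512 sectors per request */
		size_t clamped = min_size(cur_max_sectors,
					  dma_max_mapping >> SECTOR_SHIFT);
		printf("max_hw_sectors: %zu -> %zu\n", cur_max_sectors, clamped);
		return 0;
	}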
+diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
+index bc57ae9e2963..cce9ace651a2 100644
+--- a/fs/9p/vfs_addr.c
++++ b/fs/9p/vfs_addr.c
+@@ -35,8 +35,9 @@
+ * @page: structure to page
+ *
+ */
+-static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
++static int v9fs_fid_readpage(void *data, struct page *page)
+ {
++ struct p9_fid *fid = data;
+ struct inode *inode = page->mapping->host;
+ struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
+ struct iov_iter to;
+@@ -107,7 +108,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
+ if (ret == 0)
+ return ret;
+
+- ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
++ ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
++ filp->private_data);
+ p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
+ return ret;
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a2aabdb85226..8c9c7d76c900 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -394,10 +394,31 @@ static noinline int add_async_extent(struct async_chunk *cow,
+ return 0;
+ }
+
++/*
++ * Check if the inode has flags compatible with compression
++ */
++static inline bool inode_can_compress(struct inode *inode)
++{
++ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
++ BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
++ return false;
++ return true;
++}
++
++/*
++ * Check if the inode needs to be submitted to compression, based on mount
++ * options, defragmentation, properties or heuristics.
++ */
+ static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
+ {
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+
++ if (!inode_can_compress(inode)) {
++ WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
++ KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
++ btrfs_ino(BTRFS_I(inode)));
++ return 0;
++ }
+ /* force compress */
+ if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
+ return 1;
+@@ -1630,7 +1651,8 @@ int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page,
+ } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
+ ret = run_delalloc_nocow(inode, locked_page, start, end,
+ page_started, 0, nr_written);
+- } else if (!inode_need_compress(inode, start, end)) {
++ } else if (!inode_can_compress(inode) ||
++ !inode_need_compress(inode, start, end)) {
+ ret = cow_file_range(inode, locked_page, start, end, end,
+ page_started, nr_written, 1, NULL);
+ } else {
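inode_can_compress() above separates the hard capability check (NODATACOW or NODATASUM rules compression out entirely) from the softer inode_need_compress() policy. A sketch of the flag test; the flag values here are illustrative, not btrfs's actual bit layout:

	#include <stdbool.h>
	#include <stdio.h>

	#define INODE_NODATACOW (1u << 0)  /* values illustrative only */
	#define INODE_NODATASUM (1u << 1)

	static bool inode_can_compress(unsigned int flags)
	{
		/* Compression implies COW and checksums; either flag rules it out */
		return !(flags & (INODE_NODATACOW | INODE_NODATASUM));
	}

	int main(void)
	{
		printf("plain    : %d\n", inode_can_compress(0));               /* 1 */
		printf("nodatacow: %d\n", inode_can_compress(INODE_NODATACOW)); /* 0 */
		return 0;
	}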
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index af109c0ba720..e0469816c678 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -337,7 +337,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
+ for (i = 0; i < ARRAY_SIZE(prop_handlers); i++) {
+ const struct prop_handler *h = &prop_handlers[i];
+ const char *value;
+- u64 num_bytes;
++ u64 num_bytes = 0;
+
+ if (!h->inheritable)
+ continue;
+diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
+index 114ebfe30929..3951d39b9b75 100644
+--- a/fs/dlm/lowcomms.c
++++ b/fs/dlm/lowcomms.c
+@@ -1628,8 +1628,10 @@ static void clean_writequeues(void)
+
+ static void work_stop(void)
+ {
+- destroy_workqueue(recv_workqueue);
+- destroy_workqueue(send_workqueue);
++ if (recv_workqueue)
++ destroy_workqueue(recv_workqueue);
++ if (send_workqueue)
++ destroy_workqueue(send_workqueue);
+ }
+
+ static int work_start(void)
+@@ -1689,13 +1691,17 @@ static void work_flush(void)
+ struct hlist_node *n;
+ struct connection *con;
+
+- flush_workqueue(recv_workqueue);
+- flush_workqueue(send_workqueue);
++ if (recv_workqueue)
++ flush_workqueue(recv_workqueue);
++ if (send_workqueue)
++ flush_workqueue(send_workqueue);
+ do {
+ ok = 1;
+ foreach_conn(stop_conn);
+- flush_workqueue(recv_workqueue);
+- flush_workqueue(send_workqueue);
++ if (recv_workqueue)
++ flush_workqueue(recv_workqueue);
++ if (send_workqueue)
++ flush_workqueue(send_workqueue);
+ for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
+ hlist_for_each_entry_safe(con, n,
+ &connection_hash[i], list) {
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index ed70b68b2b38..d0539ddad6e2 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -832,17 +832,6 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ return -EINVAL;
+ }
+
+- if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
+- if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
+- f2fs_put_page(*cp_page, 1);
+- f2fs_msg(sbi->sb, KERN_WARNING,
+- "layout of large_nat_bitmap is deprecated, "
+- "run fsck to repair, chksum_offset: %zu",
+- crc_offset);
+- return -EINVAL;
+- }
+- }
+-
+ crc = f2fs_checkpoint_chksum(sbi, *cp_block);
+ if (crc != cur_cp_crc(*cp_block)) {
+ f2fs_put_page(*cp_page, 1);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index eda4181d2092..923923603a7d 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2262,6 +2262,9 @@ static inline bool __should_serialize_io(struct inode *inode,
+ return false;
+ if (IS_NOQUOTA(inode))
+ return false;
++ /* to avoid deadlock in the data flush path */
++ if (F2FS_I(inode)->cp_task)
++ return false;
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ return true;
+ if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 06b89a9862ab..cbdc2f88a98c 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1207,6 +1207,7 @@ struct f2fs_sb_info {
+ /* for inode management */
+ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
+ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
++ struct mutex flush_lock; /* for flush exclusion */
+
+ /* for extent tree cache */
+ struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+@@ -1766,8 +1767,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+
+ if (!__allow_reserved_blocks(sbi, inode, true))
+ avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+- if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+- avail_user_block_count -= sbi->unusable_block_count;
++ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
++ if (avail_user_block_count > sbi->unusable_block_count)
++ avail_user_block_count -= sbi->unusable_block_count;
++ else
++ avail_user_block_count = 0;
++ }
+ if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ diff = sbi->total_valid_block_count - avail_user_block_count;
+ if (diff > *count)
+@@ -1967,7 +1972,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+ struct inode *inode, bool is_inode)
+ {
+ block_t valid_block_count;
+- unsigned int valid_node_count;
++ unsigned int valid_node_count, user_block_count;
+ int err;
+
+ if (is_inode) {
+@@ -1994,10 +1999,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
+
+ if (!__allow_reserved_blocks(sbi, inode, false))
+ valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
++ user_block_count = sbi->user_block_count;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
+- valid_block_count += sbi->unusable_block_count;
++ user_block_count -= sbi->unusable_block_count;
+
+- if (unlikely(valid_block_count > sbi->user_block_count)) {
++ if (unlikely(valid_block_count > user_block_count)) {
+ spin_unlock(&sbi->stat_lock);
+ goto enospc;
+ }
+@@ -2198,7 +2204,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
+ get_pages(sbi, F2FS_DIO_WRITE))
+ return false;
+
+- if (SM_I(sbi) && SM_I(sbi)->dcc_info &&
++ if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
+ atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
+ return false;
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 8dee063c833f..ce15fbcd7cff 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -546,9 +546,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
+ if (test_opt(sbi, DATA_FLUSH)) {
+ struct blk_plug plug;
+
++ mutex_lock(&sbi->flush_lock);
++
+ blk_start_plug(&plug);
+ f2fs_sync_dirty_inodes(sbi, FILE_INODE);
+ blk_finish_plug(&plug);
++
++ mutex_unlock(&sbi->flush_lock);
+ }
+ f2fs_sync_fs(sbi->sb, true);
+ stat_inc_bg_cp_count(sbi->stat_info);
+@@ -872,7 +876,9 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
+ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+ {
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+- block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
++ int ovp_hole_segs =
++ (overprovision_segments(sbi) - reserved_segments(sbi));
++ block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
+ block_t holes[2] = {0, 0}; /* DATA and NODE */
+ struct seg_entry *se;
+ unsigned int segno;
+@@ -887,10 +893,10 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
+ }
+ mutex_unlock(&dirty_i->seglist_lock);
+
+- if (holes[DATA] > ovp || holes[NODE] > ovp)
++ if (holes[DATA] > ovp_holes || holes[NODE] > ovp_holes)
+ return -EAGAIN;
+ if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
+- dirty_segments(sbi) > overprovision_segments(sbi))
++ dirty_segments(sbi) > ovp_hole_segs)
+ return -EAGAIN;
+ return 0;
+ }
+@@ -1480,6 +1486,10 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
+ list_for_each_entry_safe(dc, tmp, pend_list, list) {
+ f2fs_bug_on(sbi, dc->state != D_PREP);
+
++ if (dpolicy->timeout != 0 &&
++ f2fs_time_over(sbi, dpolicy->timeout))
++ break;
++
+ if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
+ !is_idle(sbi, DISCARD_TIME)) {
+ io_interrupted = true;
+@@ -3393,6 +3403,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+ seg_i = CURSEG_I(sbi, i);
+ segno = le32_to_cpu(ckpt->cur_data_segno[i]);
+ blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
++ if (blk_off > ENTRIES_IN_SUM) {
++ f2fs_bug_on(sbi, 1);
++ f2fs_put_page(page, 1);
++ return -EFAULT;
++ }
+ seg_i->next_segno = segno;
+ reset_curseg(sbi, i, 0);
+ seg_i->alloc_type = ckpt->alloc_type[i];
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 6b959bbb336a..4b47ac994daf 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -2718,6 +2718,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ return 1;
+ }
+
++ if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
++ le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
++ f2fs_msg(sbi->sb, KERN_WARNING,
++ "layout of large_nat_bitmap is deprecated, "
++ "run fsck to repair, chksum_offset: %u",
++ le32_to_cpu(ckpt->checksum_offset));
++ return 1;
++ }
++
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ return 1;
+@@ -3287,6 +3296,7 @@ try_onemore:
+ INIT_LIST_HEAD(&sbi->inode_list[i]);
+ spin_lock_init(&sbi->inode_lock[i]);
+ }
++ mutex_init(&sbi->flush_lock);
+
+ f2fs_init_extent_cache_info(sbi);
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 4ef62a45045d..6c09cedcf17d 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -231,6 +231,7 @@ struct io_ring_ctx {
+ struct task_struct *sqo_thread; /* if using sq thread polling */
+ struct mm_struct *sqo_mm;
+ wait_queue_head_t sqo_wait;
++ struct completion sqo_thread_started;
+
+ struct {
+ /* CQ ring */
+@@ -330,6 +331,9 @@ struct io_kiocb {
+ #define REQ_F_SEQ_PREV 8 /* sequential with previous */
+ #define REQ_F_IO_DRAIN 16 /* drain existing IO first */
+ #define REQ_F_IO_DRAINED 32 /* drain done */
++#define REQ_F_LINK 64 /* linked sqes */
++#define REQ_F_LINK_DONE 128 /* linked sqes done */
++#define REQ_F_FAIL_LINK 256 /* fail rest of links */
+ u64 user_data;
+ u32 error; /* iopoll result from callback */
+ u32 sequence;
+@@ -403,6 +407,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
+ ctx->flags = p->flags;
+ init_waitqueue_head(&ctx->cq_wait);
+ init_completion(&ctx->ctx_done);
++ init_completion(&ctx->sqo_thread_started);
+ mutex_init(&ctx->uring_lock);
+ init_waitqueue_head(&ctx->wait);
+ for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
+@@ -423,7 +428,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
+ if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+ return false;
+
+- return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
++ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ }
+
+ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
+@@ -996,8 +1001,43 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
+ */
+ offset = buf_addr - imu->ubuf;
+ iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
+- if (offset)
+- iov_iter_advance(iter, offset);
++
++ if (offset) {
++ /*
++ * Don't use iov_iter_advance() here, as it's really slow for
++ * using the latter parts of a big fixed buffer - it iterates
++ * over each segment manually. We can cheat a bit here, because
++ * we know that:
++ *
++ * 1) it's a BVEC iter, we set it up
++ * 2) all bvecs are PAGE_SIZE in size, except potentially the
++ * first and last bvec
++ *
++ * So just find our index, and adjust the iterator afterwards.
++ * If the offset is within the first bvec (or the whole first
++ * bvec), just use iov_iter_advance(). This makes it easier
++ * since we can just skip the first segment, which may not
++ * be PAGE_SIZE aligned.
++ */
++ const struct bio_vec *bvec = imu->bvec;
++
++ if (offset <= bvec->bv_len) {
++ iov_iter_advance(iter, offset);
++ } else {
++ unsigned long seg_skip;
++
++ /* skip first vec */
++ offset -= bvec->bv_len;
++ seg_skip = 1 + (offset >> PAGE_SHIFT);
++
++ iter->bvec = bvec + seg_skip;
++ iter->nr_segs -= seg_skip;
++ iter->count -= (seg_skip << PAGE_SHIFT);
++ iter->iov_offset = offset & ~PAGE_MASK;
++ if (iter->iov_offset)
++ iter->count -= iter->iov_offset;
++ }
++ }
+
+ /* don't drop a reference to these pages */
+ iter->type |= ITER_BVEC_FLAG_NO_REF;
+@@ -1487,6 +1527,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ INIT_LIST_HEAD(&poll->wait.entry);
+ init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+
++ INIT_LIST_HEAD(&req->list);
++
+ mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
+
+ spin_lock_irq(&ctx->completion_lock);
+@@ -1694,6 +1736,10 @@ restart:
+ /* async context always use a copy of the sqe */
+ kfree(sqe);
+
++ /* req from defer and link list needn't decrease async cnt */
++ if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
++ goto out;
++
+ if (!async_list)
+ break;
+ if (!list_empty(&req_list)) {
+@@ -1741,6 +1787,7 @@ restart:
+ }
+ }
+
++out:
+ if (cur_mm) {
+ set_fs(old_fs);
+ unuse_mm(cur_mm);
+@@ -1767,6 +1814,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
+ ret = true;
+ spin_lock(&list->lock);
+ list_add_tail(&req->list, &list->list);
++ /*
++ * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
++ */
++ smp_mb();
+ if (!atomic_read(&list->cnt)) {
+ list_del_init(&req->list);
+ ret = false;
+@@ -2009,6 +2060,8 @@ static int io_sq_thread(void *data)
+ unsigned inflight;
+ unsigned long timeout;
+
++ complete(&ctx->sqo_thread_started);
++
+ old_fs = get_fs();
+ set_fs(USER_DS);
+
+@@ -2243,6 +2296,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
+ {
+ if (ctx->sqo_thread) {
++ wait_for_completion(&ctx->sqo_thread_started);
+ /*
+ * The park is a bit of a work-around, without it we get
+ * warning spews on shutdown with SQPOLL set and affinity
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index b428c295d13f..5778d1347b35 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -288,10 +288,13 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
+ /*
+ * For queues with unlimited length lost events are not expected and
+ * can possibly have security implications. Avoid losing events when
+- * memory is short.
++ * memory is short. For the limited size queues, avoid OOM killer in the
++ * target monitoring memcg as it may have security repercussions.
+ */
+ if (group->max_events == UINT_MAX)
+ gfp |= __GFP_NOFAIL;
++ else
++ gfp |= __GFP_RETRY_MAYFAIL;
+
+ /* Whoever is interested in the event, pays for the allocation. */
+ memalloc_use_memcg(group->memcg);
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index 2fda08b2b885..d510223d302c 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -90,9 +90,13 @@ int inotify_handle_event(struct fsnotify_group *group,
+ i_mark = container_of(inode_mark, struct inotify_inode_mark,
+ fsn_mark);
+
+- /* Whoever is interested in the event, pays for the allocation. */
++ /*
++ * Whoever is interested in the event pays for the allocation. Do not
++ * trigger OOM killer in the target monitoring memcg as it may have
++ * security repercussions.
++ */
+ memalloc_use_memcg(group->memcg);
+- event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
++ event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ memalloc_unuse_memcg();
+
+ if (unlikely(!event)) {
+diff --git a/fs/open.c b/fs/open.c
+index b5b80469b93d..a59abe3c669a 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
+ override_cred->cap_permitted;
+ }
+
++ /*
++ * The new set of credentials can *only* be used in
++ * task-synchronous circumstances, and does not need
++ * RCU freeing, unless somebody then takes a separate
++ * reference to it.
++ *
++ * NOTE! This is _only_ true because this credential
++ * is used purely for override_creds() that installs
++ * it as the subjective cred. Other threads will be
++ * accessing ->real_cred, not the subjective cred.
++ *
++ * If somebody _does_ make a copy of this (using the
++ * 'get_current_cred()' function), that will clear the
++ * non_rcu field, because now that other user may be
++ * expecting RCU freeing. But normal thread-synchronous
++ * cred accesses will keep things non-RCU.
++ */
++ override_cred->non_rcu = 1;
++
+ old_cred = override_creds(override_cred);
+ retry:
+ res = user_path_at(dfd, filename, lookup_flags, &path);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 255f6754c70d..03517154fe0f 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -1962,9 +1962,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
+ goto out;
+
+ if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
+- down_read(&mm->mmap_sem);
+- exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
+- up_read(&mm->mmap_sem);
++ status = down_read_killable(&mm->mmap_sem);
++ if (!status) {
++ exact_vma_exists = !!find_exact_vma(mm, vm_start,
++ vm_end);
++ up_read(&mm->mmap_sem);
++ }
+ }
+
+ mmput(mm);
+@@ -2010,8 +2013,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
+ if (rc)
+ goto out_mmput;
+
++ rc = down_read_killable(&mm->mmap_sem);
++ if (rc)
++ goto out_mmput;
++
+ rc = -ENOENT;
+- down_read(&mm->mmap_sem);
+ vma = find_exact_vma(mm, vm_start, vm_end);
+ if (vma && vma->vm_file) {
+ *path = vma->vm_file->f_path;
+@@ -2107,7 +2113,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+ if (!mm)
+ goto out_put_task;
+
+- down_read(&mm->mmap_sem);
++ result = ERR_PTR(-EINTR);
++ if (down_read_killable(&mm->mmap_sem))
++ goto out_put_mm;
++
++ result = ERR_PTR(-ENOENT);
+ vma = find_exact_vma(mm, vm_start, vm_end);
+ if (!vma)
+ goto out_no_vma;
+@@ -2118,6 +2128,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+
+ out_no_vma:
+ up_read(&mm->mmap_sem);
++out_put_mm:
+ mmput(mm);
+ out_put_task:
+ put_task_struct(task);
+@@ -2160,7 +2171,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
+ mm = get_task_mm(task);
+ if (!mm)
+ goto out_put_task;
+- down_read(&mm->mmap_sem);
++
++ ret = down_read_killable(&mm->mmap_sem);
++ if (ret) {
++ mmput(mm);
++ goto out_put_task;
++ }
+
+ nr_files = 0;
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 01d4eb0e6bd1..7f84d1477b5b 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -166,7 +166,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
+ if (!mm || !mmget_not_zero(mm))
+ return NULL;
+
+- down_read(&mm->mmap_sem);
++ if (down_read_killable(&mm->mmap_sem)) {
++ mmput(mm);
++ return ERR_PTR(-EINTR);
++ }
++
+ hold_task_mempolicy(priv);
+ priv->tail_vma = get_gate_vma(mm);
+
+@@ -828,7 +832,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+
+ memset(&mss, 0, sizeof(mss));
+
+- down_read(&mm->mmap_sem);
++ ret = down_read_killable(&mm->mmap_sem);
++ if (ret)
++ goto out_put_mm;
++
+ hold_task_mempolicy(priv);
+
+ for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
+@@ -845,8 +852,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
+
+ release_task_mempolicy(priv);
+ up_read(&mm->mmap_sem);
+- mmput(mm);
+
++out_put_mm:
++ mmput(mm);
+ out_put_task:
+ put_task_struct(priv->task);
+ priv->task = NULL;
+@@ -1132,7 +1140,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ goto out_mm;
+ }
+
+- down_read(&mm->mmap_sem);
++ if (down_read_killable(&mm->mmap_sem)) {
++ count = -EINTR;
++ goto out_mm;
++ }
+ tlb_gather_mmu(&tlb, mm, 0, -1);
+ if (type == CLEAR_REFS_SOFT_DIRTY) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+@@ -1539,7 +1550,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ /* overflow ? */
+ if (end < start_vaddr || end > end_vaddr)
+ end = end_vaddr;
+- down_read(&mm->mmap_sem);
++ ret = down_read_killable(&mm->mmap_sem);
++ if (ret)
++ goto out_free;
+ ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ up_read(&mm->mmap_sem);
+ start_vaddr = end;
+diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
+index 36bf0f2e102e..7907e6419e57 100644
+--- a/fs/proc/task_nommu.c
++++ b/fs/proc/task_nommu.c
+@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
+ if (!mm || !mmget_not_zero(mm))
+ return NULL;
+
+- down_read(&mm->mmap_sem);
++ if (down_read_killable(&mm->mmap_sem)) {
++ mmput(mm);
++ return ERR_PTR(-EINTR);
++ }
++
+ /* start from the Nth VMA */
+ for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
+ if (n-- == 0)
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 7eb43a038330..f7a30e0099be 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -145,7 +145,11 @@ struct cred {
+ struct user_struct *user; /* real user ID subscription */
+ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct group_info *group_info; /* supplementary groups for euid/fsgid */
+- struct rcu_head rcu; /* RCU deletion hook */
++ /* RCU deletion */
++ union {
++ int non_rcu; /* Can we skip RCU deletion? */
++ struct rcu_head rcu; /* RCU deletion hook */
++ };
+ } __randomize_layout;
+
+ extern void __put_cred(struct cred *);
+@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
+ if (!cred)
+ return cred;
+ validate_creds(cred);
++ nonconst_cred->non_rcu = 0;
+ return get_new_cred(nonconst_cred);
+ }
+
+@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
+ if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ return NULL;
+ validate_creds(cred);
++ nonconst_cred->non_rcu = 0;
+ return cred;
+ }
+
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 4a295e324ac5..b12c586fae28 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1375,6 +1375,7 @@ extern int (*platform_notify_remove)(struct device *dev);
+ */
+ extern struct device *get_device(struct device *dev);
+ extern void put_device(struct device *dev);
++extern bool kill_device(struct device *dev);
+
+ #ifdef CONFIG_DEVTMPFS
+ extern int devtmpfs_create_node(struct device *dev);
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 044a36d7c3f8..89508dc0795f 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -93,6 +93,7 @@ struct hmm {
+ struct mmu_notifier mmu_notifier;
+ struct rw_semaphore mirrors_sem;
+ wait_queue_head_t wq;
++ struct rcu_head rcu;
+ long notifiers;
+ bool dead;
+ };
+diff --git a/include/linux/host1x.h b/include/linux/host1x.h
+index cfff30b9a62e..e6eea45e1154 100644
+--- a/include/linux/host1x.h
++++ b/include/linux/host1x.h
+@@ -297,6 +297,8 @@ struct host1x_device {
+ struct list_head clients;
+
+ bool registered;
++
++ struct device_dma_parameters dma_parms;
+ };
+
+ static inline struct host1x_device *to_host1x_device(struct device *dev)
+diff --git a/include/linux/iova.h b/include/linux/iova.h
+index 781b96ac706f..a0637abffee8 100644
+--- a/include/linux/iova.h
++++ b/include/linux/iova.h
+@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
+ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
+ void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn);
++bool has_iova_flush_queue(struct iova_domain *iovad);
+ int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
+@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
+ {
+ }
+
++static inline bool has_iova_flush_queue(struct iova_domain *iovad)
++{
++ return false;
++}
++
+ static inline int init_iova_flush_queue(struct iova_domain *iovad,
+ iova_flush_cb flush_cb,
+ iova_entry_dtor entry_dtor)
+diff --git a/include/linux/swap.h b/include/linux/swap.h
+index 4bfb5c4ac108..6358a6185634 100644
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -175,8 +175,9 @@ enum {
+ SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
+ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
+ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
++ SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
+ /* add others here before... */
+- SWP_SCANNING = (1 << 13), /* refcount in scan_swap_map */
++ SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
+ };
+
+ #define SWAP_CLUSTER_MAX 32UL
+@@ -460,7 +461,7 @@ extern unsigned int count_swap_pages(int, int);
+ extern sector_t map_swap_page(struct page *, struct block_device **);
+ extern sector_t swapdev_block(int, pgoff_t);
+ extern int page_swapcount(struct page *);
+-extern int __swap_count(struct swap_info_struct *si, swp_entry_t entry);
++extern int __swap_count(swp_entry_t entry);
+ extern int __swp_swapcount(swp_entry_t entry);
+ extern int swp_swapcount(swp_entry_t entry);
+ extern struct swap_info_struct *page_swap_info(struct page *);
+@@ -470,6 +471,12 @@ extern int try_to_free_swap(struct page *);
+ struct backing_dev_info;
+ extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
+ extern void exit_swap_address_space(unsigned int type);
++extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
++
++static inline void put_swap_device(struct swap_info_struct *si)
++{
++ rcu_read_unlock();
++}
+
+ #else /* CONFIG_SWAP */
+
+@@ -576,7 +583,7 @@ static inline int page_swapcount(struct page *page)
+ return 0;
+ }
+
+-static inline int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
++static inline int __swap_count(swp_entry_t entry)
+ {
+ return 0;
+ }
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 1050a75fb7ef..dcd776e77442 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -518,7 +518,13 @@ struct v4l2_pix_format {
+ #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
+ #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
+ #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
+-#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
++
++/*
++ * Originally this had 'BA12' as fourcc, but this clashed with the older
++ * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc.
++ * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444.
++ */
++#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
+ #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
+ #define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
+ #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
+diff --git a/kernel/cred.c b/kernel/cred.c
+index c73a87a4df13..153ae369e024 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -144,7 +144,10 @@ void __put_cred(struct cred *cred)
+ BUG_ON(cred == current->cred);
+ BUG_ON(cred == current->real_cred);
+
+- call_rcu(&cred->rcu, put_cred_rcu);
++ if (cred->non_rcu)
++ put_cred_rcu(&cred->rcu);
++ else
++ call_rcu(&cred->rcu, put_cred_rcu);
+ }
+ EXPORT_SYMBOL(__put_cred);
+
+@@ -256,6 +259,7 @@ struct cred *prepare_creds(void)
+ old = task->cred;
+ memcpy(new, old, sizeof(struct cred));
+
++ new->non_rcu = 0;
+ atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
+ get_group_info(new->group_info);
+@@ -535,7 +539,19 @@ const struct cred *override_creds(const struct cred *new)
+
+ validate_creds(old);
+ validate_creds(new);
+- get_cred(new);
++
++ /*
++ * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
++ *
++ * That means that we do not clear the 'non_rcu' flag, since
++ * we are only installing the cred into the thread-synchronous
++ * '->cred' pointer, not the '->real_cred' pointer that is
++ * visible to other threads under RCU.
++ *
++ * Also note that we did validate_creds() manually, not depending
++ * on the validation in 'get_cred()'.
++ */
++ get_new_cred((struct cred *)new);
+ alter_cred_subscribers(new, 1);
+ rcu_assign_pointer(current->cred, new);
+ alter_cred_subscribers(old, -1);
+@@ -672,6 +688,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
+ validate_creds(old);
+
+ *new = *old;
++ new->non_rcu = 0;
+ atomic_set(&new->usage, 1);
+ set_cred_subscribers(new, 0);
+ get_uid(new->user);
+diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
+index 7a723194ecbe..0207e3764d52 100644
+--- a/kernel/dma/remap.c
++++ b/kernel/dma/remap.c
+@@ -158,6 +158,9 @@ out:
+
+ bool dma_in_atomic_pool(void *start, size_t size)
+ {
++ if (unlikely(!atomic_pool))
++ return false;
++
+ return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+ }
+
+diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
+index 9c49ec645d8b..bda006f8a88b 100644
+--- a/kernel/locking/lockdep_proc.c
++++ b/kernel/locking/lockdep_proc.c
+@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
+
+ static int lockdep_stats_show(struct seq_file *m, void *v)
+ {
+- struct lock_class *class;
+ unsigned long nr_unused = 0, nr_uncategorized = 0,
+ nr_irq_safe = 0, nr_irq_unsafe = 0,
+ nr_softirq_safe = 0, nr_softirq_unsafe = 0,
+@@ -210,6 +209,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
+ nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
+ sum_forward_deps = 0;
+
++#ifdef CONFIG_PROVE_LOCKING
++ struct lock_class *class;
++
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
+
+ if (class->usage_mask == 0)
+@@ -241,12 +243,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
+ if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
+ nr_hardirq_read_unsafe++;
+
+-#ifdef CONFIG_PROVE_LOCKING
+ sum_forward_deps += lockdep_count_forward_deps(class);
+-#endif
+ }
+ #ifdef CONFIG_DEBUG_LOCKDEP
+ DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
++#endif
++
+ #endif
+ seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
+ nr_lock_classes, MAX_LOCKDEP_KEYS);
+diff --git a/mm/gup.c b/mm/gup.c
+index ddde097cf9e4..d2c14fc4b5d4 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -585,11 +585,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
+ pgd = pgd_offset_k(address);
+ else
+ pgd = pgd_offset_gate(mm, address);
+- BUG_ON(pgd_none(*pgd));
++ if (pgd_none(*pgd))
++ return -EFAULT;
+ p4d = p4d_offset(pgd, address);
+- BUG_ON(p4d_none(*p4d));
++ if (p4d_none(*p4d))
++ return -EFAULT;
+ pud = pud_offset(p4d, address);
+- BUG_ON(pud_none(*pud));
++ if (pud_none(*pud))
++ return -EFAULT;
+ pmd = pmd_offset(pud, address);
+ if (!pmd_present(*pmd))
+ return -EFAULT;
+@@ -1696,7 +1699,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
+ }
+ #endif
+
+-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
++static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
++ struct page **pages)
+ {
+ while ((*nr) - nr_start) {
+ struct page *page = pages[--(*nr)];
+diff --git a/mm/hmm.c b/mm/hmm.c
+index f702a3895d05..4c405dfbd2b3 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -104,6 +104,11 @@ error:
+ return NULL;
+ }
+
++static void hmm_free_rcu(struct rcu_head *rcu)
++{
++ kfree(container_of(rcu, struct hmm, rcu));
++}
++
+ static void hmm_free(struct kref *kref)
+ {
+ struct hmm *hmm = container_of(kref, struct hmm, kref);
+@@ -116,7 +121,7 @@ static void hmm_free(struct kref *kref)
+ mm->hmm = NULL;
+ spin_unlock(&mm->page_table_lock);
+
+- kfree(hmm);
++ mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
+ }
+
+ static inline void hmm_put(struct hmm *hmm)
+@@ -144,10 +149,14 @@ void hmm_mm_destroy(struct mm_struct *mm)
+
+ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ {
+- struct hmm *hmm = mm_get_hmm(mm);
++ struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+ struct hmm_mirror *mirror;
+ struct hmm_range *range;
+
++ /* Bail out if hmm is in the process of being freed */
++ if (!kref_get_unless_zero(&hmm->kref))
++ return;
++
+ /* Report this HMM as dying. */
+ hmm->dead = true;
+
+@@ -185,13 +194,14 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *nrange)
+ {
+- struct hmm *hmm = mm_get_hmm(nrange->mm);
++ struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+ struct hmm_mirror *mirror;
+ struct hmm_update update;
+ struct hmm_range *range;
+ int ret = 0;
+
+- VM_BUG_ON(!hmm);
++ if (!kref_get_unless_zero(&hmm->kref))
++ return 0;
+
+ update.start = nrange->start;
+ update.end = nrange->end;
+@@ -239,9 +249,10 @@ out:
+ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *nrange)
+ {
+- struct hmm *hmm = mm_get_hmm(nrange->mm);
++ struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
+
+- VM_BUG_ON(!hmm);
++ if (!kref_get_unless_zero(&hmm->kref))
++ return;
+
+ mutex_lock(&hmm->lock);
+ hmm->notifiers--;
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 9dd581d11565..3e147ea83182 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -575,7 +575,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
+ if (in_irq()) {
+ object->pid = 0;
+ strncpy(object->comm, "hardirq", sizeof(object->comm));
+- } else if (in_softirq()) {
++ } else if (in_serving_softirq()) {
+ object->pid = 0;
+ strncpy(object->comm, "softirq", sizeof(object->comm));
+ } else {
+diff --git a/mm/memory.c b/mm/memory.c
+index ddf20bd0c317..b0efc69b2634 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2807,7 +2807,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
+ struct swap_info_struct *si = swp_swap_info(entry);
+
+ if (si->flags & SWP_SYNCHRONOUS_IO &&
+- __swap_count(si, entry) == 1) {
++ __swap_count(entry) == 1) {
+ /* skip swapcache */
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+ vmf->address);
+@@ -4349,7 +4349,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ void *old_buf = buf;
+ int write = gup_flags & FOLL_WRITE;
+
+- down_read(&mm->mmap_sem);
++ if (down_read_killable(&mm->mmap_sem))
++ return 0;
++
+ /* ignore errors, just check how much was successfully transferred */
+ while (len) {
+ int bytes, ret, offset;
+diff --git a/mm/mincore.c b/mm/mincore.c
+index c3f058bd0faf..4fe91d497436 100644
+--- a/mm/mincore.c
++++ b/mm/mincore.c
+@@ -68,8 +68,16 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
+ */
+ if (xa_is_value(page)) {
+ swp_entry_t swp = radix_to_swp_entry(page);
+- page = find_get_page(swap_address_space(swp),
+- swp_offset(swp));
++ struct swap_info_struct *si;
++
++ /* Prevent the swap device from being swapped off under us */
++ si = get_swap_device(swp);
++ if (si) {
++ page = find_get_page(swap_address_space(swp),
++ swp_offset(swp));
++ put_swap_device(si);
++ } else
++ page = NULL;
+ }
+ } else
+ page = find_get_page(mapping, pgoff);
+diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
+index 513b9607409d..b5670620aea0 100644
+--- a/mm/mmu_notifier.c
++++ b/mm/mmu_notifier.c
+@@ -274,7 +274,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
+ * thanks to mm_take_all_locks().
+ */
+ spin_lock(&mm->mmu_notifier_mm->lock);
+- hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
++ hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
+ spin_unlock(&mm->mmu_notifier_mm->lock);
+
+ mm_drop_all_locks(mm);
+diff --git a/mm/nommu.c b/mm/nommu.c
+index d8c02fbe03b5..b2823519f8cd 100644
+--- a/mm/nommu.c
++++ b/mm/nommu.c
+@@ -1792,7 +1792,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+ struct vm_area_struct *vma;
+ int write = gup_flags & FOLL_WRITE;
+
+- down_read(&mm->mmap_sem);
++ if (down_read_killable(&mm->mmap_sem))
++ return 0;
+
+ /* the access must start within one of the target process's mappings */
+ vma = find_vma(mm, addr);
+diff --git a/mm/swap.c b/mm/swap.c
+index 7ede3eddc12a..607c48229a1d 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -740,15 +740,20 @@ void release_pages(struct page **pages, int nr)
+ if (is_huge_zero_page(page))
+ continue;
+
+- /* Device public page can not be huge page */
+- if (is_device_public_page(page)) {
++ if (is_zone_device_page(page)) {
+ if (locked_pgdat) {
+ spin_unlock_irqrestore(&locked_pgdat->lru_lock,
+ flags);
+ locked_pgdat = NULL;
+ }
+- put_devmap_managed_page(page);
+- continue;
++ /*
++ * ZONE_DEVICE pages that return 'false' from
++ * put_devmap_managed_page() do not require special
++ * processing, and instead, expect a call to
++ * put_page_testzero().
++ */
++ if (put_devmap_managed_page(page))
++ continue;
+ }
+
+ page = compound_head(page);
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 85245fdec8d9..61453f1faf72 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -310,8 +310,13 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
+ unsigned long addr)
+ {
+ struct page *page;
++ struct swap_info_struct *si;
+
++ si = get_swap_device(entry);
++ if (!si)
++ return NULL;
+ page = find_get_page(swap_address_space(entry), swp_offset(entry));
++ put_swap_device(si);
+
+ INC_CACHE_INFO(find_total);
+ if (page) {
+@@ -354,8 +359,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr,
+ bool *new_page_allocated)
+ {
+- struct page *found_page, *new_page = NULL;
+- struct address_space *swapper_space = swap_address_space(entry);
++ struct page *found_page = NULL, *new_page = NULL;
++ struct swap_info_struct *si;
+ int err;
+ *new_page_allocated = false;
+
+@@ -365,7 +370,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
+ * called after lookup_swap_cache() failed, re-calling
+ * that would confuse statistics.
+ */
+- found_page = find_get_page(swapper_space, swp_offset(entry));
++ si = get_swap_device(entry);
++ if (!si)
++ break;
++ found_page = find_get_page(swap_address_space(entry),
++ swp_offset(entry));
++ put_swap_device(si);
+ if (found_page)
+ break;
+
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 596ac98051c5..dbab16ddefa6 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -1079,12 +1079,11 @@ fail:
+ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
+ {
+ struct swap_info_struct *p;
+- unsigned long offset, type;
++ unsigned long offset;
+
+ if (!entry.val)
+ goto out;
+- type = swp_type(entry);
+- p = swap_type_to_swap_info(type);
++ p = swp_swap_info(entry);
+ if (!p)
+ goto bad_nofile;
+ if (!(p->flags & SWP_USED))
+@@ -1187,6 +1186,69 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+ return usage;
+ }
+
++/*
++ * Check whether swap entry is valid in the swap device. If so,
++ * return pointer to swap_info_struct, and keep the swap entry valid
++ * by preventing the swap device from being swapped off, until
++ * put_swap_device() is called. Otherwise return NULL.
++ *
++ * The entirety of the RCU read critical section must come before the
++ * return from or after the call to synchronize_rcu() in
++ * enable_swap_info() or swapoff(). So if "si->flags & SWP_VALID" is
++ * true, the si->map, si->cluster_info, etc. must be valid in the
++ * critical section.
++ *
++ * Notice that swapoff, or swapoff followed by swapon, can still happen
++ * before the rcu_read_lock() in get_swap_device() or after the
++ * rcu_read_unlock() in put_swap_device() unless something else prevents
++ * swapoff, such as the page lock, page table lock, etc. The caller must
++ * be prepared for that. For example, the following situation is
++ * possible.
++ *
++ * CPU1 CPU2
++ * do_swap_page()
++ * ... swapoff+swapon
++ * __read_swap_cache_async()
++ * swapcache_prepare()
++ * __swap_duplicate()
++ * // check swap_map
++ * // verify PTE not changed
++ *
++ * In __swap_duplicate(), the swap_map needs to be checked before being
++ * changed, partly because the specified swap entry may be for another
++ * swap device which has been swapped off. And in do_swap_page(), after
++ * the page is read from the swap device, the PTE is verified to be
++ * unchanged with the page table locked, to check whether the swap
++ * device has been swapped off or swapped off and on again.
++ */
++struct swap_info_struct *get_swap_device(swp_entry_t entry)
++{
++ struct swap_info_struct *si;
++ unsigned long offset;
++
++ if (!entry.val)
++ goto out;
++ si = swp_swap_info(entry);
++ if (!si)
++ goto bad_nofile;
++
++ rcu_read_lock();
++ if (!(si->flags & SWP_VALID))
++ goto unlock_out;
++ offset = swp_offset(entry);
++ if (offset >= si->max)
++ goto unlock_out;
++
++ return si;
++bad_nofile:
++ pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
++out:
++ return NULL;
++unlock_out:
++ rcu_read_unlock();
++ return NULL;
++}
++
+ static unsigned char __swap_entry_free(struct swap_info_struct *p,
+ swp_entry_t entry, unsigned char usage)
+ {
+@@ -1358,11 +1420,18 @@ int page_swapcount(struct page *page)
+ return count;
+ }
+
+-int __swap_count(struct swap_info_struct *si, swp_entry_t entry)
++int __swap_count(swp_entry_t entry)
+ {
++ struct swap_info_struct *si;
+ pgoff_t offset = swp_offset(entry);
++ int count = 0;
+
+- return swap_count(si->swap_map[offset]);
++ si = get_swap_device(entry);
++ if (si) {
++ count = swap_count(si->swap_map[offset]);
++ put_swap_device(si);
++ }
++ return count;
+ }
+
+ static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
+@@ -1387,9 +1456,11 @@ int __swp_swapcount(swp_entry_t entry)
+ int count = 0;
+ struct swap_info_struct *si;
+
+- si = __swap_info_get(entry);
+- if (si)
++ si = get_swap_device(entry);
++ if (si) {
+ count = swap_swapcount(si, entry);
++ put_swap_device(si);
++ }
+ return count;
+ }
+
+@@ -2335,9 +2406,9 @@ static int swap_node(struct swap_info_struct *p)
+ return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
+ }
+
+-static void _enable_swap_info(struct swap_info_struct *p, int prio,
+- unsigned char *swap_map,
+- struct swap_cluster_info *cluster_info)
++static void setup_swap_info(struct swap_info_struct *p, int prio,
++ unsigned char *swap_map,
++ struct swap_cluster_info *cluster_info)
+ {
+ int i;
+
+@@ -2362,7 +2433,11 @@ static void _enable_swap_info(struct swap_info_struct *p, int prio,
+ }
+ p->swap_map = swap_map;
+ p->cluster_info = cluster_info;
+- p->flags |= SWP_WRITEOK;
++}
++
++static void _enable_swap_info(struct swap_info_struct *p)
++{
++ p->flags |= SWP_WRITEOK | SWP_VALID;
+ atomic_long_add(p->pages, &nr_swap_pages);
+ total_swap_pages += p->pages;
+
+@@ -2389,7 +2464,17 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
+ frontswap_init(p->type, frontswap_map);
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+- _enable_swap_info(p, prio, swap_map, cluster_info);
++ setup_swap_info(p, prio, swap_map, cluster_info);
++ spin_unlock(&p->lock);
++ spin_unlock(&swap_lock);
++ /*
++ * Guarantee swap_map, cluster_info, etc. fields are valid
++ * between get/put_swap_device() if SWP_VALID bit is set
++ */
++ synchronize_rcu();
++ spin_lock(&swap_lock);
++ spin_lock(&p->lock);
++ _enable_swap_info(p);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ }
+@@ -2398,7 +2483,8 @@ static void reinsert_swap_info(struct swap_info_struct *p)
+ {
+ spin_lock(&swap_lock);
+ spin_lock(&p->lock);
+- _enable_swap_info(p, p->prio, p->swap_map, p->cluster_info);
++ setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
++ _enable_swap_info(p);
+ spin_unlock(&p->lock);
+ spin_unlock(&swap_lock);
+ }
+@@ -2501,6 +2587,17 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
+
+ reenable_swap_slots_cache_unlock();
+
++ spin_lock(&swap_lock);
++ spin_lock(&p->lock);
++ p->flags &= ~SWP_VALID; /* mark swap device as invalid */
++ spin_unlock(&p->lock);
++ spin_unlock(&swap_lock);
++ /*
++ * wait for swap operations protected by get/put_swap_device()
++ * to complete
++ */
++ synchronize_rcu();
++
+ flush_work(&p->discard_work);
+
+ destroy_swap_extents(p);
+@@ -3265,17 +3362,11 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+ unsigned char has_cache;
+ int err = -EINVAL;
+
+- if (non_swap_entry(entry))
+- goto out;
+-
+- p = swp_swap_info(entry);
++ p = get_swap_device(entry);
+ if (!p)
+- goto bad_file;
+-
+- offset = swp_offset(entry);
+- if (unlikely(offset >= p->max))
+ goto out;
+
++ offset = swp_offset(entry);
+ ci = lock_cluster_or_swap_info(p, offset);
+
+ count = p->swap_map[offset];
+@@ -3321,11 +3412,9 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+ unlock_out:
+ unlock_cluster_or_swap_info(p, ci);
+ out:
++ if (p)
++ put_swap_device(p);
+ return err;
+-
+-bad_file:
+- pr_err("swap_dup: %s%08lx\n", Bad_file, entry.val);
+- goto out;
+ }
+
+ /*
+@@ -3417,6 +3506,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ struct page *list_page;
+ pgoff_t offset;
+ unsigned char count;
++ int ret = 0;
+
+ /*
+ * When debugging, it's easier to use __GFP_ZERO here; but it's better
+@@ -3424,15 +3514,15 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ */
+ page = alloc_page(gfp_mask | __GFP_HIGHMEM);
+
+- si = swap_info_get(entry);
++ si = get_swap_device(entry);
+ if (!si) {
+ /*
+ * An acceptable race has occurred since the failing
+- * __swap_duplicate(): the swap entry has been freed,
+- * perhaps even the whole swap_map cleared for swapoff.
++ * __swap_duplicate(): the swap device may have been swapped off
+ */
+ goto outer;
+ }
++ spin_lock(&si->lock);
+
+ offset = swp_offset(entry);
+
+@@ -3450,9 +3540,8 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
+ }
+
+ if (!page) {
+- unlock_cluster(ci);
+- spin_unlock(&si->lock);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto out;
+ }
+
+ /*
+@@ -3504,10 +3593,11 @@ out_unlock_cont:
+ out:
+ unlock_cluster(ci);
+ spin_unlock(&si->lock);
++ put_swap_device(si);
+ outer:
+ if (page)
+ __free_page(page);
+- return 0;
++ return ret;
+ }
+
+ /*
+diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
+index 46bce8389066..9db455d02255 100644
+--- a/net/rds/rdma_transport.c
++++ b/net/rds/rdma_transport.c
+@@ -112,7 +112,9 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ if (!conn)
+ break;
+ err = (int *)rdma_consumer_reject_data(cm_id, event, &len);
+- if (!err || (err && ((*err) == RDS_RDMA_REJ_INCOMPAT))) {
++ if (!err ||
++ (err && len >= sizeof(*err) &&
++ ((*err) <= RDS_RDMA_REJ_INCOMPAT))) {
+ pr_warn("RDS/RDMA: conn <%pI6c, %pI6c> rejected, dropping connection\n",
+ &conn->c_laddr, &conn->c_faddr);
+ conn->c_proposed_version = RDS_PROTOCOL_COMPAT_VERSION;
+@@ -122,7 +124,6 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
+ rdsdebug("Connection rejected: %s\n",
+ rdma_reject_msg(cm_id, event->status));
+ break;
+- /* FALLTHROUGH */
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
+index facbd603adf6..9ba47b0a47b9 100644
+--- a/scripts/basic/fixdep.c
++++ b/scripts/basic/fixdep.c
+@@ -99,6 +99,7 @@
+ #include <unistd.h>
+ #include <fcntl.h>
+ #include <string.h>
++#include <stdarg.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <ctype.h>
+@@ -109,6 +110,36 @@ static void usage(void)
+ exit(1);
+ }
+
++/*
++ * In the intended usage of this program, stdout is redirected to .*.cmd
++ * files. The return values of printf() and putchar() must be checked to catch
++ * any error, e.g. "No space left on device".
++ */
++static void xprintf(const char *format, ...)
++{
++ va_list ap;
++ int ret;
++
++ va_start(ap, format);
++ ret = vprintf(format, ap);
++ if (ret < 0) {
++ perror("fixdep");
++ exit(1);
++ }
++ va_end(ap);
++}
++
++static void xputchar(int c)
++{
++ int ret;
++
++ ret = putchar(c);
++ if (ret == EOF) {
++ perror("fixdep");
++ exit(1);
++ }
++}
++
+ /*
+ * Print out a dependency path from a symbol name
+ */
+@@ -116,7 +147,7 @@ static void print_dep(const char *m, int slen, const char *dir)
+ {
+ int c, prev_c = '/', i;
+
+- printf(" $(wildcard %s/", dir);
++ xprintf(" $(wildcard %s/", dir);
+ for (i = 0; i < slen; i++) {
+ c = m[i];
+ if (c == '_')
+@@ -124,10 +155,10 @@ static void print_dep(const char *m, int slen, const char *dir)
+ else
+ c = tolower(c);
+ if (c != '/' || prev_c != '/')
+- putchar(c);
++ xputchar(c);
+ prev_c = c;
+ }
+- printf(".h) \\\n");
++ xprintf(".h) \\\n");
+ }
+
+ struct item {
+@@ -324,13 +355,13 @@ static void parse_dep_file(char *m, const char *target)
+ */
+ if (!saw_any_target) {
+ saw_any_target = 1;
+- printf("source_%s := %s\n\n",
+- target, m);
+- printf("deps_%s := \\\n", target);
++ xprintf("source_%s := %s\n\n",
++ target, m);
++ xprintf("deps_%s := \\\n", target);
+ }
+ is_first_dep = 0;
+ } else {
+- printf(" %s \\\n", m);
++ xprintf(" %s \\\n", m);
+ }
+
+ buf = read_file(m);
+@@ -353,8 +384,8 @@ static void parse_dep_file(char *m, const char *target)
+ exit(1);
+ }
+
+- printf("\n%s: $(deps_%s)\n\n", target, target);
+- printf("$(deps_%s):\n", target);
++ xprintf("\n%s: $(deps_%s)\n\n", target, target);
++ xprintf("$(deps_%s):\n", target);
+ }
+
+ int main(int argc, char *argv[])
+@@ -369,7 +400,7 @@ int main(int argc, char *argv[])
+ target = argv[2];
+ cmdline = argv[3];
+
+- printf("cmd_%s := %s\n\n", target, cmdline);
++ xprintf("cmd_%s := %s\n\n", target, cmdline);
+
+ buf = read_file(depfile);
+ parse_dep_file(buf, target);
+diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c
+index e93336baaaed..c586d32dd2c3 100644
+--- a/scripts/genksyms/keywords.c
++++ b/scripts/genksyms/keywords.c
+@@ -25,6 +25,10 @@ static struct resword {
+ { "__volatile__", VOLATILE_KEYW },
+ { "__builtin_va_list", VA_LIST_KEYW },
+
++ { "__int128", BUILTIN_INT_KEYW },
++ { "__int128_t", BUILTIN_INT_KEYW },
++ { "__uint128_t", BUILTIN_INT_KEYW },
++
+ // According to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict". KAO
+ { "_Bool", BOOL_KEYW },
+ { "_restrict", RESTRICT_KEYW },
+diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
+index 00a6d7e54971..1ebcf52cd0f9 100644
+--- a/scripts/genksyms/parse.y
++++ b/scripts/genksyms/parse.y
+@@ -76,6 +76,7 @@ static void record_compound(struct string_list **keyw,
+ %token ATTRIBUTE_KEYW
+ %token AUTO_KEYW
+ %token BOOL_KEYW
++%token BUILTIN_INT_KEYW
+ %token CHAR_KEYW
+ %token CONST_KEYW
+ %token DOUBLE_KEYW
+@@ -263,6 +264,7 @@ simple_type_specifier:
+ | VOID_KEYW
+ | BOOL_KEYW
+ | VA_LIST_KEYW
++ | BUILTIN_INT_KEYW
+ | TYPE { (*$1)->tag = SYM_TYPEDEF; $$ = $1; }
+ ;
+
+diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
+index e17837f1d3f2..ae6504d07fd6 100644
+--- a/scripts/kallsyms.c
++++ b/scripts/kallsyms.c
+@@ -150,6 +150,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
+ /* exclude debugging symbols */
+ else if (stype == 'N' || stype == 'n')
+ return -1;
++ /* exclude s390 kasan local symbols */
++ else if (!strncmp(sym, ".LASANPC", 8))
++ return -1;
+
+ /* include the type field in the symbol name, so that it gets
+ * compressed together */
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 13c5e6c8829c..47fca2c69a73 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -325,7 +325,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
+ if (!mcountsym)
+ mcountsym = get_mcountsym(sym0, relp, str0);
+
+- if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
++ if (mcountsym && mcountsym == Elf_r_sym(relp) &&
++ !is_fake_mcount(relp)) {
+ uint_t const addend =
+ _w(_w(relp->r_offset) - recval + mcount_adjust);
+ mrelp->r_offset = _w(offbase
+diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
+index c6cb2d9b2905..107176069af3 100644
+--- a/security/Kconfig.hardening
++++ b/security/Kconfig.hardening
+@@ -61,6 +61,7 @@ choice
+ config GCC_PLUGIN_STRUCTLEAK_BYREF
+ bool "zero-init structs passed by reference (strong)"
+ depends on GCC_PLUGINS
++ depends on !(KASAN && KASAN_STACK=1)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any structures on the stack that may
+@@ -70,9 +71,15 @@ choice
+ exposures, like CVE-2017-1000410:
+ https://git.kernel.org/linus/06e7e776ca4d3654
+
++ As a side-effect, this keeps a lot of variables on the
++ stack that can otherwise be optimized out, so combining
++ this with CONFIG_KASAN_STACK can lead to a stack overflow
++ and is disallowed.
++
+ config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
+ bool "zero-init anything passed by reference (very strong)"
+ depends on GCC_PLUGINS
++ depends on !(KASAN && KASAN_STACK=1)
+ select GCC_PLUGIN_STRUCTLEAK
+ help
+ Zero-initialize any stack variables that may be passed
+diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
+index e63a90ff2728..1f0a6eaa2d6a 100644
+--- a/security/selinux/ss/sidtab.c
++++ b/security/selinux/ss/sidtab.c
+@@ -286,6 +286,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
+ ++count;
+ }
+
++ /* bail out if we already reached max entries */
++ rc = -EOVERFLOW;
++ if (count >= SIDTAB_MAX)
++ goto out_unlock;
++
+ /* insert context into new entry */
+ rc = -ENOMEM;
+ dst = sidtab_do_lookup(s, count, 1);
+diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
+index 7b977b753a03..7985dd8198b6 100644
+--- a/sound/ac97/bus.c
++++ b/sound/ac97/bus.c
+@@ -122,17 +122,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
+ vendor_id);
+
+ ret = device_add(&codec->dev);
+- if (ret)
+- goto err_free_codec;
++ if (ret) {
++ put_device(&codec->dev);
++ return ret;
++ }
+
+ return 0;
+-err_free_codec:
+- of_node_put(codec->dev.of_node);
+- put_device(&codec->dev);
+- kfree(codec);
+- ac97_ctrl->codecs[idx] = NULL;
+-
+- return ret;
+ }
+
+ unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 860543a4c840..12dd9b318db1 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
+ spin_lock_init(&group->lock);
+ mutex_init(&group->mutex);
+ INIT_LIST_HEAD(&group->substreams);
+- refcount_set(&group->refs, 0);
++ refcount_set(&group->refs, 1);
+ }
+
+ /* define group lock helpers */
+@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
+
+ if (!group)
+ return;
+- do_free = refcount_dec_and_test(&group->refs) &&
+- list_empty(&group->substreams);
++ do_free = refcount_dec_and_test(&group->refs);
+ snd_pcm_group_unlock(group, substream->pcm->nonatomic);
+ if (do_free)
+ kfree(group);
+@@ -2020,6 +2019,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ snd_pcm_group_lock_irq(target_group, nonatomic);
+ snd_pcm_stream_lock(substream1);
+ snd_pcm_group_assign(substream1, target_group);
++ refcount_inc(&target_group->refs);
+ snd_pcm_stream_unlock(substream1);
+ snd_pcm_group_unlock_irq(target_group, nonatomic);
+ _end:
+@@ -2056,13 +2056,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
+ snd_pcm_group_lock_irq(group, nonatomic);
+
+ relink_to_local(substream);
++ refcount_dec(&group->refs);
+
+ /* detach the last stream, too */
+ if (list_is_singular(&group->substreams)) {
+ relink_to_local(list_first_entry(&group->substreams,
+ struct snd_pcm_substream,
+ link_list));
+- do_free = !refcount_read(&group->refs);
++ do_free = refcount_dec_and_test(&group->refs);
+ }
+
+ snd_pcm_group_unlock_irq(group, nonatomic);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 50f86f458918..d438c450f04d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -313,11 +313,10 @@ enum {
+
+ #define AZX_DCAPS_INTEL_SKYLAKE \
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
++ AZX_DCAPS_SYNC_WRITE |\
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
+
+-#define AZX_DCAPS_INTEL_BROXTON \
+- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
++#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
+
+ /* quirks for ATI SB / AMD Hudson */
+ #define AZX_DCAPS_PRESET_ATI_SB \
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 4f8d0845ee1e..f299f137eaea 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -1083,6 +1083,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
+ */
+
+ static const struct hda_device_id snd_hda_id_conexant[] = {
++ HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
+ HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
+diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
+index 77a1d55334bb..53b53a9a4c6f 100644
+--- a/sound/usb/line6/podhd.c
++++ b/sound/usb/line6/podhd.c
+@@ -413,7 +413,7 @@ static const struct line6_properties podhd_properties_table[] = {
+ .name = "POD HD500",
+ .capabilities = LINE6_CAP_PCM
+ | LINE6_CAP_HWMON,
+- .altsetting = 1,
++ .altsetting = 0,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ep_audio_r = 0x86,
+diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
+index a22b6e8fad46..7399eb7f1378 100644
+--- a/tools/iio/iio_utils.c
++++ b/tools/iio/iio_utils.c
+@@ -156,9 +156,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
+ *be = (endianchar == 'b');
+ *bytes = padint / 8;
+ if (*bits_used == 64)
+- *mask = ~0;
++ *mask = ~(0ULL);
+ else
+- *mask = (1ULL << *bits_used) - 1;
++ *mask = (1ULL << *bits_used) - 1ULL;
+
+ *is_signed = (signchar == 's');
+ if (fclose(sysfsfp)) {
+diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
+index cb7a47dfd8b6..49ddfa6f5a8c 100644
+--- a/tools/pci/pcitest.c
++++ b/tools/pci/pcitest.c
+@@ -36,15 +36,15 @@ struct pci_test {
+ unsigned long size;
+ };
+
+-static void run_test(struct pci_test *test)
++static int run_test(struct pci_test *test)
+ {
+- long ret;
++ int ret = -EINVAL;
+ int fd;
+
+ fd = open(test->device, O_RDWR);
+ if (fd < 0) {
+ perror("can't open PCI Endpoint Test device");
+- return;
++ return -ENODEV;
+ }
+
+ if (test->barnum >= 0 && test->barnum <= 5) {
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 1ae66f09dc7d..e28002d90573 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1276,8 +1276,8 @@ static int add_default_attributes(void)
+ fprintf(stderr,
+ "Cannot set up top down events %s: %d\n",
+ str, err);
+- free(str);
+ parse_events_print_error(&errinfo, str);
++ free(str);
+ return -1;
+ }
+ } else {
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 466621cd1017..8a9ff4b11df0 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -100,7 +100,7 @@ static void perf_top__resize(struct perf_top *top)
+
+ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
+ {
+- struct perf_evsel *evsel = hists_to_evsel(he->hists);
++ struct perf_evsel *evsel;
+ struct symbol *sym;
+ struct annotation *notes;
+ struct map *map;
+@@ -109,6 +109,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
+ if (!he || !he->ms.sym)
+ return -1;
+
++ evsel = hists_to_evsel(he->hists);
++
+ sym = he->ms.sym;
+ map = he->ms.map;
+
+@@ -225,7 +227,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
+ static void perf_top__show_details(struct perf_top *top)
+ {
+ struct hist_entry *he = top->sym_filter_entry;
+- struct perf_evsel *evsel = hists_to_evsel(he->hists);
++ struct perf_evsel *evsel;
+ struct annotation *notes;
+ struct symbol *symbol;
+ int more;
+@@ -233,6 +235,8 @@ static void perf_top__show_details(struct perf_top *top)
+ if (!he)
+ return;
+
++ evsel = hists_to_evsel(he->hists);
++
+ symbol = he->ms.sym;
+ notes = symbol__annotation(symbol);
+
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 52fadc858ef0..909e68545bb8 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -997,10 +997,10 @@ static struct thread_trace *thread_trace__new(void)
+ {
+ struct thread_trace *ttrace = zalloc(sizeof(struct thread_trace));
+
+- if (ttrace)
++ if (ttrace) {
+ ttrace->files.max = -1;
+-
+- ttrace->syscall_stats = intlist__new(NULL);
++ ttrace->syscall_stats = intlist__new(NULL);
++ }
+
+ return ttrace;
+ }
+diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
+index ba87e6e8d18c..0a4301a5155c 100644
+--- a/tools/perf/tests/mmap-thread-lookup.c
++++ b/tools/perf/tests/mmap-thread-lookup.c
+@@ -53,7 +53,7 @@ static void *thread_fn(void *arg)
+ {
+ struct thread_data *td = arg;
+ ssize_t ret;
+- int go;
++ int go = 0;
+
+ if (thread_init(td))
+ return NULL;
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 3421ecbdd3f0..c1dd9b54dc6e 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -638,7 +638,11 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
+ switch (key) {
+ case K_TIMER: {
+ u64 nr_entries;
+- hbt->timer(hbt->arg);
++
++ WARN_ON_ONCE(!hbt);
++
++ if (hbt)
++ hbt->timer(hbt->arg);
+
+ if (hist_browser__has_filter(browser) ||
+ symbol_conf.report_hierarchy)
+@@ -2819,7 +2823,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ {
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
+- struct branch_info *bi;
++ struct branch_info *bi = NULL;
+ #define MAX_OPTIONS 16
+ char *options[MAX_OPTIONS];
+ struct popup_action actions[MAX_OPTIONS];
+@@ -3085,7 +3089,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
+ goto skip_annotation;
+
+ if (sort__mode == SORT_MODE__BRANCH) {
+- bi = browser->he_selection->branch_info;
++
++ if (browser->he_selection)
++ bi = browser->he_selection->branch_info;
+
+ if (bi == NULL)
+ goto skip_annotation;
+@@ -3269,7 +3275,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
+
+ switch (key) {
+ case K_TIMER:
+- hbt->timer(hbt->arg);
++ if (hbt)
++ hbt->timer(hbt->arg);
+
+ if (!menu->lost_events_warned &&
+ menu->lost_events &&
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index c8ce13419d9b..b8dfcfe08bb1 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -1113,16 +1113,14 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp)
+ *namep = strdup(name);
+
+ if (*namep == NULL)
+- goto out_free_name;
++ goto out;
+
+ (*rawp)[0] = tmp;
+ *rawp = ltrim(*rawp);
+
+ return 0;
+
+-out_free_name:
+- free((void *)namep);
+- *namep = NULL;
++out:
+ return -1;
+ }
+
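The annotate.c error path freed namep itself, i.e. the address of the caller's pointer variable, rather than the strdup() allocation it points to; and since *namep is NULL exactly when that path runs, there is nothing to free at all, so the label collapses to a bare return. A sketch of the bug class (helper name is illustrative):

#include <stdlib.h>
#include <string.h>

/* Duplicate name into *namep; 0 on success, -1 on failure. */
static int dup_name(const char *name, const char **namep)
{
	*namep = strdup(name);
	if (*namep == NULL)
		return -1;	/* strdup failed: nothing to free */
	return 0;
	/* The removed code did free((void *)namep) here, which frees
	 * the pointer-to-pointer, not the allocation. */
}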
+diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
+index e32dbffebb2f..625ad3639a7e 100644
+--- a/tools/perf/util/intel-bts.c
++++ b/tools/perf/util/intel-bts.c
+@@ -891,13 +891,12 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
+ if (dump_trace)
+ return 0;
+
+- if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
++ if (session->itrace_synth_opts->set) {
+ bts->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&bts->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+- if (session->itrace_synth_opts)
+- bts->synth_opts.thread_stack =
++ bts->synth_opts.thread_stack =
+ session->itrace_synth_opts->thread_stack;
+ }
+
+diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
+index ee71efb9db62..9c81ee092784 100644
+--- a/tools/perf/util/map.c
++++ b/tools/perf/util/map.c
+@@ -470,8 +470,11 @@ int map__fprintf_srccode(struct map *map, u64 addr,
+ goto out_free_line;
+
+ ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
+- state->srcfile = srcfile;
+- state->line = line;
++
++ if (state) {
++ state->srcfile = srcfile;
++ state->line = line;
++ }
+ return ret;
+
+ out_free_line:
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 54cf163347f7..2e61dd6a3574 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -1249,6 +1249,9 @@ static void dump_read(struct perf_evsel *evsel, union perf_event *event)
+ evsel ? perf_evsel__name(evsel) : "FAIL",
+ event->read.value);
+
++ if (!evsel)
++ return;
++
+ read_format = evsel->attr.read_format;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
+index 84f28f147fb6..5943c816c07c 100644
+--- a/tools/testing/selftests/rseq/rseq-arm.h
++++ b/tools/testing/selftests/rseq/rseq-arm.h
+@@ -6,6 +6,8 @@
+ */
+
+ /*
++ * - ARM little endian
++ *
+ * RSEQ_SIG uses the udf A32 instruction with an uncommon immediate operand
+ * value 0x5de3. This traps if user-space reaches this instruction by mistake,
+ * and the uncommon operand ensures the kernel does not move the instruction
+@@ -22,36 +24,40 @@
+ * def3 udf #243 ; 0xf3
+ * e7f5 b.n <7f5>
+ *
+- * pre-ARMv6 big endian code:
+- * e7f5 b.n <7f5>
+- * def3 udf #243 ; 0xf3
++ * - ARMv6+ big endian (BE8):
+ *
+ * ARMv6+ -mbig-endian generates mixed endianness code vs data: little-endian
+- * code and big-endian data. Ensure the RSEQ_SIG data signature matches code
+- * endianness. Prior to ARMv6, -mbig-endian generates big-endian code and data
+- * (which match), so there is no need to reverse the endianness of the data
+- * representation of the signature. However, the choice between BE32 and BE8
+- * is done by the linker, so we cannot know whether code and data endianness
+- * will be mixed before the linker is invoked.
++ * code and big-endian data. The data value of the signature needs to have its
++ * byte order reversed to generate the trap instruction:
++ *
++ * Data: 0xf3def5e7
++ *
++ * Translates to this A32 instruction pattern:
++ *
++ * e7f5def3 udf #24035 ; 0x5de3
++ *
++ * Translates to this T16 instruction pattern:
++ *
++ * def3 udf #243 ; 0xf3
++ * e7f5 b.n <7f5>
++ *
++ * - Prior to ARMv6 big endian (BE32):
++ *
++ * Prior to ARMv6, -mbig-endian generates big-endian code and data
++ * (which match), so the endianness of the data representation of the
++ * signature should not be reversed. However, the choice between BE32
++ * and BE8 is done by the linker, so we cannot know whether code and
++ * data endianness will be mixed before the linker is invoked. So rather
++ * than try to play tricks with the linker, the rseq signature is simply
++ * data (not a trap instruction) prior to ARMv6 on big endian. This is
++ * why the signature is expressed as data (.word) rather than as
++ * instruction (.inst) in assembler.
+ */
+
+-#define RSEQ_SIG_CODE 0xe7f5def3
+-
+-#ifndef __ASSEMBLER__
+-
+-#define RSEQ_SIG_DATA \
+- ({ \
+- int sig; \
+- asm volatile ("b 2f\n\t" \
+- "1: .inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
+- "2:\n\t" \
+- "ldr %[sig], 1b\n\t" \
+- : [sig] "=r" (sig)); \
+- sig; \
+- })
+-
+-#define RSEQ_SIG RSEQ_SIG_DATA
+-
++#ifdef __ARMEB__
++#define RSEQ_SIG 0xf3def5e7 /* udf #24035 ; 0x5de3 (ARMv6+) */
++#else
++#define RSEQ_SIG 0xe7f5def3 /* udf #24035 ; 0x5de3 */
+ #endif
+
+ #define rseq_smp_mb() __asm__ __volatile__ ("dmb" ::: "memory", "cc")
+@@ -125,8 +131,7 @@ do { \
+ __rseq_str(table_label) ":\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
+- ".arm\n\t" \
+- ".inst " __rseq_str(RSEQ_SIG_CODE) "\n\t" \
++ ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(abort_label) "]\n\t"
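After this change the signature is plain data whose byte order is fixed per ABI variant, so the kernel reads back the same 32-bit value the header defines. A tiny userspace check of the two constants (illustrative only; RSEQ_SIG itself comes from the header above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
#ifdef __ARMEB__
	uint32_t sig = 0xf3def5e7;	/* BE8: byte-reversed data, LE code */
#else
	uint32_t sig = 0xe7f5def3;	/* udf #24035 ; 0x5de3 */
#endif
	printf("RSEQ_SIG = 0x%08x\n", sig);
	return 0;
}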
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-28 16:23 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-28 16:23 UTC (permalink / raw
To: gentoo-commits
commit: 2a0f3a187826d46cc19428c9d4e4817256ecae34
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 28 16:23:33 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 28 16:23:33 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=2a0f3a18
Linux patch 5.2.4
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1003_linux-5.2.4.patch | 2928 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2932 insertions(+)
diff --git a/0000_README b/0000_README
index 359d69d..ff4bd8b 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-5.2.3.patch
From: https://www.kernel.org
Desc: Linux 5.2.3
+Patch: 1003_linux-5.2.4.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1003_linux-5.2.4.patch b/1003_linux-5.2.4.patch
new file mode 100644
index 0000000..7901701
--- /dev/null
+++ b/1003_linux-5.2.4.patch
@@ -0,0 +1,2928 @@
+diff --git a/Makefile b/Makefile
+index bcb6a2465e21..68ee97784c4d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
+index 071e9d94eea7..daed44ee116d 100644
+--- a/arch/mips/jz4740/board-qi_lb60.c
++++ b/arch/mips/jz4740/board-qi_lb60.c
+@@ -466,27 +466,27 @@ static unsigned long pin_cfg_bias_disable[] = {
+ static struct pinctrl_map pin_map[] __initdata = {
+ /* NAND pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
+- "10010000.jz4740-pinctrl", "nand", "nand-cs1"),
++ "10010000.pin-controller", "nand-cs1", "nand"),
+
+ /* fbdev pin configuration */
+ PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT,
+- "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"),
++ "10010000.pin-controller", "lcd-8bit", "lcd"),
+ PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP,
+- "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"),
++ "10010000.pin-controller", "lcd-no-pins", "lcd"),
+
+ /* MMC pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+- "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"),
++ "10010000.pin-controller", "mmc-1bit", "mmc"),
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+- "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"),
++ "10010000.pin-controller", "mmc-4bit", "mmc"),
+ PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+- "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable),
++ "10010000.pin-controller", "PD0", pin_cfg_bias_disable),
+ PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+- "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable),
++ "10010000.pin-controller", "PD2", pin_cfg_bias_disable),
+
+ /* PWM pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm",
+- "10010000.jz4740-pinctrl", "pwm4", "pwm4"),
++ "10010000.pin-controller", "pwm4", "pwm4"),
+ };
+
+
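The board-qi_lb60.c fix corrects both the pin-controller device name and the argument order: in PIN_MAP_MUX_GROUP_DEFAULT(dev, pctldev, grp, func) the group name precedes the function name, which the old entries had reversed. One corrected entry in isolation (a kernel-side sketch using the same macro as the patch):

#include <linux/pinctrl/machine.h>

static struct pinctrl_map example_map[] __initdata = {
	/* group ("nand-cs1") before function ("nand") */
	PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
				  "10010000.pin-controller",
				  "nand-cs1", "nand"),
};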
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 26d1eb83f72a..08f46951c430 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -607,15 +607,16 @@ struct kvm_vcpu_arch {
+
+ /*
+ * QEMU userspace and the guest each have their own FPU state.
+- * In vcpu_run, we switch between the user, maintained in the
+- * task_struct struct, and guest FPU contexts. While running a VCPU,
+- * the VCPU thread will have the guest FPU context.
++ * In vcpu_run, we switch between the user and guest FPU contexts.
++ * While running a VCPU, the VCPU thread will have the guest FPU
++ * context.
+ *
+ * Note that while the PKRU state lives inside the fpu registers,
+ * it is switched out separately at VMENTER and VMEXIT time. The
+ * "guest_fpu" state here contains the guest FPU context, with the
+ * host PRKU bits.
+ */
++ struct fpu user_fpu;
+ struct fpu *guest_fpu;
+
+ u64 xcr0;
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 7df4f46499e1..b101127e13b6 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -184,6 +184,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
+ {
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
++ vmx->nested.need_vmcs12_sync = false;
+ }
+
+ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
+@@ -209,6 +210,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
+ if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
+ return;
+
++ kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
++
+ vmx->nested.vmxon = false;
+ vmx->nested.smm.vmxon = false;
+ free_vpid(vmx->nested.vpid02);
+@@ -1321,6 +1324,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
+ u64 field_value;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
++ if (WARN_ON(!shadow_vmcs))
++ return;
++
+ preempt_disable();
+
+ vmcs_load(shadow_vmcs);
+@@ -1359,6 +1365,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
+ u64 field_value = 0;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
++ if (WARN_ON(!shadow_vmcs))
++ return;
++
+ vmcs_load(shadow_vmcs);
+
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
+@@ -4300,7 +4309,6 @@ static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
+ /* copy to memory all shadowed fields in case
+ they were modified */
+ copy_shadow_to_vmcs12(vmx);
+- vmx->nested.need_vmcs12_sync = false;
+ vmx_disable_shadow_vmcs(vmx);
+ }
+ vmx->nested.posted_intr_nv = -1;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fafd81d2c9ea..a4eceb0b5dde 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8219,7 +8219,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+ fpregs_lock();
+
+-	copy_fpregs_to_fpstate(&current->thread.fpu);
++ copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+ /* PKRU is separately restored in kvm_x86_ops->run. */
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
+ ~XFEATURE_MASK_PKRU);
+@@ -8236,7 +8236,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ fpregs_lock();
+
+ copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
+-	copy_kernel_to_fpregs(&current->thread.fpu.state);
++ copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+
+ fpregs_mark_activate();
+ fpregs_unlock();
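The kvm_host.h/x86.c pair gives each vCPU its own user_fpu slot, so the userspace FPU image is saved and restored per vCPU instead of through current->thread.fpu. A toy userspace model of the swap (struct assignment stands in for copy_fpregs_to_fpstate()/copy_kernel_to_fpregs(); everything here is illustrative):

#include <stdio.h>

struct fpu_state { unsigned char regs[512]; };

struct vcpu {
	struct fpu_state user_fpu;	/* userspace image (the new field) */
	struct fpu_state guest_fpu;	/* guest image */
};

static struct fpu_state hw_fpu;		/* stands in for the real FPU */

static void load_guest_fpu(struct vcpu *v)
{
	v->user_fpu = hw_fpu;		/* save user state per vCPU */
	hw_fpu = v->guest_fpu;		/* install guest state */
}

static void put_guest_fpu(struct vcpu *v)
{
	v->guest_fpu = hw_fpu;		/* save guest state back */
	hw_fpu = v->user_fpu;		/* restore user state */
}

int main(void)
{
	struct vcpu v = { 0 };

	load_guest_fpu(&v);
	put_guest_fpu(&v);
	printf("user/guest FPU images swapped per vCPU\n");
	return 0;
}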
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index 3249738242b4..0434e2846028 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -14,6 +14,9 @@
+ #include <linux/rbtree.h>
+ #include <linux/blkdev.h>
+ #include <linux/blk-mq.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <linux/sched/mm.h>
+
+ #include "blk.h"
+
+@@ -373,22 +376,25 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
+ * Allocate an array of struct blk_zone to get nr_zones zone information.
+ * The allocated array may be smaller than nr_zones.
+ */
+-static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
++static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
+ {
+- size_t size = *nr_zones * sizeof(struct blk_zone);
+- struct page *page;
+- int order;
+-
+- for (order = get_order(size); order >= 0; order--) {
+- page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
+- if (page) {
+- *nr_zones = min_t(unsigned int, *nr_zones,
+- (PAGE_SIZE << order) / sizeof(struct blk_zone));
+- return page_address(page);
+- }
++ struct blk_zone *zones;
++ size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
++
++ /*
++ * GFP_KERNEL here is meaningless as the caller task context has
++ * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
++ * with memalloc_noio_save().
++ */
++ zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
++ if (!zones) {
++ *nr_zones = 0;
++ return NULL;
+ }
+
+- return NULL;
++ *nr_zones = nrz;
++
++ return zones;
+ }
+
+ void blk_queue_free_zone_bitmaps(struct request_queue *q)
+@@ -415,6 +421,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+ unsigned int i, rep_nr_zones = 0, z = 0, nrz;
+ struct blk_zone *zones = NULL;
++ unsigned int noio_flag;
+ sector_t sector = 0;
+ int ret = 0;
+
+@@ -427,6 +434,12 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+ return 0;
+ }
+
++ /*
++ * Ensure that all memory allocations in this context are done as
++ * if GFP_NOIO was specified.
++ */
++ noio_flag = memalloc_noio_save();
++
+ if (!blk_queue_is_zoned(q) || !nr_zones) {
+ nr_zones = 0;
+ goto update;
+@@ -443,7 +456,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
+
+ /* Get zone information and initialize seq_zones_bitmap */
+ rep_nr_zones = nr_zones;
+- zones = blk_alloc_zones(q->node, &rep_nr_zones);
++ zones = blk_alloc_zones(&rep_nr_zones);
+ if (!zones)
+ goto out;
+
+@@ -480,8 +493,9 @@ update:
+ blk_mq_unfreeze_queue(q);
+
+ out:
+- free_pages((unsigned long)zones,
+- get_order(rep_nr_zones * sizeof(struct blk_zone)));
++ memalloc_noio_restore(noio_flag);
++
++ kvfree(zones);
+ kfree(seq_zones_wlock);
+ kfree(seq_zones_bitmap);
+
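The blk-zoned.c change pairs kvcalloc() with a memalloc_noio_save()/memalloc_noio_restore() scope, so every allocation on the revalidation path behaves as GFP_NOIO and cannot recurse into the block device being revalidated (the same pattern reappears in sd_zbc.c further down). The scope in isolation, as a kernel-side sketch:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int revalidate_sketch(void)
{
	unsigned int noio_flag;
	void *buf;
	int ret = 0;

	noio_flag = memalloc_noio_save();  /* allocs now act as GFP_NOIO */
	buf = kvcalloc(128, sizeof(u64), GFP_KERNEL);
	if (!buf)
		ret = -ENOMEM;
	/* ... fill and consume buf ... */
	kvfree(buf);			   /* kvfree(NULL) is a no-op */
	memalloc_noio_restore(noio_flag);
	return ret;
}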
+diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
+index bf4d4c80fbc6..a6fee5a6e9fb 100644
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -1057,6 +1057,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ dma_fence_is_signaled(fence) ? "" : "un");
++ dma_fence_put(fence);
+ }
+ rcu_read_unlock();
+
+diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
+index 4d32e2c67862..4447e13d1e89 100644
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -365,6 +365,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (!nshared) {
+ rcu_read_unlock();
++
++ dma_fence_put(fence_excl);
++ fence_excl = NULL;
++
+ nshared = krealloc(shared, sz, GFP_KERNEL);
+ if (nshared) {
+ shared = nshared;
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index 3bbf5804bd11..de4da2ed7955 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -238,8 +238,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
+ for (i = 0; i < nirq; i++) {
+ chips->irqs[i] = platform_get_irq(pdev, i);
+ if (chips->irqs[i] < 0) {
+- dev_info(dev, "IRQ not populated, err = %d\n",
+- chips->irqs[i]);
++ if (chips->irqs[i] != -EPROBE_DEFER)
++ dev_info(dev, "IRQ not populated, err = %d\n",
++ chips->irqs[i]);
+ return chips->irqs[i];
+ }
+ }
+diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
+index 9c9b965d7d6d..c9325efc1783 100644
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -118,15 +118,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
+ * Legacy handling of SPI active high chip select. If we have a
+ * property named "cs-gpios" we need to inspect the child node
+ * to determine if the flags should have inverted semantics.
+- *
+- * This does not apply to an SPI device named "spi-gpio", because
+- * these have traditionally obtained their own GPIOs by parsing
+- * the device tree directly and did not respect any "spi-cs-high"
+- * property on the SPI bus children.
+ */
+- if (IS_ENABLED(CONFIG_SPI_MASTER) &&
+- !strcmp(propname, "cs-gpios") &&
+- !of_device_is_compatible(np, "spi-gpio") &&
++ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ of_property_read_bool(np, "cs-gpios")) {
+ struct device_node *child;
+ u32 cs;
+@@ -161,6 +154,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
+ of_node_full_name(child));
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
++ of_node_put(child);
+ break;
+ }
+ }
+diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
+index b2f10b6ad6e5..bbb2575d4728 100644
+--- a/drivers/net/caif/caif_hsi.c
++++ b/drivers/net/caif/caif_hsi.c
+@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
+ rtnl_lock();
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+- unregister_netdev(cfhsi->ndev);
++ unregister_netdevice(cfhsi->ndev);
+ }
+ rtnl_unlock();
+ }
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 063c7a671b41..2e8b1ab2c6f7 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -4711,6 +4711,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
+ err = PTR_ERR(chip->reset);
+ goto out;
+ }
++ if (chip->reset)
++ usleep_range(1000, 2000);
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index c12c1bab0fe4..bf39fc83d577 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -285,6 +285,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+ hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+ sw_cons = txdata->tx_pkt_cons;
+
++ /* Ensure subsequent loads occur after hw_cons */
++ smp_rmb();
++
+ while (sw_cons != hw_cons) {
+ u16 pkt_cons;
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 9090c79387c1..7afae9d80e75 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3022,7 +3022,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
+ int num_vnics = 1;
+
+ #ifdef CONFIG_RFS_ACCEL
+- if (bp->flags & BNXT_FLAG_RFS)
++ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+ num_vnics += bp->rx_nr_rings;
+ #endif
+
+@@ -7133,6 +7133,9 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
+ #ifdef CONFIG_RFS_ACCEL
+ int i, rc = 0;
+
++ if (bp->flags & BNXT_FLAG_CHIP_P5)
++ return 0;
++
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_vnic_info *vnic;
+ u16 vnic_id = i + 1;
+@@ -9592,7 +9595,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+ return -ENOMEM;
+
+ vnics = 1;
+- if (bp->flags & BNXT_FLAG_RFS)
++ if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
+ vnics += rx_rings;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 41b50e6570ea..2369b4bd63e3 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -3083,39 +3083,42 @@ static void bcmgenet_timeout(struct net_device *dev)
+ netif_tx_wake_all_queues(dev);
+ }
+
+-#define MAX_MC_COUNT 16
++#define MAX_MDF_FILTER 17
+
+ static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr,
+- int *i,
+- int *mc)
++ int *i)
+ {
+- u32 reg;
+-
+ bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+ UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
+- reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+- reg |= (1 << (MAX_MC_COUNT - *mc));
+- bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ *i += 2;
+- (*mc)++;
+ }
+
+ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ {
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+- int i, mc;
++ int i, nfilter;
+ u32 reg;
+
+ netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+- /* Promiscuous mode */
++ /* Number of filters needed */
++ nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
++
++ /*
++	 * Turn on promiscuous mode for three scenarios:
++ * 1. IFF_PROMISC flag is set
++ * 2. IFF_ALLMULTI flag is set
++	 * 3. The number of filters needed exceeds the number of filters
++ * supported by the hardware.
++ */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+- if (dev->flags & IFF_PROMISC) {
++ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
++ (nfilter > MAX_MDF_FILTER)) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+@@ -3125,32 +3128,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+- /* UniMac doesn't support ALLMULTI */
+- if (dev->flags & IFF_ALLMULTI) {
+- netdev_warn(dev, "ALLMULTI is not supported\n");
+- return;
+- }
+-
+ /* update MDF filter */
+ i = 0;
+- mc = 0;
+ /* Broadcast */
+- bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
++ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
+ /* my own address.*/
+- bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+- /* Unicast list*/
+- if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+- return;
++ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
+
+- if (!netdev_uc_empty(dev))
+- netdev_for_each_uc_addr(ha, dev)
+- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+- /* Multicast */
+- if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+- return;
++ /* Unicast */
++ netdev_for_each_uc_addr(ha, dev)
++ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
++ /* Multicast */
+ netdev_for_each_mc_addr(ha, dev)
+- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
++ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
++
++ /* Enable filters */
++ reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
++ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ }
+
+ /* Set the hardware MAC address. */
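bcmgenet_set_rx_mode() now counts the filters it needs up front (broadcast, own address, unicast list, multicast list) and falls back to promiscuous mode whenever the 17 MDF slots would overflow, instead of silently truncating the list. The enable register is then a contiguous run of high bits, one per programmed filter. The mask arithmetic as a standalone check (GENMASK is redefined here as a userspace stand-in for the kernel's macro):

#include <stdint.h>
#include <stdio.h>

#define MAX_MDF_FILTER 17

/* Userspace stand-in for the kernel's GENMASK(h, l). */
#define GENMASK(h, l) \
	(((~0U) >> (31 - (h))) & ~((1U << (l)) - 1U))

int main(void)
{
	unsigned int nfilter = 5;	/* e.g. bcast + own addr + 3 more */
	uint32_t reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);

	printf("UMAC_MDF_CTRL = 0x%08x\n", reg);	/* prints 0x0001f000 */
	return 0;
}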
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index fe518c854d1f..c93a6f9b735b 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4917,6 +4917,13 @@ static const struct dmi_system_id msi_blacklist[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+ },
+ },
++ {
++ .ident = "ASUS P6T",
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
++ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
++ },
++ },
+ {}
+ };
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+index cc6797e24571..cc227a7aa79f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
+@@ -294,6 +294,7 @@ enum {
+ MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_AM,
+ MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
++ MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
+ };
+
+ struct mlx5e_cq {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+index 476dd97f7f2f..f3d98748b211 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+@@ -142,22 +142,20 @@ static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+ {
+ struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
+ u32 eqe_count;
+- int ret;
+
+ netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+- ret = eqe_count ? false : true;
+ if (!eqe_count) {
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+- return ret;
++ return -EIO;
+ }
+
+ netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
+ eqe_count, eq->core.eqn);
+ sq->channel->stats->eq_rearm++;
+- return ret;
++ return 0;
+ }
+
+ int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+@@ -264,13 +262,13 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
+
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (err)
+- break;
++ goto unlock;
+
+ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
+ state,
+ netif_xmit_stopped(sq->txq));
+ if (err)
+- break;
++ goto unlock;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 8db9fdbc03ea..a44c24280128 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -855,6 +855,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
+ if (err)
+ goto err_destroy_rq;
+
++ if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
++ __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
++
+ if (params->rx_dim_enabled)
+ __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 13133e7f088e..8a5f9411cac6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -873,8 +873,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+ goto csum_unnecessary;
+
++ stats->csum_complete++;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
++
++ if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
++ return; /* CQE csum covers all received bytes */
++
++ /* csum might need some fixups ...*/
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). This will add
+@@ -885,7 +891,6 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
+ skb->csum);
+
+ mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
+- stats->csum_complete++;
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index acab26b88261..535221b5256b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1882,11 +1882,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+ esw->enabled_vports = 0;
+ esw->mode = SRIOV_NONE;
+ esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+- if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
+- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+- esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+- else
+- esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+
+ dev->priv.eswitch = esw;
+ return 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 47b446d30f71..c2beadc41c40 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -1840,6 +1840,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
+ {
+ int err;
+
++ if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
++ MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
++ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
++ else
++ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
++
+ err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
+ if (err)
+ return err;
+@@ -1901,6 +1907,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+ esw_offloads_devcom_cleanup(esw);
+ esw_offloads_unload_all_reps(esw, num_vfs);
+ esw_offloads_steering_cleanup(esw);
++ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ }
+
+ static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+index 9ca492b430d8..603d294757b4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+@@ -698,7 +698,9 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+
+ prof->init(mdev, netdev, prof, ipriv);
+
+- mlx5e_attach_netdev(epriv);
++ err = mlx5e_attach_netdev(epriv);
++ if (err)
++ goto detach;
+ netif_carrier_off(netdev);
+
+ /* set rdma_netdev func pointers */
+@@ -714,6 +716,11 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+
+ return 0;
+
++detach:
++ prof->cleanup(epriv);
++ if (ipriv->sub_interface)
++ return err;
++ mlx5e_destroy_mdev_resources(mdev);
+ destroy_ht:
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ return err;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+index be69c1d7941a..48b5c847b642 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+@@ -98,27 +98,12 @@ static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
+ */
+ if (entropy_flags.gre_calc_supported &&
+ reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) {
+- /* Other applications may change the global FW entropy
+- * calculations settings. Check that the current entropy value
+- * is the negative of the updated value.
+- */
+- if (entropy_flags.force_enabled &&
+- enable == entropy_flags.gre_calc_enabled) {
+- mlx5_core_warn(tun_entropy->mdev,
+- "Unexpected GRE entropy calc setting - expected %d",
+- !entropy_flags.gre_calc_enabled);
+- return -EOPNOTSUPP;
+- }
+- err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable,
+- entropy_flags.force_supported);
++ if (!entropy_flags.force_supported)
++ return 0;
++ err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev,
++ enable, !enable);
+ if (err)
+ return err;
+- /* if we turn on the entropy we don't need to force it anymore */
+- if (entropy_flags.force_supported && enable) {
+- err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0);
+- if (err)
+- return err;
+- }
+ } else if (entropy_flags.calc_supported) {
+ /* Other applications may change the global FW entropy
+ * calculations settings. Check that the current entropy value
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+index 8601b3041acd..332195d96c62 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+@@ -805,6 +805,7 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_prio_qopt_offload *p);
+
+ /* spectrum_fid.c */
++bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
+ bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
+ struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index);
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+index b25048c6c761..21296fa7f7fb 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+ have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
+ &prio_map);
+
+- if (!have_dscp) {
+- err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+- MLXSW_REG_QPTS_TRUST_STATE_PCP);
+- if (err)
+- netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+- return err;
+- }
+-
+ mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
+ &dscp_map);
+ err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
+@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
+ return err;
+ }
+
++ if (!have_dscp) {
++ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
++ MLXSW_REG_QPTS_TRUST_STATE_PCP);
++ if (err)
++ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
++ return err;
++ }
++
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_DSCP);
+ if (err) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+index 46baf3b44309..8df3cb21baa6 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+@@ -126,6 +126,16 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
+ [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
+ };
+
++bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index)
++{
++ enum mlxsw_sp_fid_type fid_type = MLXSW_SP_FID_TYPE_DUMMY;
++ struct mlxsw_sp_fid_family *fid_family;
++
++ fid_family = mlxsw_sp->fid_core->fid_family_arr[fid_type];
++
++ return fid_family->start_index == fid_index;
++}
++
+ bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
+ {
+ return fid->fid_family->lag_vid_valid;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+index 50111f228d77..5ecb45118400 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+@@ -2468,6 +2468,9 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
+ goto just_remove;
+ }
+
++ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
++ goto just_remove;
++
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+@@ -2527,6 +2530,9 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
+ goto just_remove;
+ }
+
++ if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
++ goto just_remove;
++
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
+ if (!mlxsw_sp_port_vlan) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index d06a61f00e78..96637fcbe65d 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5157,6 +5157,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8411_2);
++
++ /* The following Realtek-provided magic fixes an issue with the RX unit
++	 * getting confused after the PHY has been powered down.
++ */
++ r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
++ r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
++ mdelay(3);
++ r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
++
++ r8168_mac_ocp_write(tp, 0xF800, 0xE008);
++ r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
++ r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
++ r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
++ r8168_mac_ocp_write(tp, 0xF808, 0xE027);
++ r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
++ r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
++ r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
++ r8168_mac_ocp_write(tp, 0xF810, 0xC602);
++ r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
++ r8168_mac_ocp_write(tp, 0xF814, 0x0000);
++ r8168_mac_ocp_write(tp, 0xF816, 0xC502);
++ r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
++ r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
++ r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
++ r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
++ r8168_mac_ocp_write(tp, 0xF820, 0x080A);
++ r8168_mac_ocp_write(tp, 0xF822, 0x6420);
++ r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
++ r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
++ r8168_mac_ocp_write(tp, 0xF828, 0xC516);
++ r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
++ r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
++ r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
++ r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
++ r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
++ r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
++ r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
++ r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
++ r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
++ r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
++ r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
++ r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
++ r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
++ r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
++ r8168_mac_ocp_write(tp, 0xF846, 0xC404);
++ r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
++ r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
++ r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
++ r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
++ r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
++ r8168_mac_ocp_write(tp, 0xF852, 0xE434);
++ r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
++ r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
++ r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
++ r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
++ r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
++ r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
++ r8168_mac_ocp_write(tp, 0xF860, 0xF007);
++ r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
++ r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
++ r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
++ r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
++ r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
++ r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
++ r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
++ r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
++ r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
++ r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
++ r8168_mac_ocp_write(tp, 0xF876, 0xC516);
++ r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
++ r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
++ r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
++ r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
++ r8168_mac_ocp_write(tp, 0xF880, 0xC512);
++ r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
++ r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
++ r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
++ r8168_mac_ocp_write(tp, 0xF888, 0x483F);
++ r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
++ r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
++ r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
++ r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
++ r8168_mac_ocp_write(tp, 0xF892, 0xC505);
++ r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
++ r8168_mac_ocp_write(tp, 0xF896, 0xC502);
++ r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
++ r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
++ r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
++ r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
++ r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
++ r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
++ r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
++ r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
++ r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
++ r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
++ r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
++ r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
++ r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
++ r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
++ r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
++ r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
++ r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
++ r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
++ r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
++ r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
++ r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
++ r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
++ r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
++ r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
++ r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
++ r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
++ r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
++ r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
++ r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
++ r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
++ r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
++ r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
++ r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
++ r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
++ r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
++
++ r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
++
++ r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
++ r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
++ r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
++ r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
++ r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
++ r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
++ r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
++
+ rtl_hw_aspm_clkreq_enable(tp, true);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index dbee9b0113e3..932e54e25b71 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3048,17 +3048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+- /*
+- * There is no way to determine the number of TSO
+- * capable Queues. Let's use always the Queue 0
+- * because if TSO is supported then at least this
+- * one will be capable.
+- */
+- skb_set_queue_mapping(skb, 0);
+-
++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+- }
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+@@ -3875,6 +3866,22 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ }
+ }
+
++static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
++ struct net_device *sb_dev)
++{
++ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
++ /*
++ * There is no way to determine the number of TSO
++ * capable Queues. Let's use always the Queue 0
++		 * capable queues. Always use queue 0, because if
++		 * TSO is supported then at least this one will be
++		 * capable.
++ return 0;
++ }
++
++ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
++}
++
+ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+ {
+ struct stmmac_priv *priv = netdev_priv(ndev);
+@@ -4091,6 +4098,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
++ .ndo_select_queue = stmmac_select_queue,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+ #endif
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index afdcc5664ea6..3544e1991579 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -836,7 +836,6 @@ int netvsc_recv_callback(struct net_device *net,
+
+ if (unlikely(!skb)) {
+ ++net_device_ctx->eth_stats.rx_no_memory;
+- rcu_read_unlock();
+ return NVSP_STAT_FAIL;
+ }
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 75aebf65cd09..8f46aa1ddec0 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -865,6 +865,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
+
+ static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
+ {
++ skb->ip_summed = CHECKSUM_NONE;
+ memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, hdr_len);
+ pskb_trim_unique(skb, skb->len - icv_len);
+@@ -1099,10 +1100,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ }
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+- if (!skb) {
+- *pskb = NULL;
++ *pskb = skb;
++ if (!skb)
+ return RX_HANDLER_CONSUMED;
+- }
+
+ pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
+ if (!pulled_sci) {
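Two separate fixes in macsec.c: macsec_finalize_skb() resets ip_summed because the bytes a prior checksum covered are gone, and the skb_unshare() result is written back through *pskb unconditionally, since that call may free the original skb and hand back a copy or NULL. The rx-handler contract in isolation (kernel-side sketch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;		/* keep the caller's view in sync */
	if (!skb)
		return RX_HANDLER_CONSUMED;	/* original was freed */

	/* ... process skb ... */
	return RX_HANDLER_PASS;
}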
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index b6efd2d41dce..be0271a51b0a 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -515,7 +515,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)
+
+ static void sfp_hwmon_to_rx_power(long *value)
+ {
+- *value = DIV_ROUND_CLOSEST(*value, 100);
++ *value = DIV_ROUND_CLOSEST(*value, 10);
+ }
+
+ static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 311b0cc6eb98..97fb0cb1b97a 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -165,23 +165,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
+ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+ struct net_device *dev)
+ {
+- const struct ipv6hdr *iph = ipv6_hdr(skb);
++ const struct ipv6hdr *iph;
+ struct net *net = dev_net(skb->dev);
+- struct flowi6 fl6 = {
+- /* needed to match OIF rule */
+- .flowi6_oif = dev->ifindex,
+- .flowi6_iif = LOOPBACK_IFINDEX,
+- .daddr = iph->daddr,
+- .saddr = iph->saddr,
+- .flowlabel = ip6_flowinfo(iph),
+- .flowi6_mark = skb->mark,
+- .flowi6_proto = iph->nexthdr,
+- .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
+- };
++ struct flowi6 fl6;
+ int ret = NET_XMIT_DROP;
+ struct dst_entry *dst;
+ struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
+
++ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
++ goto err;
++
++ iph = ipv6_hdr(skb);
++
++ memset(&fl6, 0, sizeof(fl6));
++ /* needed to match OIF rule */
++ fl6.flowi6_oif = dev->ifindex;
++ fl6.flowi6_iif = LOOPBACK_IFINDEX;
++ fl6.daddr = iph->daddr;
++ fl6.saddr = iph->saddr;
++ fl6.flowlabel = ip6_flowinfo(iph);
++ fl6.flowi6_mark = skb->mark;
++ fl6.flowi6_proto = iph->nexthdr;
++ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
++
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == dst_null)
+ goto err;
+@@ -237,21 +243,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
+ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+ struct net_device *vrf_dev)
+ {
+- struct iphdr *ip4h = ip_hdr(skb);
++ struct iphdr *ip4h;
+ int ret = NET_XMIT_DROP;
+- struct flowi4 fl4 = {
+- /* needed to match OIF rule */
+- .flowi4_oif = vrf_dev->ifindex,
+- .flowi4_iif = LOOPBACK_IFINDEX,
+- .flowi4_tos = RT_TOS(ip4h->tos),
+- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+- .flowi4_proto = ip4h->protocol,
+- .daddr = ip4h->daddr,
+- .saddr = ip4h->saddr,
+- };
++ struct flowi4 fl4;
+ struct net *net = dev_net(vrf_dev);
+ struct rtable *rt;
+
++ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
++ goto err;
++
++ ip4h = ip_hdr(skb);
++
++ memset(&fl4, 0, sizeof(fl4));
++ /* needed to match OIF rule */
++ fl4.flowi4_oif = vrf_dev->ifindex;
++ fl4.flowi4_iif = LOOPBACK_IFINDEX;
++ fl4.flowi4_tos = RT_TOS(ip4h->tos);
++ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
++ fl4.flowi4_proto = ip4h->protocol;
++ fl4.daddr = ip4h->daddr;
++ fl4.saddr = ip4h->saddr;
++
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto err;
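Both vrf.c hunks stop assuming the IP header is present in the skb's linear area: pskb_may_pull() must succeed before the ip_hdr()/ipv6_hdr() results are touched, and the flow key is built afterwards from a memset-cleared struct. The guard on its own (kernel-side sketch):

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static int example_v4_check(struct sk_buff *skb)
{
	struct iphdr *ip4h;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		return -EINVAL;		/* runt or non-linear packet */

	ip4h = ip_hdr(skb);		/* safe to dereference now */
	return ip4h->protocol;
}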
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index f9d1df0509c6..e73bf0193a8f 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -9,6 +9,8 @@
+ */
+
+ #include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <linux/sched/mm.h>
+
+ #include <asm/unaligned.h>
+
+@@ -50,7 +52,7 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
+ /**
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
+ * @sdkp: The target disk
+- * @buf: Buffer to use for the reply
++ * @buf: vmalloc-ed buffer to use for the reply
+ * @buflen: the buffer size
+ * @lba: Start LBA of the report
+ * @partial: Do partial report
+@@ -79,7 +81,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ put_unaligned_be32(buflen, &cmd[10]);
+ if (partial)
+ cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
+- memset(buf, 0, buflen);
+
+ result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
+ buf, buflen, &sshdr,
+@@ -103,6 +104,53 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ return 0;
+ }
+
++/*
++ * Maximum number of zones to get with one report zones command.
++ */
++#define SD_ZBC_REPORT_MAX_ZONES 8192U
++
++/**
++ * Allocate a buffer for report zones reply.
++ * @sdkp: The target disk
++ * @nr_zones: Maximum number of zones to report
++ * @buflen: Size of the buffer allocated
++ *
++ * Try to allocate a reply buffer for the number of requested zones.
++ * The size of the buffer allocated may be smaller than requested to
++ * satisfy the device constraints (max_hw_sectors, max_segments, etc).
++ *
++ * Return the address of the allocated buffer and update @buflen with
++ * the size of the allocated buffer.
++ */
++static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
++ unsigned int nr_zones, size_t *buflen)
++{
++ struct request_queue *q = sdkp->disk->queue;
++ size_t bufsize;
++ void *buf;
++
++ /*
++ * Report zone buffer size should be at most 64B times the number of
++ * zones requested plus the 64B reply header, but should be at least
++ * SECTOR_SIZE for ATA devices.
++ * Make sure that this size does not exceed the hardware capabilities.
++ * Furthermore, since the report zone command cannot be split, make
++ * sure that the allocated buffer can always be mapped by limiting the
++ * number of pages allocated to the HBA max segments limit.
++ */
++ nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES);
++ bufsize = roundup((nr_zones + 1) * 64, 512);
++ bufsize = min_t(size_t, bufsize,
++ queue_max_hw_sectors(q) << SECTOR_SHIFT);
++ bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT);
++
++ buf = vzalloc(bufsize);
++ if (buf)
++ *buflen = bufsize;
++
++ return buf;
++}
++
+ /**
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+@@ -118,30 +166,23 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ gfp_t gfp_mask)
+ {
+ struct scsi_disk *sdkp = scsi_disk(disk);
+- unsigned int i, buflen, nrz = *nr_zones;
++ unsigned int i, nrz = *nr_zones;
+ unsigned char *buf;
+- size_t offset = 0;
++ size_t buflen = 0, offset = 0;
+ int ret = 0;
+
+ if (!sd_is_zoned(sdkp))
+ /* Not a zoned device */
+ return -EOPNOTSUPP;
+
+- /*
+- * Get a reply buffer for the number of requested zones plus a header,
+- * without exceeding the device maximum command size. For ATA disks,
+- * buffers must be aligned to 512B.
+- */
+- buflen = min(queue_max_hw_sectors(disk->queue) << 9,
+- roundup((nrz + 1) * 64, 512));
+- buf = kmalloc(buflen, gfp_mask);
++ buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+- goto out_free_buf;
++ goto out;
+
+ nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+ for (i = 0; i < nrz; i++) {
+@@ -152,8 +193,8 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+
+ *nr_zones = nrz;
+
+-out_free_buf:
+- kfree(buf);
++out:
++ kvfree(buf);
+
+ return ret;
+ }
+@@ -287,8 +328,6 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+ return 0;
+ }
+
+-#define SD_ZBC_BUF_SIZE 131072U
+-
+ /**
+ * sd_zbc_check_zones - Check the device capacity and zone sizes
+ * @sdkp: Target disk
+@@ -304,22 +343,28 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+ */
+ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+ {
++ size_t bufsize, buflen;
++ unsigned int noio_flag;
+ u64 zone_blocks = 0;
+ sector_t max_lba, block = 0;
+ unsigned char *buf;
+ unsigned char *rec;
+- unsigned int buf_len;
+- unsigned int list_length;
+ int ret;
+ u8 same;
+
++ /* Do all memory allocations as if GFP_NOIO was specified */
++ noio_flag = memalloc_noio_save();
++
+ /* Get a buffer */
+- buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
+- if (!buf)
+- return -ENOMEM;
++ buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES,
++ &bufsize);
++ if (!buf) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
+ /* Do a report zone to get max_lba and the same field */
+- ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
++ ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false);
+ if (ret)
+ goto out_free;
+
+@@ -355,12 +400,12 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+ do {
+
+ /* Parse REPORT ZONES header */
+- list_length = get_unaligned_be32(&buf[0]) + 64;
++ buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64,
++ bufsize);
+ rec = buf + 64;
+- buf_len = min(list_length, SD_ZBC_BUF_SIZE);
+
+ /* Parse zone descriptors */
+- while (rec < buf + buf_len) {
++ while (rec < buf + buflen) {
+ u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
+
+ if (zone_blocks == 0) {
+@@ -376,8 +421,8 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
+ }
+
+ if (block < sdkp->capacity) {
+- ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+- block, true);
++ ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block,
++ true);
+ if (ret)
+ goto out_free;
+ }
+@@ -408,7 +453,8 @@ out:
+ }
+
+ out_free:
+- kfree(buf);
++ memalloc_noio_restore(noio_flag);
++ kvfree(buf);
+
+ return ret;
+ }
+diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
+index c7843b149a1e..92042f073d58 100644
+--- a/fs/ext4/dir.c
++++ b/fs/ext4/dir.c
+@@ -109,7 +109,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ struct buffer_head *bh = NULL;
+- int dir_has_error = 0;
+ struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
+
+ if (IS_ENCRYPTED(inode)) {
+@@ -145,8 +144,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+ return err;
+ }
+
+- offset = ctx->pos & (sb->s_blocksize - 1);
+-
+ while (ctx->pos < inode->i_size) {
+ struct ext4_map_blocks map;
+
+@@ -155,9 +152,18 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+ goto errout;
+ }
+ cond_resched();
++ offset = ctx->pos & (sb->s_blocksize - 1);
+ map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
+ map.m_len = 1;
+ err = ext4_map_blocks(NULL, inode, &map, 0);
++ if (err == 0) {
++ /* m_len should never be zero but let's avoid
++ * an infinite loop if it somehow is */
++ if (map.m_len == 0)
++ map.m_len = 1;
++ ctx->pos += map.m_len * sb->s_blocksize;
++ continue;
++ }
+ if (err > 0) {
+ pgoff_t index = map.m_pblk >>
+ (PAGE_SHIFT - inode->i_blkbits);
+@@ -176,13 +182,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
+ }
+
+ if (!bh) {
+- if (!dir_has_error) {
+- EXT4_ERROR_FILE(file, 0,
+- "directory contains a "
+- "hole at offset %llu",
+- (unsigned long long) ctx->pos);
+- dir_has_error = 1;
+- }
+ /* corrupt size? Maybe no more blocks to read */
+ if (ctx->pos > inode->i_blocks << 9)
+ break;
+diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
+index 75a5309f2231..ef8fcf7d0d3b 100644
+--- a/fs/ext4/ext4_jbd2.h
++++ b/fs/ext4/ext4_jbd2.h
+@@ -361,20 +361,20 @@ static inline int ext4_journal_force_commit(journal_t *journal)
+ }
+
+ static inline int ext4_jbd2_inode_add_write(handle_t *handle,
+- struct inode *inode)
++ struct inode *inode, loff_t start_byte, loff_t length)
+ {
+ if (ext4_handle_valid(handle))
+- return jbd2_journal_inode_add_write(handle,
+- EXT4_I(inode)->jinode);
++ return jbd2_journal_inode_ranged_write(handle,
++ EXT4_I(inode)->jinode, start_byte, length);
+ return 0;
+ }
+
+ static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
+- struct inode *inode)
++ struct inode *inode, loff_t start_byte, loff_t length)
+ {
+ if (ext4_handle_valid(handle))
+- return jbd2_journal_inode_add_wait(handle,
+- EXT4_I(inode)->jinode);
++ return jbd2_journal_inode_ranged_wait(handle,
++ EXT4_I(inode)->jinode, start_byte, length);
+ return 0;
+ }
+
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 2c5baa5e8291..f4a24a46245e 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -165,6 +165,10 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ return ret;
++
++ if (unlikely(IS_IMMUTABLE(inode)))
++ return -EPERM;
++
+ /*
+ * If we have encountered a bitmap-format file, the size limit
+ * is smaller than s_maxbytes, which is for extent-mapped files.
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index c7f77c643008..85c648289b57 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -731,10 +731,16 @@ out_sem:
+ !(flags & EXT4_GET_BLOCKS_ZERO) &&
+ !ext4_is_quota_file(inode) &&
+ ext4_should_order_data(inode)) {
++ loff_t start_byte =
++ (loff_t)map->m_lblk << inode->i_blkbits;
++ loff_t length = (loff_t)map->m_len << inode->i_blkbits;
++
+ if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
+- ret = ext4_jbd2_inode_add_wait(handle, inode);
++ ret = ext4_jbd2_inode_add_wait(handle, inode,
++ start_byte, length);
+ else
+- ret = ext4_jbd2_inode_add_write(handle, inode);
++ ret = ext4_jbd2_inode_add_write(handle, inode,
++ start_byte, length);
+ if (ret)
+ return ret;
+ }
+@@ -4085,7 +4091,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
+ err = 0;
+ mark_buffer_dirty(bh);
+ if (ext4_should_order_data(inode))
+- err = ext4_jbd2_inode_add_write(handle, inode);
++ err = ext4_jbd2_inode_add_write(handle, inode, from,
++ length);
+ }
+
+ unlock:
+@@ -5520,6 +5527,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
++ if (unlikely(IS_IMMUTABLE(inode)))
++ return -EPERM;
++
++ if (unlikely(IS_APPEND(inode) &&
++ (ia_valid & (ATTR_MODE | ATTR_UID |
++ ATTR_GID | ATTR_TIMES_SET))))
++ return -EPERM;
++
+ error = setattr_prepare(dentry, attr);
+ if (error)
+ return error;
+@@ -6190,6 +6205,9 @@ vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
+ get_block_t *get_block;
+ int retries = 0;
+
++ if (unlikely(IS_IMMUTABLE(inode)))
++ return VM_FAULT_SIGBUS;
++
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
+
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index e486e49b31ed..7af835ac8d23 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -269,6 +269,29 @@ static int uuid_is_zero(__u8 u[16])
+ }
+ #endif
+
++/*
++ * If immutable is set and we are not clearing it, we're not allowed to change
++ * anything else in the inode. Don't error out if we're only trying to set
++ * immutable on an immutable file.
++ */
++static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
++ unsigned int flags)
++{
++ struct ext4_inode_info *ei = EXT4_I(inode);
++ unsigned int oldflags = ei->i_flags;
++
++ if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
++ return 0;
++
++ if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
++ return -EPERM;
++ if (ext4_has_feature_project(inode->i_sb) &&
++ __kprojid_val(ei->i_projid) != new_projid)
++ return -EPERM;
++
++ return 0;
++}
++
+ static int ext4_ioctl_setflags(struct inode *inode,
+ unsigned int flags)
+ {
+@@ -340,6 +363,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
+ }
+ }
+
++ /*
++ * Wait for all pending directio and then flush all the dirty pages
++ * for this file. The flush marks all the pages readonly, so any
++ * subsequent attempt to write to the file (particularly mmap pages)
++ * will come through the filesystem and fail.
++ */
++ if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
++ (flags & EXT4_IMMUTABLE_FL)) {
++ inode_dio_wait(inode);
++ err = filemap_write_and_wait(inode->i_mapping);
++ if (err)
++ goto flags_out;
++ }
++
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+@@ -769,7 +806,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ return err;
+
+ inode_lock(inode);
+- err = ext4_ioctl_setflags(inode, flags);
++ err = ext4_ioctl_check_immutable(inode,
++ from_kprojid(&init_user_ns, ei->i_projid),
++ flags);
++ if (!err)
++ err = ext4_ioctl_setflags(inode, flags);
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return err;
+@@ -1139,6 +1180,9 @@ resizefs_out:
+ goto out;
+ flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
+ (flags & EXT4_FL_XFLAG_VISIBLE);
++ err = ext4_ioctl_check_immutable(inode, fa.fsx_projid, flags);
++ if (err)
++ goto out;
+ err = ext4_ioctl_setflags(inode, flags);
+ if (err)
+ goto out;
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 1083a9f3f16a..c7ded4e2adff 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -390,7 +390,8 @@ data_copy:
+
+ /* Even in case of data=writeback it is reasonable to pin
+ * inode to transaction, to prevent unexpected data loss */
+- *err = ext4_jbd2_inode_add_write(handle, orig_inode);
++ *err = ext4_jbd2_inode_add_write(handle, orig_inode,
++ (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);
+
+ unlock_pages:
+ unlock_page(pagep[0]);
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index cd01c4a67ffb..771fe02f317d 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -82,8 +82,18 @@ static struct buffer_head *ext4_append(handle_t *handle,
+ static int ext4_dx_csum_verify(struct inode *inode,
+ struct ext4_dir_entry *dirent);
+
++/*
++ * Hints to ext4_read_dirblock regarding whether we expect a directory
++ * block being read to be an index block, or a block containing
++ * directory entries (and if the latter, whether it was found via a
++ * logical block in an htree index block). This is used to control
+ * what sort of sanity checking ext4_read_dirblock() will do on the
+ * directory block read from the storage device. EITHER means
++ * the caller doesn't know what kind of directory block will be read,
++ * so no specific verification will be done.
++ */
+ typedef enum {
+- EITHER, INDEX, DIRENT
++ EITHER, INDEX, DIRENT, DIRENT_HTREE
+ } dirblock_type_t;
+
+ #define ext4_read_dirblock(inode, block, type) \
+@@ -109,11 +119,14 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
+
+ return bh;
+ }
+- if (!bh) {
++ if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
+ ext4_error_inode(inode, func, line, block,
+- "Directory hole found");
++ "Directory hole found for htree %s block",
++ (type == INDEX) ? "index" : "leaf");
+ return ERR_PTR(-EFSCORRUPTED);
+ }
++ if (!bh)
++ return NULL;
+ dirent = (struct ext4_dir_entry *) bh->b_data;
+ /* Determine whether or not we have an index block */
+ if (is_dx(inode)) {
+@@ -980,7 +993,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
+
+ dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
+ (unsigned long)block));
+- bh = ext4_read_dirblock(dir, block, DIRENT);
++ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+
+@@ -1586,7 +1599,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
+ return (struct buffer_head *) frame;
+ do {
+ block = dx_get_block(frame->at);
+- bh = ext4_read_dirblock(dir, block, DIRENT);
++ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
+ if (IS_ERR(bh))
+ goto errout;
+
+@@ -2170,6 +2183,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ blocks = dir->i_size >> sb->s_blocksize_bits;
+ for (block = 0; block < blocks; block++) {
+ bh = ext4_read_dirblock(dir, block, DIRENT);
++ if (bh == NULL) {
++ bh = ext4_bread(handle, dir, block,
++ EXT4_GET_BLOCKS_CREATE);
++ goto add_to_new_block;
++ }
+ if (IS_ERR(bh)) {
+ retval = PTR_ERR(bh);
+ bh = NULL;
+@@ -2190,6 +2208,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
+ brelse(bh);
+ }
+ bh = ext4_append(handle, dir, &block);
++add_to_new_block:
+ if (IS_ERR(bh)) {
+ retval = PTR_ERR(bh);
+ bh = NULL;
+@@ -2234,7 +2253,7 @@ again:
+ return PTR_ERR(frame);
+ entries = frame->entries;
+ at = frame->at;
+- bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
++ bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ bh = NULL;
+@@ -2782,7 +2801,10 @@ bool ext4_empty_dir(struct inode *inode)
+ EXT4_ERROR_INODE(inode, "invalid size");
+ return true;
+ }
+- bh = ext4_read_dirblock(inode, 0, EITHER);
++ /* The first directory block must not be a hole,
++ * so treat it as DIRENT_HTREE
++ */
++ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+ if (IS_ERR(bh))
+ return true;
+
+@@ -2804,6 +2826,10 @@ bool ext4_empty_dir(struct inode *inode)
+ brelse(bh);
+ lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+ bh = ext4_read_dirblock(inode, lblock, EITHER);
++ if (bh == NULL) {
++ offset += sb->s_blocksize;
++ continue;
++ }
+ if (IS_ERR(bh))
+ return true;
+ de = (struct ext4_dir_entry_2 *) bh->b_data;
+@@ -3369,7 +3395,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
+ struct buffer_head *bh;
+
+ if (!ext4_has_inline_data(inode)) {
+- bh = ext4_read_dirblock(inode, 0, EITHER);
++ /* The first directory block must not be a hole, so
++ * treat it as DIRENT_HTREE
++ */
++ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
+ if (IS_ERR(bh)) {
+ *retval = PTR_ERR(bh);
+ return NULL;
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index efd0ce9489ae..668f9021cf11 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -187,14 +187,15 @@ static int journal_wait_on_commit_record(journal_t *journal,
+ * use writepages() because with delayed allocation we may be doing
+ * block allocation in writepages().
+ */
+-static int journal_submit_inode_data_buffers(struct address_space *mapping)
++static int journal_submit_inode_data_buffers(struct address_space *mapping,
++ loff_t dirty_start, loff_t dirty_end)
+ {
+ int ret;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = mapping->nrpages * 2,
+- .range_start = 0,
+- .range_end = i_size_read(mapping->host),
++ .range_start = dirty_start,
++ .range_end = dirty_end,
+ };
+
+ ret = generic_writepages(mapping, &wbc);
+@@ -218,6 +219,9 @@ static int journal_submit_data_buffers(journal_t *journal,
+
+ spin_lock(&journal->j_list_lock);
+ list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
++ loff_t dirty_start = jinode->i_dirty_start;
++ loff_t dirty_end = jinode->i_dirty_end;
++
+ if (!(jinode->i_flags & JI_WRITE_DATA))
+ continue;
+ mapping = jinode->i_vfs_inode->i_mapping;
+@@ -230,7 +234,8 @@ static int journal_submit_data_buffers(journal_t *journal,
+ * only allocated blocks here.
+ */
+ trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
+- err = journal_submit_inode_data_buffers(mapping);
++ err = journal_submit_inode_data_buffers(mapping, dirty_start,
++ dirty_end);
+ if (!ret)
+ ret = err;
+ spin_lock(&journal->j_list_lock);
+@@ -257,12 +262,16 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+ /* For locking, see the comment in journal_submit_data_buffers() */
+ spin_lock(&journal->j_list_lock);
+ list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
++ loff_t dirty_start = jinode->i_dirty_start;
++ loff_t dirty_end = jinode->i_dirty_end;
++
+ if (!(jinode->i_flags & JI_WAIT_DATA))
+ continue;
+ jinode->i_flags |= JI_COMMIT_RUNNING;
+ spin_unlock(&journal->j_list_lock);
+- err = filemap_fdatawait_keep_errors(
+- jinode->i_vfs_inode->i_mapping);
++ err = filemap_fdatawait_range_keep_errors(
++ jinode->i_vfs_inode->i_mapping, dirty_start,
++ dirty_end);
+ if (!ret)
+ ret = err;
+ spin_lock(&journal->j_list_lock);
+@@ -282,6 +291,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
+ &jinode->i_transaction->t_inode_list);
+ } else {
+ jinode->i_transaction = NULL;
++ jinode->i_dirty_start = 0;
++ jinode->i_dirty_end = 0;
+ }
+ }
+ spin_unlock(&journal->j_list_lock);
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 43df0c943229..e0382067c824 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -94,6 +94,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
+ EXPORT_SYMBOL(jbd2_journal_force_commit);
+ EXPORT_SYMBOL(jbd2_journal_inode_add_write);
+ EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
++EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
++EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
+ EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
+ EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
+ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+@@ -2574,6 +2576,8 @@ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
+ jinode->i_next_transaction = NULL;
+ jinode->i_vfs_inode = inode;
+ jinode->i_flags = 0;
++ jinode->i_dirty_start = 0;
++ jinode->i_dirty_end = 0;
+ INIT_LIST_HEAD(&jinode->i_list);
+ }
+
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 8ca4fddc705f..990e7b5062e7 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -2565,7 +2565,7 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
+ * File inode in the inode list of the handle's transaction
+ */
+ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
+- unsigned long flags)
++ unsigned long flags, loff_t start_byte, loff_t end_byte)
+ {
+ transaction_t *transaction = handle->h_transaction;
+ journal_t *journal;
+@@ -2577,26 +2577,17 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
+ jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
+ transaction->t_tid);
+
+- /*
+- * First check whether inode isn't already on the transaction's
+- * lists without taking the lock. Note that this check is safe
+- * without the lock as we cannot race with somebody removing inode
+- * from the transaction. The reason is that we remove inode from the
+- * transaction only in journal_release_jbd_inode() and when we commit
+- * the transaction. We are guarded from the first case by holding
+- * a reference to the inode. We are safe against the second case
+- * because if jinode->i_transaction == transaction, commit code
+- * cannot touch the transaction because we hold reference to it,
+- * and if jinode->i_next_transaction == transaction, commit code
+- * will only file the inode where we want it.
+- */
+- if ((jinode->i_transaction == transaction ||
+- jinode->i_next_transaction == transaction) &&
+- (jinode->i_flags & flags) == flags)
+- return 0;
+-
+ spin_lock(&journal->j_list_lock);
+ jinode->i_flags |= flags;
++
++ if (jinode->i_dirty_end) {
++ jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
++ jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
++ } else {
++ jinode->i_dirty_start = start_byte;
++ jinode->i_dirty_end = end_byte;
++ }
++
+ /* Is inode already attached where we need it? */
+ if (jinode->i_transaction == transaction ||
+ jinode->i_next_transaction == transaction)
+@@ -2631,12 +2622,28 @@ done:
+ int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
+ {
+ return jbd2_journal_file_inode(handle, jinode,
+- JI_WRITE_DATA | JI_WAIT_DATA);
++ JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
+ }
+
+ int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
+ {
+- return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
++ return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
++ LLONG_MAX);
++}
++
++int jbd2_journal_inode_ranged_write(handle_t *handle,
++ struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
++{
++ return jbd2_journal_file_inode(handle, jinode,
++ JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
++ start_byte + length - 1);
++}
++
++int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
++ loff_t start_byte, loff_t length)
++{
++ return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
++ start_byte, start_byte + length - 1);
+ }
+
+ /*
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 56e18d7fbc5a..93baef66b942 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -344,6 +344,11 @@ struct queue_limits {
+
+ #ifdef CONFIG_BLK_DEV_ZONED
+
++/*
++ * Maximum number of zones to report with a single report zones command.
++ */
++#define BLK_ZONED_REPORT_MAX_ZONES 8192U
++
+ extern unsigned int blkdev_nr_zones(struct block_device *bdev);
+ extern int blkdev_report_zones(struct block_device *bdev,
+ sector_t sector, struct blk_zone *zones,
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index f7fdfe93e25d..79fec8a8413f 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2712,6 +2712,8 @@ extern int filemap_flush(struct address_space *);
+ extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
+ extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
+ loff_t lend);
++extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
++ loff_t start_byte, loff_t end_byte);
+
+ static inline int filemap_fdatawait(struct address_space *mapping)
+ {
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 5c04181b7c6d..0e0393e7f41a 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -451,6 +451,22 @@ struct jbd2_inode {
+ * @i_flags: Flags of inode [j_list_lock]
+ */
+ unsigned long i_flags;
++
++ /**
++ * @i_dirty_start:
++ *
++ * Offset in bytes where the dirty range for this inode starts.
++ * [j_list_lock]
++ */
++ loff_t i_dirty_start;
++
++ /**
++ * @i_dirty_end:
++ *
++ * Inclusive offset in bytes where the dirty range for this inode
++ * ends. [j_list_lock]
++ */
++ loff_t i_dirty_end;
+ };
+
+ struct jbd2_revoke_table_s;
+@@ -1397,6 +1413,12 @@ extern int jbd2_journal_force_commit(journal_t *);
+ extern int jbd2_journal_force_commit_nested(journal_t *);
+ extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
+ extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
++extern int jbd2_journal_inode_ranged_write(handle_t *handle,
++ struct jbd2_inode *inode, loff_t start_byte,
++ loff_t length);
++extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
++ struct jbd2_inode *inode, loff_t start_byte,
++ loff_t length);
+ extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
+ struct jbd2_inode *inode, loff_t new_size);
+ extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 5e74305e2e57..7e42efa143a0 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -749,7 +749,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 swp[0x1];
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+- u8 reserved_at_23[0xd];
++ u8 cqe_checksum_full[0x1];
++ u8 reserved_at_24[0xc];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
+ u8 max_geneve_opt_len[0x1];
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 2bca72f3028b..a9d3fbbab4c1 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -1049,6 +1049,11 @@ static inline int in_software_context(struct perf_event *event)
+ return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+ }
+
++static inline int is_exclusive_pmu(struct pmu *pmu)
++{
++ return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
++}
++
+ extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+ extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 12b31c602cb0..f8206d3fed2f 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -302,8 +302,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
+ * @skb: buffer
+ *
+ * If dst is not yet refcounted and not destroyed, grab a ref on it.
++ * Returns true if dst is refcounted.
+ */
+-static inline void skb_dst_force(struct sk_buff *skb)
++static inline bool skb_dst_force(struct sk_buff *skb)
+ {
+ if (skb_dst_is_noref(skb)) {
+ struct dst_entry *dst = skb_dst(skb);
+@@ -314,6 +315,8 @@ static inline void skb_dst_force(struct sk_buff *skb)
+
+ skb->_skb_refdst = (unsigned long)dst;
+ }
++
++ return skb->_skb_refdst != 0UL;
+ }
+
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 582c0caa9811..2ee06191c488 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1063,7 +1063,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
+ void tcp_get_available_congestion_control(char *buf, size_t len);
+ void tcp_get_allowed_congestion_control(char *buf, size_t len);
+ int tcp_set_allowed_congestion_control(char *allowed);
+-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
++int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
++ bool reinit, bool cap_net_admin);
+ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
+
+@@ -1675,6 +1676,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
+ return skb_rb_first(&sk->tcp_rtx_queue);
+ }
+
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++ return skb_rb_last(&sk->tcp_rtx_queue);
++}
++
+ static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
+ {
+ return skb_peek(&sk->sk_write_queue);
+diff --git a/include/net/tls.h b/include/net/tls.h
+index 53d96bca220d..889df0312cd1 100644
+--- a/include/net/tls.h
++++ b/include/net/tls.h
+@@ -313,6 +313,7 @@ struct tls_offload_context_rx {
+ (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+ TLS_DRIVER_STATE_SIZE)
+
++void tls_ctx_free(struct tls_context *ctx);
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f85929ce13be..f851934d55d4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -2553,6 +2553,9 @@ unlock:
+ return ret;
+ }
+
++static bool exclusive_event_installable(struct perf_event *event,
++ struct perf_event_context *ctx);
++
+ /*
+ * Attach a performance event to a context.
+ *
+@@ -2567,6 +2570,8 @@ perf_install_in_context(struct perf_event_context *ctx,
+
+ lockdep_assert_held(&ctx->mutex);
+
++ WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
++
+ if (event->cpu != -1)
+ event->cpu = cpu;
+
+@@ -4358,7 +4363,7 @@ static int exclusive_event_init(struct perf_event *event)
+ {
+ struct pmu *pmu = event->pmu;
+
+- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++ if (!is_exclusive_pmu(pmu))
+ return 0;
+
+ /*
+@@ -4389,7 +4394,7 @@ static void exclusive_event_destroy(struct perf_event *event)
+ {
+ struct pmu *pmu = event->pmu;
+
+- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++ if (!is_exclusive_pmu(pmu))
+ return;
+
+ /* see comment in exclusive_event_init() */
+@@ -4409,14 +4414,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
+ return false;
+ }
+
+-/* Called under the same ctx::mutex as perf_install_in_context() */
+ static bool exclusive_event_installable(struct perf_event *event,
+ struct perf_event_context *ctx)
+ {
+ struct perf_event *iter_event;
+ struct pmu *pmu = event->pmu;
+
+- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
++ lockdep_assert_held(&ctx->mutex);
++
++ if (!is_exclusive_pmu(pmu))
+ return true;
+
+ list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
+@@ -4463,12 +4469,20 @@ static void _free_event(struct perf_event *event)
+ if (event->destroy)
+ event->destroy(event);
+
+- if (event->ctx)
+- put_ctx(event->ctx);
+-
++ /*
++ * Must be after ->destroy(), due to uprobe_perf_close() using
++ * hw.target.
++ */
+ if (event->hw.target)
+ put_task_struct(event->hw.target);
+
++ /*
++ * perf_event_free_task() relies on put_ctx() being 'last', in particular
++ * all task references must be cleaned up.
++ */
++ if (event->ctx)
++ put_ctx(event->ctx);
++
+ exclusive_event_destroy(event);
+ module_put(event->pmu->module);
+
+@@ -4648,8 +4662,17 @@ again:
+ mutex_unlock(&event->child_mutex);
+
+ list_for_each_entry_safe(child, tmp, &free_list, child_list) {
++ void *var = &child->ctx->refcount;
++
+ list_del(&child->child_list);
+ free_event(child);
++
++ /*
++ * Wake any perf_event_free_task() waiting for this event to be
++ * freed.
++ */
++ smp_mb(); /* pairs with wait_var_event() */
++ wake_up_var(var);
+ }
+
+ no_ctx:
+@@ -10922,11 +10945,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ goto err_alloc;
+ }
+
+- if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
+- err = -EBUSY;
+- goto err_context;
+- }
+-
+ /*
+ * Look up the group leader (we will attach this event to it):
+ */
+@@ -11014,6 +11032,18 @@ SYSCALL_DEFINE5(perf_event_open,
+ move_group = 0;
+ }
+ }
++
++ /*
++ * Failure to create exclusive events returns -EBUSY.
++ */
++ err = -EBUSY;
++ if (!exclusive_event_installable(group_leader, ctx))
++ goto err_locked;
++
++ for_each_sibling_event(sibling, group_leader) {
++ if (!exclusive_event_installable(sibling, ctx))
++ goto err_locked;
++ }
+ } else {
+ mutex_lock(&ctx->mutex);
+ }
+@@ -11050,9 +11080,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ * because we need to serialize with concurrent event creation.
+ */
+ if (!exclusive_event_installable(event, ctx)) {
+- /* exclusive and group stuff are assumed mutually exclusive */
+- WARN_ON_ONCE(move_group);
+-
+ err = -EBUSY;
+ goto err_locked;
+ }
+@@ -11519,11 +11546,11 @@ static void perf_free_event(struct perf_event *event,
+ }
+
+ /*
+- * Free an unexposed, unused context as created by inheritance by
+- * perf_event_init_task below, used by fork() in case of fail.
++ * Free a context as created by inheritance by perf_event_init_task() below,
++ * used by fork() in case of failure.
+ *
+- * Not all locks are strictly required, but take them anyway to be nice and
+- * help out with the lockdep assertions.
++ * Even though the task has never lived, the context and events have been
++ * exposed through the child_list, so we must take care tearing it all down.
+ */
+ void perf_event_free_task(struct task_struct *task)
+ {
+@@ -11553,7 +11580,23 @@ void perf_event_free_task(struct task_struct *task)
+ perf_free_event(event, ctx);
+
+ mutex_unlock(&ctx->mutex);
+- put_ctx(ctx);
++
++ /*
++ * perf_event_release_kernel() could've stolen some of our
++ * child events and still have them on its free_list. In that
++ * case we must wait for these events to have been freed (in
++ * particular all their references to this task must've been
++ * dropped).
++ *
++ * Without this copy_process() will unconditionally free this
++ * task (irrespective of its reference count) and
++ * _free_event()'s put_task_struct(event->hw.target) will be a
++ * use-after-free.
++ *
++ * Wait for all events to drop their context reference.
++ */
++ wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
++ put_ctx(ctx); /* must be last */
+ }
+ }
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 6dd9a2274c80..861e26ee4c72 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -549,6 +549,28 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+ }
+ EXPORT_SYMBOL(filemap_fdatawait_range);
+
++/**
++ * filemap_fdatawait_range_keep_errors - wait for writeback to complete
++ * @mapping: address space structure to wait for
++ * @start_byte: offset in bytes where the range starts
++ * @end_byte: offset in bytes where the range ends (inclusive)
++ *
++ * Walk the list of under-writeback pages of the given address space in the
++ * given range and wait for all of them. Unlike filemap_fdatawait_range(),
++ * this function does not clear error status of the address space.
++ *
++ * Use this function if callers don't handle errors themselves. Expected
++ * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
++ * fsfreeze(8).
++ */
++int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
++ loff_t start_byte, loff_t end_byte)
++{
++ __filemap_fdatawait_range(mapping, start_byte, end_byte);
++ return filemap_check_and_keep_errors(mapping);
++}
++EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
++
+ /**
+ * file_fdatawait_range - wait for writeback to complete
+ * @file: file pointing to address space structure to wait for
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 21b74e7a7b2f..52c712984cc7 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -74,7 +74,6 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ struct net_bridge_fdb_entry *dst = NULL;
+ struct net_bridge_mdb_entry *mdst;
+ bool local_rcv, mcast_hit = false;
+- const unsigned char *dest;
+ struct net_bridge *br;
+ u16 vid = 0;
+
+@@ -92,10 +91,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
+
+ local_rcv = !!(br->dev->flags & IFF_PROMISC);
+- dest = eth_hdr(skb)->h_dest;
+- if (is_multicast_ether_addr(dest)) {
++ if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
+ /* by definition the broadcast is also a multicast address */
+- if (is_broadcast_ether_addr(dest)) {
++ if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
+ pkt_type = BR_PKT_BROADCAST;
+ local_rcv = true;
+ } else {
+@@ -145,7 +143,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ }
+ break;
+ case BR_PKT_UNICAST:
+- dst = br_fdb_find_rcu(br, dest, vid);
++ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ default:
+ break;
+ }
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index de22c8fbbb15..3d8deac2353d 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -911,6 +911,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ int type;
+ int err = 0;
+ __be32 group;
++ u16 nsrcs;
+
+ ih = igmpv3_report_hdr(skb);
+ num = ntohs(ih->ngrec);
+@@ -924,8 +925,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ group = grec->grec_mca;
+ type = grec->grec_type;
++ nsrcs = ntohs(grec->grec_nsrcs);
+
+- len += ntohs(grec->grec_nsrcs) * 4;
++ len += nsrcs * 4;
+ if (!ip_mc_may_pull(skb, len))
+ return -EINVAL;
+
+@@ -946,7 +948,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ src = eth_hdr(skb)->h_source;
+ if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+ type == IGMPV3_MODE_IS_INCLUDE) &&
+- ntohs(grec->grec_nsrcs) == 0) {
++ nsrcs == 0) {
+ br_ip4_multicast_leave_group(br, port, group, vid, src);
+ } else {
+ err = br_ip4_multicast_add_group(br, port, group, vid,
+@@ -983,7 +985,8 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ len = skb_transport_offset(skb) + sizeof(*icmp6h);
+
+ for (i = 0; i < num; i++) {
+- __be16 *nsrcs, _nsrcs;
++ __be16 *_nsrcs, __nsrcs;
++ u16 nsrcs;
+
+ nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
+
+@@ -991,12 +994,13 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ nsrcs_offset + sizeof(_nsrcs))
+ return -EINVAL;
+
+- nsrcs = skb_header_pointer(skb, nsrcs_offset,
+- sizeof(_nsrcs), &_nsrcs);
+- if (!nsrcs)
++ _nsrcs = skb_header_pointer(skb, nsrcs_offset,
++ sizeof(__nsrcs), &__nsrcs);
++ if (!_nsrcs)
+ return -EINVAL;
+
+- grec_len = struct_size(grec, grec_src, ntohs(*nsrcs));
++ nsrcs = ntohs(*_nsrcs);
++ grec_len = struct_size(grec, grec_src, nsrcs);
+
+ if (!ipv6_mc_may_pull(skb, len + grec_len))
+ return -EINVAL;
+@@ -1021,7 +1025,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ src = eth_hdr(skb)->h_source;
+ if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+ grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+- ntohs(*nsrcs) == 0) {
++ nsrcs == 0) {
+ br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+ vid, src);
+ } else {
+@@ -1275,7 +1279,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+ u16 vid)
+ {
+ unsigned int transport_len = ipv6_transport_len(skb);
+- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld_msg *mld;
+ struct net_bridge_mdb_entry *mp;
+ struct mld2_query *mld2q;
+@@ -1319,7 +1322,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
+
+ if (is_general_query) {
+ saddr.proto = htons(ETH_P_IPV6);
+- saddr.u.ip6 = ip6h->saddr;
++ saddr.u.ip6 = ipv6_hdr(skb)->saddr;
+
+ br_multicast_query_received(br, port, &br->ip6_other_query,
+ &saddr, max_delay);
+diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
+index 68a6922b4141..7796dd9d42d7 100644
+--- a/net/bridge/br_stp_bpdu.c
++++ b/net/bridge/br_stp_bpdu.c
+@@ -143,7 +143,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
+ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ struct net_device *dev)
+ {
+- const unsigned char *dest = eth_hdr(skb)->h_dest;
+ struct net_bridge_port *p;
+ struct net_bridge *br;
+ const unsigned char *buf;
+@@ -172,7 +171,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+ if (p->state == BR_STATE_DISABLED)
+ goto out;
+
+- if (!ether_addr_equal(dest, br->group_addr))
++ if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
+ goto out;
+
+ if (p->flags & BR_BPDU_GUARD) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index f615e42cf4ef..f681fb772940 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4332,7 +4332,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+ TCP_CA_NAME_MAX-1));
+ name[TCP_CA_NAME_MAX-1] = 0;
+ ret = tcp_set_congestion_control(sk, name, false,
+- reinit);
++ reinit, true);
+ } else {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 9e7fc929bc50..5bb0a1aee50e 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1122,6 +1122,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+
+ atomic_set(&neigh->probes,
+ NEIGH_VAR(neigh->parms, UCAST_PROBES));
++ neigh_del_timer(neigh);
+ neigh->nud_state = NUD_INCOMPLETE;
+ neigh->updated = now;
+ next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
+@@ -1138,6 +1139,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ }
+ } else if (neigh->nud_state & NUD_STALE) {
+ neigh_dbg(2, "neigh %p is delayed\n", neigh);
++ neigh_del_timer(neigh);
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_add_timer(neigh, jiffies +
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index c6bd0f7a020a..c5ebfa199794 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -62,6 +62,11 @@
+ #include <net/net_namespace.h>
+ #include <net/addrconf.h>
+
++#define IPV6ONLY_FLAGS \
++ (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
++ IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
++ IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
++
+ static struct ipv4_devconf ipv4_devconf = {
+ .data = {
+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+@@ -468,6 +473,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
+ ifa->ifa_flags &= ~IFA_F_SECONDARY;
+ last_primary = &in_dev->ifa_list;
+
++ /* Don't set IPv6 only flags to IPv4 addresses */
++ ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
++
+ for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
+ ifap = &ifa1->ifa_next) {
+ if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index a57f0d69eadb..85107bf812f2 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1228,12 +1228,8 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ if (pmc) {
+ im->interface = pmc->interface;
+ if (im->sfmode == MCAST_INCLUDE) {
+- im->tomb = pmc->tomb;
+- pmc->tomb = NULL;
+-
+- im->sources = pmc->sources;
+- pmc->sources = NULL;
+-
++ swap(im->tomb, pmc->tomb);
++ swap(im->sources, pmc->sources);
+ for (psf = im->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ } else {
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 7dc9ab84bb69..5264f064a87e 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2614,6 +2614,8 @@ int tcp_disconnect(struct sock *sk, int flags)
+ tcp_saved_syn_free(tp);
+ tp->compressed_ack = 0;
+ tp->bytes_sent = 0;
++ tp->bytes_acked = 0;
++ tp->bytes_received = 0;
+ tp->bytes_retrans = 0;
+ tp->duplicate_sack[0].start_seq = 0;
+ tp->duplicate_sack[0].end_seq = 0;
+@@ -2768,7 +2770,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ name[val] = 0;
+
+ lock_sock(sk);
+- err = tcp_set_congestion_control(sk, name, true, true);
++ err = tcp_set_congestion_control(sk, name, true, true,
++ ns_capable(sock_net(sk)->user_ns,
++ CAP_NET_ADMIN));
+ release_sock(sk);
+ return err;
+ }
+diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
+index e1862b64a90f..c445a81d144e 100644
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -333,7 +333,8 @@ out:
+ * tcp_reinit_congestion_control (if the current congestion control was
+ * already initialized).
+ */
+-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
++int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
++ bool reinit, bool cap_net_admin)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_congestion_ops *ca;
+@@ -369,8 +370,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
+ } else {
+ err = -EBUSY;
+ }
+- } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+- ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
++ } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
+ err = -EPERM;
+ } else if (!try_module_get(ca->owner)) {
+ err = -EBUSY;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 0ebc33d1c9e5..7d0be046cbc1 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1286,6 +1286,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *buff;
+ int nsize, old_factor;
++ long limit;
+ int nlen;
+ u8 flags;
+
+@@ -1296,8 +1297,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
+- tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
++ /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++ * We need some allowance to not penalize applications setting small
++ * SO_SNDBUF values.
++ * Also allow first and last skb in retransmit queue to be split.
++ */
++ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++ if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++ tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
++ skb != tcp_rtx_queue_head(sk) &&
++ skb != tcp_rtx_queue_tail(sk))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ return -ENOMEM;
+ }
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 9180c8b6f764..455f1292e479 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1104,8 +1104,24 @@ add:
+ err = call_fib6_entry_notifiers(info->nl_net,
+ FIB_EVENT_ENTRY_ADD,
+ rt, extack);
+- if (err)
++ if (err) {
++ struct fib6_info *sibling, *next_sibling;
++
++ /* If the route has siblings, then it first
++ * needs to be unlinked from them.
++ */
++ if (!rt->fib6_nsiblings)
++ return err;
++
++ list_for_each_entry_safe(sibling, next_sibling,
++ &rt->fib6_siblings,
++ fib6_siblings)
++ sibling->fib6_nsiblings--;
++ rt->fib6_nsiblings = 0;
++ list_del_init(&rt->fib6_siblings);
++ rt6_multipath_rebalance(next_sibling);
+ return err;
++ }
+
+ rcu_assign_pointer(rt->fib6_next, iter);
+ fib6_info_hold(rt);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 97a843cf164c..5f5a0a42ce60 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2215,7 +2215,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
+ {
+ u32 rt_cookie = 0;
+
+- if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
++ if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
+ rt_cookie != cookie)
+ return NULL;
+
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index b5b2be55ca82..2c440015ff0c 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -190,6 +190,11 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ goto err;
+ }
+
++ if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
++ status = -ENETDOWN;
++ goto err;
++ }
++
+ *entry = (struct nf_queue_entry) {
+ .skb = skb,
+ .state = *state,
+@@ -198,7 +203,6 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ };
+
+ nf_queue_entry_get_refs(entry);
+- skb_dst_force(skb);
+
+ switch (entry->state.pf) {
+ case AF_INET:
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 86b87925ef34..c4f54ad2b98a 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -869,7 +869,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+ unsigned short frametype, flags, window, timeout;
+ int ret;
+
+- skb->sk = NULL; /* Initially we don't know who it's for */
++ skb_orphan(skb);
+
+ /*
+ * skb->data points to the netrom frame start
+@@ -967,7 +967,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
+
+ window = skb->data[20];
+
++ sock_hold(make);
+ skb->sk = make;
++ skb->destructor = sock_efree;
+ make->sk_state = TCP_ESTABLISHED;
+
+ /* Fill in his circuit details */
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 0a0c265baaa4..ce3382be937f 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -107,7 +107,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ rc = -EPROTO;
+- goto free_exit;
++ goto exit;
+ }
+
+ __skb_queue_head_init(&frags_q);
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index 151518dbabad..bd131469e4ca 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -166,8 +166,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+- ~skb->csum);
++ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+ }
+
+ hdr->h_proto = ethertype;
+@@ -259,8 +258,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __be32 diff[] = { ~(stack->label_stack_entry), lse };
+
+- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+- ~skb->csum);
++ skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
+ }
+
+ stack->label_stack_entry = lse;
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index f9f4721cdfa7..d09eaf153544 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -545,6 +545,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
+
+ switch (rx->sk.sk_state) {
+ case RXRPC_UNBOUND:
++ case RXRPC_CLIENT_UNBOUND:
+ rx->srx.srx_family = AF_RXRPC;
+ rx->srx.srx_service = 0;
+ rx->srx.transport_type = SOCK_DGRAM;
+@@ -569,10 +570,9 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
+ }
+
+ rx->local = local;
+- rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
++ rx->sk.sk_state = RXRPC_CLIENT_BOUND;
+ /* Fall through */
+
+- case RXRPC_CLIENT_UNBOUND:
+ case RXRPC_CLIENT_BOUND:
+ if (!m->msg_name &&
+ test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index ad36bbcc583e..b67c456f26aa 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -2160,6 +2160,9 @@ replay:
+ tfilter_notify(net, skb, n, tp, block, q, parent, fh,
+ RTM_NEWTFILTER, false, rtnl_held);
+ tfilter_put(tp, fh);
++ /* q pointer is NULL for shared blocks */
++ if (q)
++ q->flags &= ~TCQ_F_CAN_BYPASS;
+ }
+
+ errout:
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index e2faf33d282b..d59fbcc745d1 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -596,8 +596,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
+ static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+ {
+- /* we cannot bypass queue discipline anymore */
+- sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+ }
+
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 420bd8411677..68404a9d2ce4 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -824,8 +824,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
+ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+ {
+- /* we cannot bypass queue discipline anymore */
+- sch->flags &= ~TCQ_F_CAN_BYPASS;
+ return 0;
+ }
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 39ea0a37af09..f33aa9ee9e27 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4816,35 +4816,17 @@ out_nounlock:
+ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
+ int addr_len, int flags)
+ {
+- struct inet_sock *inet = inet_sk(sk);
+ struct sctp_af *af;
+- int err = 0;
++ int err = -EINVAL;
+
+ lock_sock(sk);
+-
+ pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
+ addr, addr_len);
+
+- /* We may need to bind the socket. */
+- if (!inet->inet_num) {
+- if (sk->sk_prot->get_port(sk, 0)) {
+- release_sock(sk);
+- return -EAGAIN;
+- }
+- inet->inet_sport = htons(inet->inet_num);
+- }
+-
+ /* Validate addr_len before calling common connect/connectx routine. */
+- af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
+- sctp_get_af_specific(addr->sa_family);
+- if (!af || addr_len < af->sockaddr_len) {
+- err = -EINVAL;
+- } else {
+- /* Pass correct addr len to common routine (so it knows there
+- * is only one address being passed.
+- */
++ af = sctp_get_af_specific(addr->sa_family);
++ if (af && addr_len >= af->sockaddr_len)
+ err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
+- }
+
+ release_sock(sk);
+ return err;
+diff --git a/net/sctp/stream.c b/net/sctp/stream.c
+index 93ed07877337..25946604af85 100644
+--- a/net/sctp/stream.c
++++ b/net/sctp/stream.c
+@@ -153,13 +153,20 @@ out:
+ int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
+ {
+ struct sctp_stream_out_ext *soute;
++ int ret;
+
+ soute = kzalloc(sizeof(*soute), GFP_KERNEL);
+ if (!soute)
+ return -ENOMEM;
+ SCTP_SO(stream, sid)->ext = soute;
+
+- return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
++ ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
++ if (ret) {
++ kfree(SCTP_SO(stream, sid)->ext);
++ SCTP_SO(stream, sid)->ext = NULL;
++ }
++
++ return ret;
+ }
+
+ void sctp_stream_free(struct sctp_stream *stream)
+diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
+index 1f9cf57d9754..eb8f24f420f0 100644
+--- a/net/tls/tls_device.c
++++ b/net/tls/tls_device.c
+@@ -61,7 +61,7 @@ static void tls_device_free_ctx(struct tls_context *ctx)
+ if (ctx->rx_conf == TLS_HW)
+ kfree(tls_offload_ctx_rx(ctx));
+
+- kfree(ctx);
++ tls_ctx_free(ctx);
+ }
+
+ static void tls_device_gc_task(struct work_struct *work)
+@@ -742,6 +742,11 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
+ }
+
+ crypto_info = &ctx->crypto_send.info;
++ if (crypto_info->version != TLS_1_2_VERSION) {
++ rc = -EOPNOTSUPP;
++ goto free_offload_ctx;
++ }
++
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128:
+ nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
+@@ -876,6 +881,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
+ struct net_device *netdev;
+ int rc = 0;
+
++ if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
++ return -EOPNOTSUPP;
++
+ /* We support starting offload on multiple sockets
+ * concurrently, so we only need a read lock here.
+ * This lock must precede get_netdev_for_sock to prevent races between
+diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
+index e2b69e805d46..4674e57e66b0 100644
+--- a/net/tls/tls_main.c
++++ b/net/tls/tls_main.c
+@@ -251,7 +251,7 @@ static void tls_write_space(struct sock *sk)
+ ctx->sk_write_space(sk);
+ }
+
+-static void tls_ctx_free(struct tls_context *ctx)
++void tls_ctx_free(struct tls_context *ctx)
+ {
+ if (!ctx)
+ return;
+@@ -643,7 +643,7 @@ static void tls_hw_sk_destruct(struct sock *sk)
+
+ ctx->sk_destruct(sk);
+ /* Free ctx */
+- kfree(ctx);
++ tls_ctx_free(ctx);
+ icsk->icsk_ulp_data = NULL;
+ }
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 455a782c7658..e2385183526e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -1958,7 +1958,8 @@ bool tls_sw_stream_read(const struct sock *sk)
+ ingress_empty = list_empty(&psock->ingress_msg);
+ rcu_read_unlock();
+
+- return !ingress_empty || ctx->recv_pkt;
++ return !ingress_empty || ctx->recv_pkt ||
++ !skb_queue_empty(&ctx->rx_list);
+ }
+
+ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 61cfd8f70989..d089eb706d18 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -3669,7 +3669,8 @@ int cmd_script(int argc, const char **argv)
+ goto out_delete;
+
+ uname(&uts);
+- if (!strcmp(uts.machine, session->header.env.arch) ||
++ if (data.is_pipe || /* assume pipe_mode indicates native_arch */
++ !strcmp(uts.machine, session->header.env.arch) ||
+ (!strcmp(uts.machine, "x86_64") &&
+ !strcmp(session->header.env.arch, "i386")))
+ native_arch = true;
+diff --git a/tools/testing/selftests/net/txring_overwrite.c b/tools/testing/selftests/net/txring_overwrite.c
+index fd8b1c663c39..7d9ea039450a 100644
+--- a/tools/testing/selftests/net/txring_overwrite.c
++++ b/tools/testing/selftests/net/txring_overwrite.c
+@@ -113,7 +113,7 @@ static int setup_tx(char **ring)
+
+ *ring = mmap(0, req.tp_block_size * req.tp_block_nr,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fdt, 0);
+- if (!*ring)
++ if (*ring == MAP_FAILED)
+ error(1, errno, "mmap");
+
+ return fdt;
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-26 11:38 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-26 11:38 UTC (permalink / raw
To: gentoo-commits
commit: af0a097f185afe9da14dc965431666bbd47f1834
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 26 11:38:08 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 26 11:38:08 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=af0a097f
Linux patch 5.2.3
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1002_linux-5.2.3.patch | 19496 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 19500 insertions(+)
diff --git a/0000_README b/0000_README
index d2c1e9b..359d69d 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch: 1001_linux-5.2.2.patch
From: https://www.kernel.org
Desc: Linux 5.2.2
+Patch: 1002_linux-5.2.3.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1002_linux-5.2.3.patch b/1002_linux-5.2.3.patch
new file mode 100644
index 0000000..f6fb4de
--- /dev/null
+++ b/1002_linux-5.2.3.patch
@@ -0,0 +1,19496 @@
+diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
+index dca3fb0554db..65bb09a29324 100644
+--- a/Documentation/atomic_t.txt
++++ b/Documentation/atomic_t.txt
+@@ -194,6 +194,9 @@ These helper barriers exist because architectures have varying implicit
+ ordering on their SMP atomic primitives. For example our TSO architectures
+ provide fully ordered atomics and these barriers are no-ops.
+
++NOTE: when the atomic RmW ops are fully ordered, they should also imply a
++compiler barrier.
++
+ Thus:
+
+ atomic_fetch_add();
+diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+index 42cd81090a2c..3f3cfc1d8d4d 100644
+--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
++++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+@@ -16,7 +16,7 @@ Required properties:
+
+ Optional properties:
+ - interrupts: interrupt line number for the SMI error/done interrupt
+-- clocks: phandle for up to three required clocks for the MDIO instance
++- clocks: phandle for up to four required clocks for the MDIO instance
+
+ The child nodes of the MDIO driver are the individual PHY devices
+ connected to this MDIO bus. They must have a "reg" property given the
+diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c
+index e4219139386a..7238b355919c 100644
+--- a/Documentation/scheduler/sched-pelt.c
++++ b/Documentation/scheduler/sched-pelt.c
+@@ -20,7 +20,8 @@ void calc_runnable_avg_yN_inv(void)
+ int i;
+ unsigned int x;
+
+- printf("static const u32 runnable_avg_yN_inv[] = {");
++ /* To silence -Wunused-but-set-variable warnings. */
++ printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {");
+ for (i = 0; i < HALFLIFE; i++) {
+ x = ((1UL<<32)-1)*pow(y, i);
+
+diff --git a/Makefile b/Makefile
+index d6c65b678d21..bcb6a2465e21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+index 3613f05f8a80..bfaa2de63a10 100644
+--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
++++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+@@ -64,7 +64,7 @@
+ gpio-sck = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+- cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
++ cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
+ num-chipselects = <1>;
+
+ panel: display@0 {
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 697ea0510729..cf5f1dafcf74 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -260,7 +260,8 @@ config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+ config ZONE_DMA32
+- def_bool y
++ bool "Support DMA32 zone" if EXPERT
++ default y
+
+ config HAVE_GENERIC_GUP
+ def_bool y
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+index 4dcd0d36189a..f70cd83f2bed 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+@@ -328,7 +328,8 @@
+ regulator-max-microvolt = <1320000>;
+ enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
+ regulator-ramp-delay = <80>;
+- regulator-enable-ramp-delay = <1000>;
++ regulator-enable-ramp-delay = <2000>;
++ regulator-settling-time-us = <160>;
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+index 5d0181908f45..f187d4f3ade3 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
+@@ -633,17 +633,16 @@
+ };
+
+ vdd_gpu: regulator@6 {
+- compatible = "regulator-fixed";
++ compatible = "pwm-regulator";
+ reg = <6>;
+-
++ pwms = <&pwm 1 4880>;
+ regulator-name = "VDD_GPU";
+- regulator-min-microvolt = <5000000>;
+- regulator-max-microvolt = <5000000>;
+- regulator-enable-ramp-delay = <250>;
+-
+- gpio = <&pmic 6 GPIO_ACTIVE_HIGH>;
+- enable-active-high;
+-
++ regulator-min-microvolt = <710000>;
++ regulator-max-microvolt = <1320000>;
++ regulator-ramp-delay = <80>;
++ regulator-enable-ramp-delay = <2000>;
++ regulator-settling-time-us = <160>;
++ enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+index a550c0a4d572..cd23bdbeda85 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+@@ -1258,7 +1258,7 @@
+ compatible = "nvidia,tegra210-agic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+- reg = <0x702f9000 0x2000>,
++ reg = <0x702f9000 0x1000>,
+ <0x702fa000 0x2000>;
+ interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ clocks = <&tegra_car TEGRA210_CLK_APE>;
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index ecb0f67e5998..bdc1b6d7aff7 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -52,7 +52,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+- bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
++ bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
+
+ if (!crypto_simd_usable())
+ return crypto_sha1_finup(desc, data, len, out);
+diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
+index 955c3c2d3f5a..604a01a4ede6 100644
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -57,7 +57,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+- bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
++ bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
+
+ if (!crypto_simd_usable()) {
+ if (len)
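Both SHA hunks add the same "&& len" guard: a finup with no prior state and a block-multiple length used to take the fused finalize fast path even for len == 0, producing a wrong digest for the empty message. A hedged restatement of the fixed predicate in plain C:

    /* Sketch only: take the fused finalize path when nothing has been
     * hashed yet, the new data is a whole number of blocks, and there
     * is at least one byte; a zero-length request must fall back to
     * generic finup so empty-message padding is applied correctly. */
    static int should_finalize(unsigned long long count, unsigned int len,
                               unsigned int block_size)
    {
            return !count && !(len % block_size) && len;
    }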
+diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
+index 2247908e55d6..79155a8cfe7c 100644
+--- a/arch/arm64/include/asm/arch_gicv3.h
++++ b/arch/arm64/include/asm/arch_gicv3.h
+@@ -152,7 +152,9 @@ static inline bool gic_prio_masking_enabled(void)
+
+ static inline void gic_pmr_mask_irqs(void)
+ {
+- BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
++ BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
++ GIC_PRIO_PSR_I_SET));
++ BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+ }
+
+diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
+index 6dd8a8723525..ae7e605085d7 100644
+--- a/arch/arm64/include/asm/daifflags.h
++++ b/arch/arm64/include/asm/daifflags.h
+@@ -7,6 +7,7 @@
+
+ #include <linux/irqflags.h>
+
++#include <asm/arch_gicv3.h>
+ #include <asm/cpufeature.h>
+
+ #define DAIF_PROCCTX 0
+@@ -21,6 +22,11 @@ static inline void local_daif_mask(void)
+ :
+ :
+ : "memory");
++
++ /* Don't really care for a dsb here; we don't intend to enable IRQs */
++ if (system_uses_irq_prio_masking())
++ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
++
+ trace_hardirqs_off();
+ }
+
+@@ -32,7 +38,7 @@ static inline unsigned long local_daif_save(void)
+
+ if (system_uses_irq_prio_masking()) {
+ /* If IRQs are masked with PMR, reflect it in the flags */
+- if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
++ if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
+ flags |= PSR_I_BIT;
+ }
+
+@@ -48,36 +54,44 @@ static inline void local_daif_restore(unsigned long flags)
+ if (!irq_disabled) {
+ trace_hardirqs_on();
+
+- if (system_uses_irq_prio_masking())
+- arch_local_irq_enable();
+- } else if (!(flags & PSR_A_BIT)) {
+- /*
+- * If interrupts are disabled but we can take
+- * asynchronous errors, we can take NMIs
+- */
+ if (system_uses_irq_prio_masking()) {
+- flags &= ~PSR_I_BIT;
++ gic_write_pmr(GIC_PRIO_IRQON);
++ dsb(sy);
++ }
++ } else if (system_uses_irq_prio_masking()) {
++ u64 pmr;
++
++ if (!(flags & PSR_A_BIT)) {
+ /*
+- * There has been concern that the write to daif
+- * might be reordered before this write to PMR.
+- * From the ARM ARM DDI 0487D.a, section D1.7.1
+- * "Accessing PSTATE fields":
+- * Writes to the PSTATE fields have side-effects on
+- * various aspects of the PE operation. All of these
+- * side-effects are guaranteed:
+- * - Not to be visible to earlier instructions in
+- * the execution stream.
+- * - To be visible to later instructions in the
+- * execution stream
+- *
+- * Also, writes to PMR are self-synchronizing, so no
+- * interrupts with a lower priority than PMR is signaled
+- * to the PE after the write.
+- *
+- * So we don't need additional synchronization here.
++ * If interrupts are disabled but we can take
++ * asynchronous errors, we can take NMIs
+ */
+- arch_local_irq_disable();
++ flags &= ~PSR_I_BIT;
++ pmr = GIC_PRIO_IRQOFF;
++ } else {
++ pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
+ }
++
++ /*
++ * There has been concern that the write to daif
++ * might be reordered before this write to PMR.
++ * From the ARM ARM DDI 0487D.a, section D1.7.1
++ * "Accessing PSTATE fields":
++ * Writes to the PSTATE fields have side-effects on
++ * various aspects of the PE operation. All of these
++ * side-effects are guaranteed:
++ * - Not to be visible to earlier instructions in
++ * the execution stream.
++ * - To be visible to later instructions in the
++ * execution stream
++ *
++ * Also, writes to PMR are self-synchronizing, so no
++ * interrupts with a lower priority than PMR are signaled
++ * to the PE after the write.
++ *
++ * So we don't need additional synchronization here.
++ */
++ gic_write_pmr(pmr);
+ }
+
+ write_sysreg(flags, daif);
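The restore path now selects one of three PMR values before writing DAIF. A hedged C model of that selection (constants mirror the ptrace.h hunk later in this patch; the PSR bit positions are the architectural ones):

    #define SK_PSR_I     (1u << 7)
    #define SK_PSR_A     (1u << 8)
    #define SK_IRQON     0xc0u
    #define SK_IRQOFF    0x40u
    #define SK_PSR_I_SET (1u << 4)

    static unsigned int pmr_for_flags(unsigned long flags)
    {
            if (!(flags & SK_PSR_I))
                    return SK_IRQON;                /* IRQs enabled: unmask all */
            if (!(flags & SK_PSR_A))
                    return SK_IRQOFF;               /* IRQs off, SErrors on: NMIs stay open */
            return SK_IRQON | SK_PSR_I_SET;         /* fully masked: rely on PSR.I */
    }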
+diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
+index 66853fde60f9..5b7cef8c9241 100644
+--- a/arch/arm64/include/asm/irqflags.h
++++ b/arch/arm64/include/asm/irqflags.h
+@@ -56,43 +56,46 @@ static inline void arch_local_irq_disable(void)
+ */
+ static inline unsigned long arch_local_save_flags(void)
+ {
+- unsigned long daif_bits;
+ unsigned long flags;
+
+- daif_bits = read_sysreg(daif);
+-
+- /*
+- * The asm is logically equivalent to:
+- *
+- * if (system_uses_irq_prio_masking())
+- * flags = (daif_bits & PSR_I_BIT) ?
+- * GIC_PRIO_IRQOFF :
+- * read_sysreg_s(SYS_ICC_PMR_EL1);
+- * else
+- * flags = daif_bits;
+- */
+ asm volatile(ALTERNATIVE(
+- "mov %0, %1\n"
+- "nop\n"
+- "nop",
+- __mrs_s("%0", SYS_ICC_PMR_EL1)
+- "ands %1, %1, " __stringify(PSR_I_BIT) "\n"
+- "csel %0, %0, %2, eq",
+- ARM64_HAS_IRQ_PRIO_MASKING)
+- : "=&r" (flags), "+r" (daif_bits)
+- : "r" ((unsigned long) GIC_PRIO_IRQOFF)
++ "mrs %0, daif",
++ __mrs_s("%0", SYS_ICC_PMR_EL1),
++ ARM64_HAS_IRQ_PRIO_MASKING)
++ : "=&r" (flags)
++ :
+ : "memory");
+
+ return flags;
+ }
+
++static inline int arch_irqs_disabled_flags(unsigned long flags)
++{
++ int res;
++
++ asm volatile(ALTERNATIVE(
++ "and %w0, %w1, #" __stringify(PSR_I_BIT),
++ "eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
++ ARM64_HAS_IRQ_PRIO_MASKING)
++ : "=&r" (res)
++ : "r" ((int) flags)
++ : "memory");
++
++ return res;
++}
++
+ static inline unsigned long arch_local_irq_save(void)
+ {
+ unsigned long flags;
+
+ flags = arch_local_save_flags();
+
+- arch_local_irq_disable();
++ /*
++ * There are too many states with IRQs disabled; just keep the current
++ * state if interrupts are already disabled/masked.
++ */
++ if (!arch_irqs_disabled_flags(flags))
++ arch_local_irq_disable();
+
+ return flags;
+ }
+@@ -113,21 +116,5 @@ static inline void arch_local_irq_restore(unsigned long flags)
+ : "memory");
+ }
+
+-static inline int arch_irqs_disabled_flags(unsigned long flags)
+-{
+- int res;
+-
+- asm volatile(ALTERNATIVE(
+- "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+- "nop",
+- "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+- "cset %w0, ls",
+- ARM64_HAS_IRQ_PRIO_MASKING)
+- : "=&r" (res)
+- : "r" ((int) flags)
+- : "memory");
+-
+- return res;
+-}
+ #endif
+ #endif
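With this rewrite, the saved flags word is either the raw DAIF value or the raw PMR, and the "disabled?" test reduces to one instruction per mode. In C terms (a sketch, not the kernel's implementation):

    /* Sketch: under priority masking, any PMR other than GIC_PRIO_IRQON
     * (0xc0) counts as masked, which is what the "eor" alternative
     * computes; otherwise the PSR.I bit (bit 7) decides. */
    static int irqs_disabled_flags_sketch(unsigned long flags, int prio_masking)
    {
            if (prio_masking)
                    return flags != 0xc0;   /* any non-IRQON PMR means masked */
            return flags & 0x80;            /* PSR_I_BIT */
    }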
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index c328191aa202..9f19c354b165 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -597,11 +597,12 @@ static inline void kvm_arm_vhe_guest_enter(void)
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
++ *
++ * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
++ * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
+ */
+- if (system_uses_irq_prio_masking()) {
+- gic_write_pmr(GIC_PRIO_IRQON);
++ if (system_uses_irq_prio_masking())
+ dsb(sy);
+- }
+ }
+
+ static inline void kvm_arm_vhe_guest_exit(void)
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index dad858b6adc6..81693244f58d 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -24,9 +24,15 @@
+ * means masking more IRQs (or at least that the same IRQs remain masked).
+ *
+ * To mask interrupts, we clear the most significant bit of PMR.
++ *
++ * Some code sections either automatically switch back to PSR.I or explicitly
++ * require not to use priority masking. If bit GIC_PRIO_PSR_I_SET is included
++ * in the priority mask, it indicates that PSR.I should be set and
++ * interrupt disabling temporarily does not rely on IRQ priorities.
+ */
+-#define GIC_PRIO_IRQON 0xf0
+-#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
++#define GIC_PRIO_IRQON 0xc0
++#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
++#define GIC_PRIO_PSR_I_SET (1 << 4)
+
+ /* Additional SPSR bits not exposed in the UABI */
+ #define PSR_IL_BIT (1 << 20)
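The new encodings keep GIC_PRIO_PSR_I_SET out of the bits used by the two mask levels, so it can be OR-ed in as an out-of-band marker. A small userspace check of those invariants (values copied from the hunk above):

    #include <assert.h>

    int main(void)
    {
            unsigned int irqon  = 0xc0;
            unsigned int irqoff = irqon & ~0x80;    /* 0x40 */
            unsigned int psr_i  = 1 << 4;           /* 0x10 */

            /* Lower PMR values mask more interrupts on GICv3. */
            assert(irqoff < irqon);
            /* The marker bit overlaps neither mask level. */
            assert((irqon & psr_i) == 0 && (irqoff & psr_i) == 0);
            return 0;
    }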
+diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
+index 2804330c95dc..3a58e9db5cfe 100644
+--- a/arch/arm64/kernel/acpi.c
++++ b/arch/arm64/kernel/acpi.c
+@@ -152,10 +152,14 @@ static int __init acpi_fadt_sanity_check(void)
+ */
+ if (table->revision < 5 ||
+ (table->revision == 5 && fadt->minor_revision < 1)) {
+- pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
++ pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
+ table->revision, fadt->minor_revision);
+- ret = -EINVAL;
+- goto out;
++
++ if (!fadt->arm_boot_flags) {
++ ret = -EINVAL;
++ goto out;
++ }
++ pr_err("FADT has ARM boot flags set, assuming 5.1\n");
+ }
+
+ if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 2df8d0a1d980..9cdc4592da3e 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -247,6 +247,7 @@ alternative_else_nop_endif
+ /*
+ * Registers that may be useful after this macro is invoked:
+ *
++ * x20 - ICC_PMR_EL1
+ * x21 - aborted SP
+ * x22 - aborted PC
+ * x23 - aborted PSTATE
+@@ -424,6 +425,38 @@ tsk .req x28 // current thread_info
+ irq_stack_exit
+ .endm
+
++#ifdef CONFIG_ARM64_PSEUDO_NMI
++ /*
++ * Set res to 0 if irqs were unmasked in interrupted context.
++ * Otherwise set res to non-0 value.
++ */
++ .macro test_irqs_unmasked res:req, pmr:req
++alternative_if ARM64_HAS_IRQ_PRIO_MASKING
++ sub \res, \pmr, #GIC_PRIO_IRQON
++alternative_else
++ mov \res, xzr
++alternative_endif
++ .endm
++#endif
++
++ .macro gic_prio_kentry_setup, tmp:req
++#ifdef CONFIG_ARM64_PSEUDO_NMI
++ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
++ mov \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
++ msr_s SYS_ICC_PMR_EL1, \tmp
++ alternative_else_nop_endif
++#endif
++ .endm
++
++ .macro gic_prio_irq_setup, pmr:req, tmp:req
++#ifdef CONFIG_ARM64_PSEUDO_NMI
++ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
++ orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET
++ msr_s SYS_ICC_PMR_EL1, \tmp
++ alternative_else_nop_endif
++#endif
++ .endm
++
+ .text
+
+ /*
+@@ -602,6 +635,7 @@ el1_dbg:
+ cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
+ tbz x24, #0, el1_inv // EL1 only
++ gic_prio_kentry_setup tmp=x3
+ mrs x0, far_el1
+ mov x2, sp // struct pt_regs
+ bl do_debug_exception
+@@ -619,20 +653,18 @@ ENDPROC(el1_sync)
+ .align 6
+ el1_irq:
+ kernel_entry 1
++ gic_prio_irq_setup pmr=x20, tmp=x1
+ enable_da_f
+-#ifdef CONFIG_TRACE_IRQFLAGS
++
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
+-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+- ldr x20, [sp, #S_PMR_SAVE]
+-alternative_else
+- mov x20, #GIC_PRIO_IRQON
+-alternative_endif
+- cmp x20, #GIC_PRIO_IRQOFF
+- /* Irqs were disabled, don't trace */
+- b.ls 1f
++ test_irqs_unmasked res=x0, pmr=x20
++ cbz x0, 1f
++ bl asm_nmi_enter
++1:
+ #endif
++
++#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+-1:
+ #endif
+
+ irq_handler
+@@ -651,14 +683,23 @@ alternative_else_nop_endif
+ bl preempt_schedule_irq // irq en/disable is done inside
+ 1:
+ #endif
+-#ifdef CONFIG_TRACE_IRQFLAGS
++
+ #ifdef CONFIG_ARM64_PSEUDO_NMI
+ /*
+- * if IRQs were disabled when we received the interrupt, we have an NMI
+- * and we are not re-enabling interrupt upon eret. Skip tracing.
++ * When using IRQ priority masking, we can get spurious interrupts while
++ * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
++ * section with interrupts disabled. Skip tracing in those cases.
+ */
+- cmp x20, #GIC_PRIO_IRQOFF
+- b.ls 1f
++ test_irqs_unmasked res=x0, pmr=x20
++ cbz x0, 1f
++ bl asm_nmi_exit
++1:
++#endif
++
++#ifdef CONFIG_TRACE_IRQFLAGS
++#ifdef CONFIG_ARM64_PSEUDO_NMI
++ test_irqs_unmasked res=x0, pmr=x20
++ cbnz x0, 1f
+ #endif
+ bl trace_hardirqs_on
+ 1:
+@@ -776,6 +817,7 @@ el0_ia:
+ * Instruction abort handling
+ */
+ mrs x26, far_el1
++ gic_prio_kentry_setup tmp=x0
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+@@ -821,6 +863,7 @@ el0_sp_pc:
+ * Stack or PC alignment exception handling
+ */
+ mrs x26, far_el1
++ gic_prio_kentry_setup tmp=x0
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+@@ -855,11 +898,12 @@ el0_dbg:
+ * Debug exception handling
+ */
+ tbnz x24, #0, el0_inv // EL0 only
++ gic_prio_kentry_setup tmp=x3
+ mrs x0, far_el1
+ mov x1, x25
+ mov x2, sp
+ bl do_debug_exception
+- enable_daif
++ enable_da_f
+ ct_user_exit
+ b ret_to_user
+ el0_inv:
+@@ -876,7 +920,9 @@ ENDPROC(el0_sync)
+ el0_irq:
+ kernel_entry 0
+ el0_irq_naked:
++ gic_prio_irq_setup pmr=x20, tmp=x0
+ enable_da_f
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+ #endif
+@@ -898,6 +944,7 @@ ENDPROC(el0_irq)
+ el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1
++ gic_prio_kentry_setup tmp=x2
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+@@ -908,10 +955,11 @@ el0_error:
+ kernel_entry 0
+ el0_error_naked:
+ mrs x1, esr_el1
++ gic_prio_kentry_setup tmp=x2
+ enable_dbg
+ mov x0, sp
+ bl do_serror
+- enable_daif
++ enable_da_f
+ ct_user_exit
+ b ret_to_user
+ ENDPROC(el0_error)
+@@ -932,6 +980,7 @@ work_pending:
+ */
+ ret_to_user:
+ disable_daif
++ gic_prio_kentry_setup tmp=x3
+ ldr x1, [tsk, #TSK_TI_FLAGS]
+ and x2, x1, #_TIF_WORK_MASK
+ cbnz x2, work_pending
+@@ -948,6 +997,7 @@ ENDPROC(ret_to_user)
+ */
+ .align 6
+ el0_svc:
++ gic_prio_kentry_setup tmp=x1
+ mov x0, sp
+ bl el0_svc_handler
+ b ret_to_user
+diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
+index c70034fbd4ce..4a8506c3869b 100644
+--- a/arch/arm64/kernel/irq.c
++++ b/arch/arm64/kernel/irq.c
+@@ -16,8 +16,10 @@
+ #include <linux/smp.h>
+ #include <linux/init.h>
+ #include <linux/irqchip.h>
++#include <linux/kprobes.h>
+ #include <linux/seq_file.h>
+ #include <linux/vmalloc.h>
++#include <asm/daifflags.h>
+ #include <asm/vmap_stack.h>
+
+ unsigned long irq_err_count;
+@@ -65,3 +67,18 @@ void __init init_IRQ(void)
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
+ }
++
++/*
++ * Stubs to make nmi_enter/exit() code callable from ASM
++ */
++asmlinkage void notrace asm_nmi_enter(void)
++{
++ nmi_enter();
++}
++NOKPROBE_SYMBOL(asm_nmi_enter);
++
++asmlinkage void notrace asm_nmi_exit(void)
++{
++ nmi_exit();
++}
++NOKPROBE_SYMBOL(asm_nmi_exit);
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 9856395ccdb7..6a869d9f304f 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -83,7 +83,7 @@ static void __cpu_do_idle_irqprio(void)
+ * be raised.
+ */
+ pmr = gic_read_pmr();
+- gic_write_pmr(GIC_PRIO_IRQON);
++ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
+ __cpu_do_idle();
+
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 6dcf9607d770..a1aed6a1b8da 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -181,11 +181,13 @@ static void init_gic_priority_masking(void)
+
+ WARN_ON(!(cpuflags & PSR_I_BIT));
+
+- gic_write_pmr(GIC_PRIO_IRQOFF);
+-
+ /* We can only unmask PSR.I if we can take aborts */
+- if (!(cpuflags & PSR_A_BIT))
++ if (!(cpuflags & PSR_A_BIT)) {
++ gic_write_pmr(GIC_PRIO_IRQOFF);
+ write_sysreg(cpuflags & ~PSR_I_BIT, daif);
++ } else {
++ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
++ }
+ }
+
+ /*
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index b0041812bca9..58f281b6ca4a 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -604,7 +604,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
+ * Naturally, we want to avoid this.
+ */
+ if (system_uses_irq_prio_masking()) {
+- gic_write_pmr(GIC_PRIO_IRQON);
++ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ dsb(sy);
+ }
+
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 749c9b269f08..f3c795278def 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -180,8 +180,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
+ {
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+
+- if (IS_ENABLED(CONFIG_ZONE_DMA32))
+- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
++#ifdef CONFIG_ZONE_DMA32
++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
++#endif
+ max_zone_pfns[ZONE_NORMAL] = max;
+
+ free_area_init_nodes(max_zone_pfns);
+diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
+index d58960b33bda..5d7f2692ac5a 100644
+--- a/arch/parisc/kernel/kprobes.c
++++ b/arch/parisc/kernel/kprobes.c
+@@ -133,6 +133,9 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ struct kprobe *p = kprobe_running();
+
++ if (!p)
++ return 0;
++
+ if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
+ return 0;
+
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index a3d2fb4e6dd2..94106ec64fec 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -167,6 +167,9 @@ long arch_ptrace(struct task_struct *child, long request,
+ if ((addr & (sizeof(unsigned long)-1)) ||
+ addr >= sizeof(struct pt_regs))
+ break;
++ if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
++ data |= 3; /* ensure userspace privilege */
++ }
+ if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+ addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+ (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+@@ -228,16 +231,18 @@ long arch_ptrace(struct task_struct *child, long request,
+
+ static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
+ {
+- if (offset < 0)
+- return sizeof(struct pt_regs);
+- else if (offset <= 32*4) /* gr[0..31] */
+- return offset * 2 + 4;
+- else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
+- return offset + 32*4;
+- else if (offset < sizeof(struct pt_regs)/2 + 32*4)
+- return offset * 2 + 4 - 32*8;
++ compat_ulong_t pos;
++
++ if (offset < 32*4) /* gr[0..31] */
++ pos = offset * 2 + 4;
++ else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */
++ pos = (offset - 32*4) + PT_FR0;
++ else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
++ pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
+ else
+- return sizeof(struct pt_regs);
++ pos = sizeof(struct pt_regs);
++
++ return pos;
+ }
+
+ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+@@ -281,9 +286,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ addr = translate_usr_offset(addr);
+ if (addr >= sizeof(struct pt_regs))
+ break;
++ if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
++ data |= 3; /* ensure userspace privilege */
++ }
+ if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
+ /* Special case, fp regs are 64 bits anyway */
+- *(__u64 *) ((char *) task_regs(child) + addr) = data;
++ *(__u32 *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
+@@ -496,7 +504,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
+ return;
+ case RI(iaoq[0]):
+ case RI(iaoq[1]):
+- regs->iaoq[num - RI(iaoq[0])] = val;
++ /* set 2 lowest bits to ensure userspace privilege: */
++ regs->iaoq[num - RI(iaoq[0])] = val | 3;
+ return;
+ case RI(sar): regs->sar = val;
+ return;
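All three parisc hunks enforce the same rule: the two low bits of an IAOQ address encode the privilege level, and 3 is the least-privileged (user) level. A one-line sketch of the clamp:

    /* Force user privilege on a ptrace-supplied instruction address so
     * a traced task cannot be pointed at a kernel-privilege address. */
    static unsigned long clamp_iaoq(unsigned long val)
    {
            return val | 3;
    }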
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 3f53be60fb01..64145751b2fd 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -140,6 +140,20 @@ static inline void pte_frag_set(mm_context_t *ctx, void *p)
+ }
+ #endif
+
++#ifdef CONFIG_PPC64
++#define is_ioremap_addr is_ioremap_addr
++static inline bool is_ioremap_addr(const void *x)
++{
++#ifdef CONFIG_MMU
++ unsigned long addr = (unsigned long)x;
++
++ return addr >= IOREMAP_BASE && addr < IOREMAP_END;
++#else
++ return false;
++#endif
++}
++#endif /* CONFIG_PPC64 */
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _ASM_POWERPC_PGTABLE_H */
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 73ba246ca11d..6c51aa845bce 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1746,7 +1746,7 @@ handle_page_fault:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_page_fault
+ cmpdi r3,0
+- beq+ 12f
++ beq+ ret_from_except_lite
+ bl save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+@@ -1761,7 +1761,12 @@ handle_dabr_fault:
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_break
+-12: b ret_from_except_lite
++ /*
++ * do_break() may have changed the NV GPRS while handling a breakpoint.
++ * If so, we need to restore them with their updated values. Don't use
++ * ret_from_except_lite here.
++ */
++ b ret_from_except
+
+
+ #ifdef CONFIG_PPC_BOOK3S_64
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index ed446b7ea164..6d600c977c09 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -168,6 +168,7 @@ static unsigned long __prombss prom_tce_alloc_end;
+
+ #ifdef CONFIG_PPC_PSERIES
+ static bool __prombss prom_radix_disable;
++static bool __prombss prom_xive_disable;
+ #endif
+
+ struct platform_support {
+@@ -804,6 +805,12 @@ static void __init early_cmdline_parse(void)
+ }
+ if (prom_radix_disable)
+ prom_debug("Radix disabled from cmdline\n");
++
++ opt = prom_strstr(prom_cmd_line, "xive=off");
++ if (opt) {
++ prom_xive_disable = true;
++ prom_debug("XIVE disabled from cmdline\n");
++ }
+ #endif /* CONFIG_PPC_PSERIES */
+ }
+
+@@ -1212,10 +1219,17 @@ static void __init prom_parse_xive_model(u8 val,
+ switch (val) {
+ case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
+ prom_debug("XIVE - either mode supported\n");
+- support->xive = true;
++ support->xive = !prom_xive_disable;
+ break;
+ case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
+ prom_debug("XIVE - exploitation mode supported\n");
++ if (prom_xive_disable) {
++ /*
++ * If we __have__ to do XIVE, we're better off ignoring
++ * the command line rather than not booting.
++ */
++ prom_printf("WARNING: Ignoring cmdline option xive=off\n");
++ }
+ support->xive = true;
+ break;
+ case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
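The negotiation rule this hunk implements: honour "xive=off" only when the hypervisor offers a choice, and ignore it (with a warning) when XIVE is the only supported mode, since refusing would mean not booting. A hedged sketch with a hypothetical enum standing in for the OV5 feature bits:

    enum xive_offer { XIVE_EITHER, XIVE_EXPLOIT_ONLY, XIVE_LEGACY_ONLY };

    /* Sketch: should we request XIVE exploitation mode? */
    static int want_xive(enum xive_offer offer, int user_disabled)
    {
            switch (offer) {
            case XIVE_EXPLOIT_ONLY:
                    return 1;               /* "xive=off" ignored rather than failing boot */
            case XIVE_EITHER:
                    return !user_disabled;
            default:
                    return 0;               /* legacy XICS only */
            }
    }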
+diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
+index 7a919e9a3400..cbdf86228eaa 100644
+--- a/arch/powerpc/kernel/swsusp_32.S
++++ b/arch/powerpc/kernel/swsusp_32.S
+@@ -25,11 +25,19 @@
+ #define SL_IBAT2 0x48
+ #define SL_DBAT3 0x50
+ #define SL_IBAT3 0x58
+-#define SL_TB 0x60
+-#define SL_R2 0x68
+-#define SL_CR 0x6c
+-#define SL_LR 0x70
+-#define SL_R12 0x74 /* r12 to r31 */
++#define SL_DBAT4 0x60
++#define SL_IBAT4 0x68
++#define SL_DBAT5 0x70
++#define SL_IBAT5 0x78
++#define SL_DBAT6 0x80
++#define SL_IBAT6 0x88
++#define SL_DBAT7 0x90
++#define SL_IBAT7 0x98
++#define SL_TB 0xa0
++#define SL_R2 0xa8
++#define SL_CR 0xac
++#define SL_LR 0xb0
++#define SL_R12 0xb4 /* r12 to r31 */
+ #define SL_SIZE (SL_R12 + 80)
+
+ .section .data
+@@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend)
+ mfibatl r4,3
+ stw r4,SL_IBAT3+4(r11)
+
++BEGIN_MMU_FTR_SECTION
++ mfspr r4,SPRN_DBAT4U
++ stw r4,SL_DBAT4(r11)
++ mfspr r4,SPRN_DBAT4L
++ stw r4,SL_DBAT4+4(r11)
++ mfspr r4,SPRN_DBAT5U
++ stw r4,SL_DBAT5(r11)
++ mfspr r4,SPRN_DBAT5L
++ stw r4,SL_DBAT5+4(r11)
++ mfspr r4,SPRN_DBAT6U
++ stw r4,SL_DBAT6(r11)
++ mfspr r4,SPRN_DBAT6L
++ stw r4,SL_DBAT6+4(r11)
++ mfspr r4,SPRN_DBAT7U
++ stw r4,SL_DBAT7(r11)
++ mfspr r4,SPRN_DBAT7L
++ stw r4,SL_DBAT7+4(r11)
++ mfspr r4,SPRN_IBAT4U
++ stw r4,SL_IBAT4(r11)
++ mfspr r4,SPRN_IBAT4L
++ stw r4,SL_IBAT4+4(r11)
++ mfspr r4,SPRN_IBAT5U
++ stw r4,SL_IBAT5(r11)
++ mfspr r4,SPRN_IBAT5L
++ stw r4,SL_IBAT5+4(r11)
++ mfspr r4,SPRN_IBAT6U
++ stw r4,SL_IBAT6(r11)
++ mfspr r4,SPRN_IBAT6L
++ stw r4,SL_IBAT6+4(r11)
++ mfspr r4,SPRN_IBAT7U
++ stw r4,SL_IBAT7(r11)
++ mfspr r4,SPRN_IBAT7L
++ stw r4,SL_IBAT7+4(r11)
++END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
++
+ #if 0
+ /* Backup various CPU config stuffs */
+ bl __save_cpu_setup
+@@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ mtibatu 3,r4
+ lwz r4,SL_IBAT3+4(r11)
+ mtibatl 3,r4
+-#endif
+-
+ BEGIN_MMU_FTR_SECTION
+- li r4,0
++ lwz r4,SL_DBAT4(r11)
+ mtspr SPRN_DBAT4U,r4
++ lwz r4,SL_DBAT4+4(r11)
+ mtspr SPRN_DBAT4L,r4
++ lwz r4,SL_DBAT5(r11)
+ mtspr SPRN_DBAT5U,r4
++ lwz r4,SL_DBAT5+4(r11)
+ mtspr SPRN_DBAT5L,r4
++ lwz r4,SL_DBAT6(r11)
+ mtspr SPRN_DBAT6U,r4
++ lwz r4,SL_DBAT6+4(r11)
+ mtspr SPRN_DBAT6L,r4
++ lwz r4,SL_DBAT7(r11)
+ mtspr SPRN_DBAT7U,r4
++ lwz r4,SL_DBAT7+4(r11)
+ mtspr SPRN_DBAT7L,r4
++ lwz r4,SL_IBAT4(r11)
+ mtspr SPRN_IBAT4U,r4
++ lwz r4,SL_IBAT4+4(r11)
+ mtspr SPRN_IBAT4L,r4
++ lwz r4,SL_IBAT5(r11)
+ mtspr SPRN_IBAT5U,r4
++ lwz r4,SL_IBAT5+4(r11)
+ mtspr SPRN_IBAT5L,r4
++ lwz r4,SL_IBAT6(r11)
+ mtspr SPRN_IBAT6U,r4
++ lwz r4,SL_IBAT6+4(r11)
+ mtspr SPRN_IBAT6L,r4
++ lwz r4,SL_IBAT7(r11)
+ mtspr SPRN_IBAT7U,r4
++ lwz r4,SL_IBAT7+4(r11)
+ mtspr SPRN_IBAT7L,r4
+ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
++#endif
+
+ /* Flush all TLBs */
+ lis r4,0x1000
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 76b1801aa44a..ec1804f822af 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3603,6 +3603,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+
+ vcpu->arch.slb_max = 0;
+ dec = mfspr(SPRN_DEC);
++ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
++ dec = (s32) dec;
+ tb = mftb();
+ vcpu->arch.dec_expires = dec + tb;
+ vcpu->cpu = -1;
+@@ -4122,8 +4124,15 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
+
+ preempt_enable();
+
+- /* cancel pending decrementer exception if DEC is now positive */
+- if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
++ /*
++ * cancel pending decrementer exception if DEC is now positive, or if
++ * entering a nested guest in which case the decrementer is now owned
++ * by L2 and the L1 decrementer is provided in hdec_expires
++ */
++ if (kvmppc_core_pending_dec(vcpu) &&
++ ((get_tb() < vcpu->arch.dec_expires) ||
++ (trap == BOOK3S_INTERRUPT_SYSCALL &&
++ kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
+ kvmppc_core_dequeue_dec(vcpu);
+
+ trace_kvm_guest_exit(vcpu);
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index 229496e2652e..0db937497169 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -128,7 +128,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ }
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ /* L=1 => tresume, L=0 => tsuspend */
+ if (instr & (1 << 21)) {
+ if (MSR_TM_SUSPENDED(msr))
+@@ -172,7 +172,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ return RESUME_GUEST;
+
+@@ -202,7 +202,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+
+ /* Set CR0 to indicate previous transactional state */
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
+ vcpu->arch.shregs.msr = msr | MSR_TS_S;
+ return RESUME_GUEST;
+ }
+diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
+index d53188dee18f..35cb96cfc258 100644
+--- a/arch/powerpc/mm/pgtable_32.c
++++ b/arch/powerpc/mm/pgtable_32.c
+@@ -360,7 +360,7 @@ void mark_initmem_nx(void)
+ unsigned long numpages = PFN_UP((unsigned long)_einittext) -
+ PFN_DOWN((unsigned long)_sinittext);
+
+- if (v_block_mapped((unsigned long)_stext) + 1)
++ if (v_block_mapped((unsigned long)_stext + 1))
+ mmu_mark_initmem_nx();
+ else
+ change_page_attr(page, numpages, PAGE_KERNEL);
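The bug here is operator precedence: the old code added 1 to the boolean result of v_block_mapped(), so the condition was always true and mmu_mark_initmem_nx() ran unconditionally. A reduced userspace reproduction of the difference:

    #include <stdio.h>

    static int pred(unsigned long x) { return x > 100; }

    int main(void)
    {
            unsigned long v = 5;

            printf("%d\n", pred(v) + 1);    /* 1 or 2: never false */
            printf("%d\n", pred(v + 1));    /* the intended test */
            return 0;
    }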
+diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
+index 6bbcbec97712..bd6085b470b7 100644
+--- a/arch/powerpc/platforms/powermac/sleep.S
++++ b/arch/powerpc/platforms/powermac/sleep.S
+@@ -33,10 +33,18 @@
+ #define SL_IBAT2 0x48
+ #define SL_DBAT3 0x50
+ #define SL_IBAT3 0x58
+-#define SL_TB 0x60
+-#define SL_R2 0x68
+-#define SL_CR 0x6c
+-#define SL_R12 0x70 /* r12 to r31 */
++#define SL_DBAT4 0x60
++#define SL_IBAT4 0x68
++#define SL_DBAT5 0x70
++#define SL_IBAT5 0x78
++#define SL_DBAT6 0x80
++#define SL_IBAT6 0x88
++#define SL_DBAT7 0x90
++#define SL_IBAT7 0x98
++#define SL_TB 0xa0
++#define SL_R2 0xa8
++#define SL_CR 0xac
++#define SL_R12 0xb0 /* r12 to r31 */
+ #define SL_SIZE (SL_R12 + 80)
+
+ .section .text
+@@ -121,6 +129,41 @@ _GLOBAL(low_sleep_handler)
+ mfibatl r4,3
+ stw r4,SL_IBAT3+4(r1)
+
++BEGIN_MMU_FTR_SECTION
++ mfspr r4,SPRN_DBAT4U
++ stw r4,SL_DBAT4(r1)
++ mfspr r4,SPRN_DBAT4L
++ stw r4,SL_DBAT4+4(r1)
++ mfspr r4,SPRN_DBAT5U
++ stw r4,SL_DBAT5(r1)
++ mfspr r4,SPRN_DBAT5L
++ stw r4,SL_DBAT5+4(r1)
++ mfspr r4,SPRN_DBAT6U
++ stw r4,SL_DBAT6(r1)
++ mfspr r4,SPRN_DBAT6L
++ stw r4,SL_DBAT6+4(r1)
++ mfspr r4,SPRN_DBAT7U
++ stw r4,SL_DBAT7(r1)
++ mfspr r4,SPRN_DBAT7L
++ stw r4,SL_DBAT7+4(r1)
++ mfspr r4,SPRN_IBAT4U
++ stw r4,SL_IBAT4(r1)
++ mfspr r4,SPRN_IBAT4L
++ stw r4,SL_IBAT4+4(r1)
++ mfspr r4,SPRN_IBAT5U
++ stw r4,SL_IBAT5(r1)
++ mfspr r4,SPRN_IBAT5L
++ stw r4,SL_IBAT5+4(r1)
++ mfspr r4,SPRN_IBAT6U
++ stw r4,SL_IBAT6(r1)
++ mfspr r4,SPRN_IBAT6L
++ stw r4,SL_IBAT6+4(r1)
++ mfspr r4,SPRN_IBAT7U
++ stw r4,SL_IBAT7(r1)
++ mfspr r4,SPRN_IBAT7L
++ stw r4,SL_IBAT7+4(r1)
++END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
++
+ /* Backup various CPU config stuffs */
+ bl __save_cpu_setup
+
+@@ -321,22 +364,37 @@ grackle_wake_up:
+ mtibatl 3,r4
+
+ BEGIN_MMU_FTR_SECTION
+- li r4,0
++ lwz r4,SL_DBAT4(r1)
+ mtspr SPRN_DBAT4U,r4
++ lwz r4,SL_DBAT4+4(r1)
+ mtspr SPRN_DBAT4L,r4
++ lwz r4,SL_DBAT5(r1)
+ mtspr SPRN_DBAT5U,r4
++ lwz r4,SL_DBAT5+4(r1)
+ mtspr SPRN_DBAT5L,r4
++ lwz r4,SL_DBAT6(r1)
+ mtspr SPRN_DBAT6U,r4
++ lwz r4,SL_DBAT6+4(r1)
+ mtspr SPRN_DBAT6L,r4
++ lwz r4,SL_DBAT7(r1)
+ mtspr SPRN_DBAT7U,r4
++ lwz r4,SL_DBAT7+4(r1)
+ mtspr SPRN_DBAT7L,r4
++ lwz r4,SL_IBAT4(r1)
+ mtspr SPRN_IBAT4U,r4
++ lwz r4,SL_IBAT4+4(r1)
+ mtspr SPRN_IBAT4L,r4
++ lwz r4,SL_IBAT5(r1)
+ mtspr SPRN_IBAT5U,r4
++ lwz r4,SL_IBAT5+4(r1)
+ mtspr SPRN_IBAT5L,r4
++ lwz r4,SL_IBAT6(r1)
+ mtspr SPRN_IBAT6U,r4
++ lwz r4,SL_IBAT6+4(r1)
+ mtspr SPRN_IBAT6L,r4
++ lwz r4,SL_IBAT7(r1)
+ mtspr SPRN_IBAT7U,r4
++ lwz r4,SL_IBAT7+4(r1)
+ mtspr SPRN_IBAT7L,r4
+ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+
+diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
+index 2f4479b94ac3..fd14a6237954 100644
+--- a/arch/powerpc/platforms/powernv/idle.c
++++ b/arch/powerpc/platforms/powernv/idle.c
+@@ -758,7 +758,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+ mtspr(SPRN_PTCR, sprs.ptcr);
+ mtspr(SPRN_RPR, sprs.rpr);
+ mtspr(SPRN_TSCR, sprs.tscr);
+- mtspr(SPRN_LDBAR, sprs.ldbar);
+
+ if (pls >= pnv_first_tb_loss_level) {
+ /* TB loss */
+@@ -790,6 +789,7 @@ core_woken:
+ mtspr(SPRN_MMCR0, sprs.mmcr0);
+ mtspr(SPRN_MMCR1, sprs.mmcr1);
+ mtspr(SPRN_MMCR2, sprs.mmcr2);
++ mtspr(SPRN_LDBAR, sprs.ldbar);
+
+ mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
+
+diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
+index c321fdbc2200..e607141e0b39 100644
+--- a/arch/powerpc/platforms/powernv/npu-dma.c
++++ b/arch/powerpc/platforms/powernv/npu-dma.c
+@@ -28,9 +28,22 @@ static DEFINE_SPINLOCK(npu_context_lock);
+ static struct pci_dev *get_pci_dev(struct device_node *dn)
+ {
+ struct pci_dn *pdn = PCI_DN(dn);
++ struct pci_dev *pdev;
+
+- return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
++ pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
+ pdn->busno, pdn->devfn);
++
++ /*
++ * pci_get_domain_bus_and_slot() increased the reference count of
++ * the PCI device, but callers don't need that actually as the PE
++ * already holds a reference to the device. Since callers aren't
++ * aware of the reference count change, call pci_dev_put() now to
++ * avoid leaks.
++ */
++ if (pdev)
++ pci_dev_put(pdev);
++
++ return pdev;
+ }
+
+ /* Given a NPU device get the associated PCI device. */
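The contract being balanced here: pci_get_domain_bus_and_slot() returns a referenced struct pci_dev, so a lookup helper whose callers expect a borrowed pointer must drop the reference itself. A minimal sketch of that pattern:

    #include <linux/pci.h>

    /* Sketch: return a borrowed pointer; validity relies on the caller
     * (here, the PE) already holding a longer-lived reference. */
    static struct pci_dev *find_dev_borrowed(int domain, unsigned int bus,
                                             unsigned int devfn)
    {
            struct pci_dev *pdev =
                    pci_get_domain_bus_and_slot(domain, bus, devfn);

            if (pdev)
                    pci_dev_put(pdev);

            return pdev;
    }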
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 10cc42b9e541..0f72c7484824 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -2456,6 +2456,14 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
+ if (!pnv_iommu_bypass_disabled)
+ pnv_pci_ioda2_set_bypass(pe, true);
+
++ /*
++ * Set table base for the case of IOMMU DMA use. Usually this is done
++ * from dma_dev_setup() which is not called when a device is returned
++ * from VFIO so do it here.
++ */
++ if (pe->pdev)
++ set_iommu_table_base(&pe->pdev->dev, tbl);
++
+ return 0;
+ }
+
+@@ -2543,6 +2551,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
+ pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ if (pe->pbus)
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
++ else if (pe->pdev)
++ set_iommu_table_base(&pe->pdev->dev, NULL);
+ iommu_tce_table_put(tbl);
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 2ec43b4639a0..46d0d35b9ca4 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -976,6 +976,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
+ if (!memblock_size)
+ return -EINVAL;
+
++ if (!pr->old_prop)
++ return 0;
++
+ p = (__be32 *) pr->old_prop->value;
+ if (!p)
+ return -EINVAL;
+diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
+index cafb5c4df26b..8ef9cf4ebb1c 100644
+--- a/arch/powerpc/sysdev/xive/spapr.c
++++ b/arch/powerpc/sysdev/xive/spapr.c
+@@ -16,6 +16,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/mm.h>
+ #include <linux/delay.h>
++#include <linux/libfdt.h>
+
+ #include <asm/prom.h>
+ #include <asm/io.h>
+@@ -659,6 +660,55 @@ static bool xive_get_max_prio(u8 *max_prio)
+ return true;
+ }
+
++static const u8 *get_vec5_feature(unsigned int index)
++{
++ unsigned long root, chosen;
++ int size;
++ const u8 *vec5;
++
++ root = of_get_flat_dt_root();
++ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
++ if (chosen == -FDT_ERR_NOTFOUND)
++ return NULL;
++
++ vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
++ if (!vec5)
++ return NULL;
++
++ if (size <= index)
++ return NULL;
++
++ return vec5 + index;
++}
++
++static bool xive_spapr_disabled(void)
++{
++ const u8 *vec5_xive;
++
++ vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
++ if (vec5_xive) {
++ u8 val;
++
++ val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
++ switch (val) {
++ case OV5_FEAT(OV5_XIVE_EITHER):
++ case OV5_FEAT(OV5_XIVE_LEGACY):
++ break;
++ case OV5_FEAT(OV5_XIVE_EXPLOIT):
++ /* Hypervisor only supports XIVE */
++ if (xive_cmdline_disabled)
++ pr_warn("WARNING: Ignoring cmdline option xive=off\n");
++ return false;
++ default:
++ pr_warn("%s: Unknown xive support option: 0x%x\n",
++ __func__, val);
++ break;
++ }
++ }
++
++ return xive_cmdline_disabled;
++}
++
+ bool __init xive_spapr_init(void)
+ {
+ struct device_node *np;
+@@ -671,7 +721,7 @@ bool __init xive_spapr_init(void)
+ const __be32 *reg;
+ int i;
+
+- if (xive_cmdline_disabled)
++ if (xive_spapr_disabled())
+ return false;
+
+ pr_devel("%s()\n", __func__);
+diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
+index 85e6984c560b..a6ea07f2aa84 100644
+--- a/arch/x86/events/amd/uncore.c
++++ b/arch/x86/events/amd/uncore.c
+@@ -202,15 +202,22 @@ static int amd_uncore_event_init(struct perf_event *event)
+ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
+ hwc->idx = -1;
+
++ if (event->cpu < 0)
++ return -EINVAL;
++
+ /*
+ * SliceMask and ThreadMask need to be set for certain L3 events in
+ * Family 17h. For other events, the two fields do not affect the count.
+ */
+- if (l3_mask)
+- hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
++ if (l3_mask && is_llc_event(event)) {
++ int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
+
+- if (event->cpu < 0)
+- return -EINVAL;
++ if (smp_num_siblings > 1)
++ thread += cpu_data(event->cpu).apicid & 1;
++
++ hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
++ AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
++ }
+
+ uncore = event_to_amd_uncore(event);
+ if (!uncore)
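The ThreadMask computation pins a Family 17h L3 event to the hardware thread of the event's CPU: two mask bits per core, four cores per CCX, plus the SMT sibling bit. A hedged restatement of the bit-offset arithmetic:

    /* Sketch: bit offset within ThreadMask for a given CPU. */
    static int l3_thread_bit(int cpu_core_id, int apicid, int smt_enabled)
    {
            int thread = 2 * (cpu_core_id % 4);

            if (smt_enabled)
                    thread += apicid & 1;

            return thread;
    }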
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index a5436cee20b1..2889dd023566 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -20,6 +20,7 @@
+ #include <asm/intel-family.h>
+ #include <asm/apic.h>
+ #include <asm/cpu_device_id.h>
++#include <asm/hypervisor.h>
+
+ #include "../perf_event.h"
+
+@@ -2160,12 +2161,10 @@ static void intel_pmu_disable_event(struct perf_event *event)
+ cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+ cpuc->intel_cp_status &= ~(1ull << hwc->idx);
+
+- if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
++ if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
+ intel_pmu_disable_fixed(hwc);
+- return;
+- }
+-
+- x86_pmu_disable_event(event);
++ else
++ x86_pmu_disable_event(event);
+
+ /*
+ * Needs to be called after x86_pmu_disable_event,
+@@ -4054,6 +4053,13 @@ static bool check_msr(unsigned long msr, u64 mask)
+ {
+ u64 val_old, val_new, val_tmp;
+
++ /*
++ * Disable the check for real HW, so we don't
++ * mess with potentially enabled registers:
++ */
++ if (hypervisor_is_type(X86_HYPER_NATIVE))
++ return true;
++
+ /*
+ * Read the current value, change it and read it back to see if it
+ * matches, this is needed to detect certain hardware emulators
+@@ -4439,6 +4445,7 @@ __init int intel_pmu_init(void)
+ struct event_constraint *c;
+ unsigned int unused;
+ struct extra_reg *er;
++ bool pmem = false;
+ int version, i;
+ char *name;
+
+@@ -4890,9 +4897,10 @@ __init int intel_pmu_init(void)
+ name = "knights-landing";
+ break;
+
++ case INTEL_FAM6_SKYLAKE_X:
++ pmem = true;
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ x86_add_quirk(intel_pebs_isolation_quirk);
+@@ -4925,8 +4933,7 @@ __init int intel_pmu_init(void)
+ x86_pmu.cpu_events = hsw_events_attrs;
+ mem_attr = hsw_mem_events_attrs;
+ tsx_attr = hsw_tsx_events_attrs;
+- intel_pmu_pebs_data_source_skl(
+- boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
++ intel_pmu_pebs_data_source_skl(pmem);
+
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ x86_pmu.flags |= PMU_FL_TFA;
+@@ -4940,7 +4947,11 @@ __init int intel_pmu_init(void)
+ name = "skylake";
+ break;
+
++ case INTEL_FAM6_ICELAKE_X:
++ case INTEL_FAM6_ICELAKE_XEON_D:
++ pmem = true;
+ case INTEL_FAM6_ICELAKE_MOBILE:
++ case INTEL_FAM6_ICELAKE_DESKTOP:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+@@ -4963,7 +4974,7 @@ __init int intel_pmu_init(void)
+ x86_pmu.cpu_events = get_icl_events_attrs();
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
+ x86_pmu.lbr_pt_coexist = true;
+- intel_pmu_pebs_data_source_skl(false);
++ intel_pmu_pebs_data_source_skl(pmem);
+ pr_cont("Icelake events, ");
+ name = "icelake";
+ break;
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index 9e3fbd47cb56..089bfcdf2f7f 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -1400,6 +1400,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
++ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI, icl_uncore_init),
+ {},
+ };
+
+diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
+index 79eb2e21e4f0..28499e39679f 100644
+--- a/arch/x86/events/intel/uncore.h
++++ b/arch/x86/events/intel/uncore.h
+@@ -419,6 +419,16 @@ static inline bool is_freerunning_event(struct perf_event *event)
+ (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
+ }
+
++/* Check and reject invalid config */
++static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
++ struct perf_event *event)
++{
++ if (is_freerunning_event(event))
++ return 0;
++
++ return -EINVAL;
++}
++
+ static inline void uncore_disable_box(struct intel_uncore_box *box)
+ {
+ if (box->pmu->type->ops->disable_box)
+diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
+index b10e04387f38..8e4e8e423839 100644
+--- a/arch/x86/events/intel/uncore_snbep.c
++++ b/arch/x86/events/intel/uncore_snbep.c
+@@ -3585,6 +3585,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
+
+ static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
+ .read_counter = uncore_msr_read_counter,
++ .hw_config = uncore_freerunning_hw_config,
+ };
+
+ static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 1608050e9df9..dd92d8b438d4 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -111,8 +111,17 @@ static int hv_cpu_init(unsigned int cpu)
+ if (!hv_vp_assist_page)
+ return 0;
+
+- if (!*hvp)
+- *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
++ /*
++ * The VP ASSIST PAGE is an "overlay" page (see Hyper-V TLFS's Section
++ * 5.2.1 "GPA Overlay Pages"). Here it must be zeroed out to make sure
++ * we always write the EOI MSR in hv_apic_eoi_write() *after* the
++ * EOI optimization is disabled in hv_cpu_die(); otherwise a CPU may
++ * not be stopped in the case of CPU offlining and the VM will hang.
++ */
++ if (!*hvp) {
++ *hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO,
++ PAGE_KERNEL);
++ }
+
+ if (*hvp) {
+ u64 val;
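The functional change is the gfp mask: the three-argument __vmalloc() used in this kernel series does not zero memory unless __GFP_ZERO is passed, and an overlay page must start out clean. A sketch of the allocation:

    #include <linux/vmalloc.h>

    /* Sketch: allocate one zeroed, kernel-mapped page. */
    static void *alloc_zeroed_page_sketch(void)
    {
            return __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
    }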
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index ea3d95275b43..115127c7ad28 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -54,7 +54,7 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
+ {
+ asm volatile(LOCK_PREFIX "addl %1,%0"
+ : "+m" (v->counter)
+- : "ir" (i));
++ : "ir" (i) : "memory");
+ }
+
+ /**
+@@ -68,7 +68,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
+ {
+ asm volatile(LOCK_PREFIX "subl %1,%0"
+ : "+m" (v->counter)
+- : "ir" (i));
++ : "ir" (i) : "memory");
+ }
+
+ /**
+@@ -95,7 +95,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
+ static __always_inline void arch_atomic_inc(atomic_t *v)
+ {
+ asm volatile(LOCK_PREFIX "incl %0"
+- : "+m" (v->counter));
++ : "+m" (v->counter) :: "memory");
+ }
+ #define arch_atomic_inc arch_atomic_inc
+
+@@ -108,7 +108,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
+ static __always_inline void arch_atomic_dec(atomic_t *v)
+ {
+ asm volatile(LOCK_PREFIX "decl %0"
+- : "+m" (v->counter));
++ : "+m" (v->counter) :: "memory");
+ }
+ #define arch_atomic_dec arch_atomic_dec
+
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index dadc20adba21..5e86c0d68ac1 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -45,7 +45,7 @@ static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
+ {
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+- : "er" (i), "m" (v->counter));
++ : "er" (i), "m" (v->counter) : "memory");
+ }
+
+ /**
+@@ -59,7 +59,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
+ {
+ asm volatile(LOCK_PREFIX "subq %1,%0"
+ : "=m" (v->counter)
+- : "er" (i), "m" (v->counter));
++ : "er" (i), "m" (v->counter) : "memory");
+ }
+
+ /**
+@@ -87,7 +87,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
+ {
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+- : "m" (v->counter));
++ : "m" (v->counter) : "memory");
+ }
+ #define arch_atomic64_inc arch_atomic64_inc
+
+@@ -101,7 +101,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
+ {
+ asm volatile(LOCK_PREFIX "decq %0"
+ : "=m" (v->counter)
+- : "m" (v->counter));
++ : "m" (v->counter) : "memory");
+ }
+ #define arch_atomic64_dec arch_atomic64_dec
+
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 14de0432d288..84f848c2541a 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -80,8 +80,8 @@ do { \
+ })
+
+ /* Atomic operations are already serializing on x86 */
+-#define __smp_mb__before_atomic() barrier()
+-#define __smp_mb__after_atomic() barrier()
++#define __smp_mb__before_atomic() do { } while (0)
++#define __smp_mb__after_atomic() do { } while (0)
+
+ #include <asm-generic/barrier.h>
+
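The three hunks above are one logical change: adding a "memory" clobber makes each LOCK-prefixed atomic a compiler barrier as well, which is what lets __smp_mb__before_atomic()/__smp_mb__after_atomic() become empty on x86. A standalone sketch of the clobber:

    /* Sketch: the clobber forbids the compiler from caching memory
     * values across the atomic; the LOCK prefix already orders the
     * operation at the hardware level on x86. */
    static inline void atomic_add_sketch(int i, int *counter)
    {
            asm volatile("lock addl %1,%0"
                         : "+m" (*counter)
                         : "ir" (i)
                         : "memory");
    }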
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index 75f27ee2c263..1017b9c7dfe0 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -239,12 +239,14 @@
+ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
++#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
+ #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+ #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+ #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+ #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+ #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+ #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
++#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
+ #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+ #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
+ #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 310118805f57..f60ddd655c78 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -56,6 +56,7 @@
+ #define INTEL_FAM6_ICELAKE_XEON_D 0x6C
+ #define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
+ #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
++#define INTEL_FAM6_ICELAKE_NNPI 0x9D
+
+ /* "Small Core" Processors (Atom) */
+
+diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
+index 395d46f78582..c7503be92f35 100644
+--- a/arch/x86/kernel/cpu/cacheinfo.c
++++ b/arch/x86/kernel/cpu/cacheinfo.c
+@@ -658,8 +658,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+ if (c->x86 < 0x17) {
+ /* LLC is at the node level. */
+ per_cpu(cpu_llc_id, cpu) = node_id;
+- } else if (c->x86 == 0x17 &&
+- c->x86_model >= 0 && c->x86_model <= 0x1F) {
++ } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
+ /*
+ * LLC is at the core complex level.
+ * Core complex ID is ApicId[3] for these processors.
+diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
+index d0dfb892c72f..aed45b8895d5 100644
+--- a/arch/x86/kernel/cpu/mkcapflags.sh
++++ b/arch/x86/kernel/cpu/mkcapflags.sh
+@@ -4,6 +4,8 @@
+ # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
+ #
+
++set -e
++
+ IN=$1
+ OUT=$2
+
+diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
+index 1bfe5c6e6cfe..afac7ccce72f 100644
+--- a/arch/x86/kernel/mpparse.c
++++ b/arch/x86/kernel/mpparse.c
+@@ -546,17 +546,15 @@ void __init default_get_smp_config(unsigned int early)
+ * local APIC has default address
+ */
+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+- return;
++ goto out;
+ }
+
+ pr_info("Default MP configuration #%d\n", mpf->feature1);
+ construct_default_ISA_mptable(mpf->feature1);
+
+ } else if (mpf->physptr) {
+- if (check_physptr(mpf, early)) {
+- early_memunmap(mpf, sizeof(*mpf));
+- return;
+- }
++ if (check_physptr(mpf, early))
++ goto out;
+ } else
+ BUG();
+
+@@ -565,7 +563,7 @@ void __init default_get_smp_config(unsigned int early)
+ /*
+ * Only use the first configuration found.
+ */
+-
++out:
+ early_memunmap(mpf, sizeof(*mpf));
+ }
+
+diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
+index 2abf27d7df6b..4f36d3241faf 100644
+--- a/arch/x86/kernel/stacktrace.c
++++ b/arch/x86/kernel/stacktrace.c
+@@ -129,11 +129,9 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ break;
+ if ((unsigned long)fp < regs->sp)
+ break;
+- if (frame.ret_addr) {
+- if (!consume_entry(cookie, frame.ret_addr, false))
+- return;
+- }
+- if (fp == frame.next_fp)
++ if (!frame.ret_addr)
++ break;
++ if (!consume_entry(cookie, frame.ret_addr, false))
+ break;
+ fp = frame.next_fp;
+ }
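The rewritten loop stops at the first frame without a return address (instead of silently skipping it) and keeps walking while the consumer accepts entries. A hedged model with placeholder types, omitting the kernel's bounds and copy-from-user checks:

    struct uframe { unsigned long next_fp, ret_addr; };

    /* Sketch: read_frame() and consume() stand in for the kernel's
     * user-memory access and callback plumbing. */
    static void walk_user_frames(const struct uframe *(*read_frame)(unsigned long),
                                 int (*consume)(unsigned long),
                                 unsigned long fp)
    {
            while (fp) {
                    const struct uframe *f = read_frame(fp);

                    if (!f || !f->ret_addr)
                            break;
                    if (!consume(f->ret_addr))
                            break;
                    fp = f->next_fp;
            }
    }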
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index 132d149494d6..cab14ec1b3af 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -128,8 +128,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+ intr ? kvm_perf_overflow_intr :
+ kvm_perf_overflow, pmc);
+ if (IS_ERR(event)) {
+- printk_once("kvm_pmu: event creation failed %ld\n",
+- PTR_ERR(event));
++ pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
++ PTR_ERR(event), pmc->idx);
+ return;
+ }
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 46af3a5e9209..7df4f46499e1 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2234,13 +2234,9 @@ static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+
+ set_cr4_guest_host_mask(vmx);
+
+- if (kvm_mpx_supported()) {
+- if (vmx->nested.nested_run_pending &&
+- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+- else
+- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+- }
++ if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
++ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
++ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ }
+
+ /*
+@@ -2283,6 +2279,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
++ if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
++ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
++ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
+@@ -2878,9 +2877,6 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+ */
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_TPR_SHADOW);
+- } else {
+- printk("bad virtual-APIC page address\n");
+- dump_vmcs();
+ }
+ }
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index d98eac371c0a..306ed28569c0 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1718,7 +1718,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
+ &msr_info->data);
+ case MSR_IA32_XSS:
+- if (!vmx_xsaves_supported())
++ if (!vmx_xsaves_supported() ||
++ (!msr_info->host_initiated &&
++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+ return 1;
+ msr_info->data = vcpu->arch.ia32_xss;
+ break;
+@@ -1896,9 +1899,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ MSR_TYPE_W);
+ break;
+ case MSR_IA32_CR_PAT:
++ if (!kvm_pat_valid(data))
++ return 1;
++
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+- if (!kvm_pat_valid(data))
+- return 1;
+ vmcs_write64(GUEST_IA32_PAT, data);
+ vcpu->arch.pat = data;
+ break;
+@@ -1932,7 +1936,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ return 1;
+ return vmx_set_vmx_msr(vcpu, msr_index, data);
+ case MSR_IA32_XSS:
+- if (!vmx_xsaves_supported())
++ if (!vmx_xsaves_supported() ||
++ (!msr_info->host_initiated &&
++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
+ return 1;
+ /*
+ * The only supported bit as of Skylake is bit 8, but
+@@ -6109,28 +6116,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+
+ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
+ {
+- u32 exit_intr_info = 0;
+- u16 basic_exit_reason = (u16)vmx->exit_reason;
+-
+- if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+- || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
++ if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
+ return;
+
+- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+- vmx->exit_intr_info = exit_intr_info;
++ vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ /* if exit due to PF check for async PF */
+- if (is_page_fault(exit_intr_info))
++ if (is_page_fault(vmx->exit_intr_info))
+ vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+
+ /* Handle machine checks before interrupts are enabled */
+- if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+- is_machine_check(exit_intr_info))
++ if (is_machine_check(vmx->exit_intr_info))
+ kvm_machine_check();
+
+ /* We need to handle NMIs before interrupts are enabled */
+- if (is_nmi(exit_intr_info)) {
++ if (is_nmi(vmx->exit_intr_info)) {
+ kvm_before_interrupt(&vmx->vcpu);
+ asm("int $2");
+ kvm_after_interrupt(&vmx->vcpu);
+@@ -6533,6 +6533,9 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ vmx->idt_vectoring_info = 0;
+
+ vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
++ if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
++ kvm_machine_check();
++
+ if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ return;
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index e5db3856b194..404e776aa36d 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5398,8 +5398,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ * total service time, and there seem to be the right
+ * conditions to do it, or we can lower the last base value
+ * computed.
++ *
++ * NOTE: (bfqd->rq_in_driver == 1) means that there is no I/O
++ * request in flight, because this function is in the code
++ * path that handles the completion of a request of bfqq, and,
++ * in particular, this function is executed before
++ * bfqd->rq_in_driver is decremented in such a code path.
+ */
+- if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 0) ||
++ if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
+ tot_time_ns < bfqq->last_serv_time_ns) {
+ bfqq->last_serv_time_ns = tot_time_ns;
+ /*
+diff --git a/block/bio.c b/block/bio.c
+index 67bba12d273b..121caeea3e00 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -16,6 +16,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/cgroup.h>
+ #include <linux/blk-cgroup.h>
++#include <linux/highmem.h>
+
+ #include <trace/events/block.h>
+ #include "blk.h"
+@@ -1479,8 +1480,22 @@ void bio_unmap_user(struct bio *bio)
+ bio_put(bio);
+ }
+
++static void bio_invalidate_vmalloc_pages(struct bio *bio)
++{
++#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
++ if (bio->bi_private && !op_is_write(bio_op(bio))) {
++ unsigned long i, len = 0;
++
++ for (i = 0; i < bio->bi_vcnt; i++)
++ len += bio->bi_io_vec[i].bv_len;
++ invalidate_kernel_vmap_range(bio->bi_private, len);
++ }
++#endif
++}
++
+ static void bio_map_kern_endio(struct bio *bio)
+ {
++ bio_invalidate_vmalloc_pages(bio);
+ bio_put(bio);
+ }
+
+@@ -1501,6 +1516,8 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long start = kaddr >> PAGE_SHIFT;
+ const int nr_pages = end - start;
++ bool is_vmalloc = is_vmalloc_addr(data);
++ struct page *page;
+ int offset, i;
+ struct bio *bio;
+
+@@ -1508,6 +1525,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+
++ if (is_vmalloc) {
++ flush_kernel_vmap_range(data, len);
++ bio->bi_private = data;
++ }
++
+ offset = offset_in_page(kaddr);
+ for (i = 0; i < nr_pages; i++) {
+ unsigned int bytes = PAGE_SIZE - offset;
+@@ -1518,7 +1540,11 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
+ if (bytes > len)
+ bytes = len;
+
+- if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
++ if (!is_vmalloc)
++ page = virt_to_page(data);
++ else
++ page = vmalloc_to_page(data);
++ if (bio_add_pc_page(q, bio, page, bytes,
+ offset) < bytes) {
+ /* we don't support partial mappings */
+ bio_put(bio);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 1f7127b03490..e4715b35d42c 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -1006,8 +1006,12 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
+ }
+ next:
+ if (has_stats) {
+- off += scnprintf(buf+off, size-off, "\n");
+- seq_commit(sf, off);
++ if (off < size - 1) {
++ off += scnprintf(buf+off, size-off, "\n");
++ seq_commit(sf, off);
++ } else {
++ seq_commit(sf, -1);
++ }
+ }
+ }
+
+diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
+index d22e61bced86..d973c38ee4fd 100644
+--- a/block/blk-iolatency.c
++++ b/block/blk-iolatency.c
+@@ -618,44 +618,26 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
+
+ inflight = atomic_dec_return(&rqw->inflight);
+ WARN_ON_ONCE(inflight < 0);
+- if (iolat->min_lat_nsec == 0)
+- goto next;
+- iolatency_record_time(iolat, &bio->bi_issue, now,
+- issue_as_root);
+- window_start = atomic64_read(&iolat->window_start);
+- if (now > window_start &&
+- (now - window_start) >= iolat->cur_win_nsec) {
+- if (atomic64_cmpxchg(&iolat->window_start,
+- window_start, now) == window_start)
+- iolatency_check_latencies(iolat, now);
++ /*
++ * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
++ * submitted, so do not account for it.
++ */
++ if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
++ iolatency_record_time(iolat, &bio->bi_issue, now,
++ issue_as_root);
++ window_start = atomic64_read(&iolat->window_start);
++ if (now > window_start &&
++ (now - window_start) >= iolat->cur_win_nsec) {
++ if (atomic64_cmpxchg(&iolat->window_start,
++ window_start, now) == window_start)
++ iolatency_check_latencies(iolat, now);
++ }
+ }
+-next:
+ wake_up(&rqw->wait);
+ blkg = blkg->parent;
+ }
+ }
+
+-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
+-{
+- struct blkcg_gq *blkg;
+-
+- blkg = bio->bi_blkg;
+- while (blkg && blkg->parent) {
+- struct rq_wait *rqw;
+- struct iolatency_grp *iolat;
+-
+- iolat = blkg_to_lat(blkg);
+- if (!iolat)
+- goto next;
+-
+- rqw = &iolat->rq_wait;
+- atomic_dec(&rqw->inflight);
+- wake_up(&rqw->wait);
+-next:
+- blkg = blkg->parent;
+- }
+-}
+-
+ static void blkcg_iolatency_exit(struct rq_qos *rqos)
+ {
+ struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
+@@ -667,7 +649,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
+
+ static struct rq_qos_ops blkcg_iolatency_ops = {
+ .throttle = blkcg_iolatency_throttle,
+- .cleanup = blkcg_iolatency_cleanup,
+ .done_bio = blkcg_iolatency_done_bio,
+ .exit = blkcg_iolatency_exit,
+ };
+@@ -778,8 +759,10 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+
+ if (!oldval && val)
+ return 1;
+- if (oldval && !val)
++ if (oldval && !val) {
++ blkcg_clear_delay(blkg);
+ return -1;
++ }
+ return 0;
+ }
+
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 9ea7c0ecad10..8ab6c8153223 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -881,13 +881,10 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
+ unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+ u64 tmp;
+
+- jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
+-
+- /* Slice has just started. Consider one slice interval */
+- if (!jiffy_elapsed)
+- jiffy_elapsed_rnd = tg->td->throtl_slice;
++ jiffy_elapsed = jiffies - tg->slice_start[rw];
+
+- jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
++ /* Round up to the next throttle slice, wait time must be nonzero */
++ jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
+
+ /*
+ * jiffy_elapsed_rnd should not be a big value as minimum iops can be
+diff --git a/block/blk-zoned.c b/block/blk-zoned.c
+index ae7e91bd0618..3249738242b4 100644
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
+ static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
+ sector_t nr_sectors)
+ {
+- unsigned long zone_sectors = blk_queue_zone_sectors(q);
++ sector_t zone_sectors = blk_queue_zone_sectors(q);
+
+ return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
+ }
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index be70ca6c85d3..1f1f004dc757 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select MPILIB
+ select CRYPTO_HASH_INFO
+ select CRYPTO_AKCIPHER
++ select CRYPTO_HASH
+ help
+ This option provides support for asymmetric public key type handling.
+ If signature generation and/or verification are to be used,
+@@ -65,6 +66,7 @@ config TPM_KEY_PARSER
+ config PKCS7_MESSAGE_PARSER
+ tristate "PKCS#7 message parser"
+ depends on X509_CERTIFICATE_PARSER
++ select CRYPTO_HASH
+ select ASN1
+ select OID_REGISTRY
+ help
+@@ -87,6 +89,7 @@ config SIGNED_PE_FILE_VERIFICATION
+ bool "Support for PE file signature verification"
+ depends on PKCS7_MESSAGE_PARSER=y
+ depends on SYSTEM_DATA_VERIFICATION
++ select CRYPTO_HASH
+ select ASN1
+ select OID_REGISTRY
+ help
+diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
+index 2db7eac4bf3b..2e2c25c62be6 100644
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -61,6 +61,8 @@ struct chachapoly_req_ctx {
+ unsigned int cryptlen;
+ /* Actual AD, excluding IV */
+ unsigned int assoclen;
++ /* request flags, with MAY_SLEEP cleared if needed */
++ u32 flags;
+ union {
+ struct poly_req poly;
+ struct chacha_req chacha;
+@@ -70,8 +72,12 @@ struct chachapoly_req_ctx {
+ static inline void async_done_continue(struct aead_request *req, int err,
+ int (*cont)(struct aead_request *))
+ {
+- if (!err)
++ if (!err) {
++ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
++
++ rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ err = cont(req);
++ }
+
+ if (err != -EINPROGRESS && err != -EBUSY)
+ aead_request_complete(req, err);
+@@ -138,7 +144,7 @@ static int chacha_decrypt(struct aead_request *req)
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+ }
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ chacha_decrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -182,7 +188,7 @@ static int poly_tail(struct aead_request *req)
+ memcpy(&preq->tail.cryptlen, &len, sizeof(len));
+ sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_tail_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src,
+@@ -213,7 +219,7 @@ static int poly_cipherpad(struct aead_request *req)
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, &preq->pad, padlen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_cipherpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -244,7 +250,7 @@ static int poly_cipher(struct aead_request *req)
+ sg_init_table(rctx->src, 2);
+ crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_cipher_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
+@@ -274,7 +280,7 @@ static int poly_adpad(struct aead_request *req)
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, preq->pad, padlen);
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_adpad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
+@@ -298,7 +304,7 @@ static int poly_ad(struct aead_request *req)
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_ad_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
+@@ -325,7 +331,7 @@ static int poly_setkey(struct aead_request *req)
+ sg_init_table(preq->src, 1);
+ sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_setkey_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+ ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
+@@ -349,7 +355,7 @@ static int poly_init(struct aead_request *req)
+ struct poly_req *preq = &rctx->u.poly;
+ int err;
+
+- ahash_request_set_callback(&preq->req, aead_request_flags(req),
++ ahash_request_set_callback(&preq->req, rctx->flags,
+ poly_init_done, req);
+ ahash_request_set_tfm(&preq->req, ctx->poly);
+
+@@ -387,7 +393,7 @@ static int poly_genkey(struct aead_request *req)
+
+ chacha_iv(creq->iv, req, 0);
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ poly_genkey_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
+@@ -427,7 +433,7 @@ static int chacha_encrypt(struct aead_request *req)
+ dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+ }
+
+- skcipher_request_set_callback(&creq->req, aead_request_flags(req),
++ skcipher_request_set_callback(&creq->req, rctx->flags,
+ chacha_encrypt_done, req);
+ skcipher_request_set_tfm(&creq->req, ctx->chacha);
+ skcipher_request_set_crypt(&creq->req, src, dst,
+@@ -445,6 +451,7 @@ static int chachapoly_encrypt(struct aead_request *req)
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->cryptlen = req->cryptlen;
++ rctx->flags = aead_request_flags(req);
+
+ /* encrypt call chain:
+ * - chacha_encrypt/done()
+@@ -466,6 +473,7 @@ static int chachapoly_decrypt(struct aead_request *req)
+ struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
++ rctx->flags = aead_request_flags(req);
+
+ /* decrypt call chain:
+ * - poly_genkey/done()
+diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
+index 6425b9cd718e..dad9e1f91a78 100644
+--- a/crypto/ghash-generic.c
++++ b/crypto/ghash-generic.c
+@@ -31,6 +31,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+ {
+ struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
++ be128 k;
+
+ if (keylen != GHASH_BLOCK_SIZE) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+@@ -39,7 +40,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
+
+ if (ctx->gf128)
+ gf128mul_free_4k(ctx->gf128);
+- ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
++
++ BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
++ memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
++ ctx->gf128 = gf128mul_init_4k_lle(&k);
++ memzero_explicit(&k, GHASH_BLOCK_SIZE);
++
+ if (!ctx->gf128)
+ return -ENOMEM;
+
+diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
+index 16f612b6dbca..a9cc0b2aa0d6 100644
+--- a/crypto/serpent_generic.c
++++ b/crypto/serpent_generic.c
+@@ -225,7 +225,13 @@
+ x4 ^= x2; \
+ })
+
+-static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
++/*
++ * both gcc and clang have misoptimized this function in the past,
++ * producing horrible object code from spilling temporary variables
++ * on the stack. Forcing this part out of line avoids that.
++ */
++static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
++ u32 r3, u32 r4, u32 *k)
+ {
+ k += 100;
+ S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
+diff --git a/crypto/testmgr.c b/crypto/testmgr.c
+index 658a7eeebab2..292d28caf00f 100644
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1279,6 +1279,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
+ req, tsgl, hashstate);
+ if (err)
+ return err;
++ cond_resched();
+ }
+ }
+ #endif
+@@ -1493,6 +1494,7 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
+ err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate);
+ if (err)
+ goto out;
++ cond_resched();
+ }
+ err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
+ tsgl, hashstate);
+@@ -1755,6 +1757,7 @@ static int test_aead_vec(const char *driver, int enc,
+ &cfg, req, tsgls);
+ if (err)
+ return err;
++ cond_resched();
+ }
+ }
+ #endif
+@@ -1994,6 +1997,7 @@ static int test_aead(const char *driver, int enc,
+ tsgls);
+ if (err)
+ return err;
++ cond_resched();
+ }
+ return 0;
+ }
+@@ -2336,6 +2340,7 @@ static int test_skcipher_vec(const char *driver, int enc,
+ &cfg, req, tsgls);
+ if (err)
+ return err;
++ cond_resched();
+ }
+ }
+ #endif
+@@ -2535,6 +2540,7 @@ static int test_skcipher(const char *driver, int enc,
+ tsgls);
+ if (err)
+ return err;
++ cond_resched();
+ }
+ return 0;
+ }
+diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
+index 831660179662..c8652f91054e 100644
+--- a/drivers/acpi/acpica/acevents.h
++++ b/drivers/acpi/acpica/acevents.h
+@@ -69,7 +69,8 @@ acpi_status
+ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);
+
+ acpi_status
+-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
++acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
++ u8 clear_on_enable);
+
+ acpi_status
+ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
+diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
+index 62d3aa74277b..344feba29063 100644
+--- a/drivers/acpi/acpica/evgpe.c
++++ b/drivers/acpi/acpica/evgpe.c
+@@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
+ * FUNCTION: acpi_ev_add_gpe_reference
+ *
+ * PARAMETERS: gpe_event_info - Add a reference to this GPE
++ * clear_on_enable - Clear GPE status before enabling it
+ *
+ * RETURN: Status
+ *
+@@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
+ ******************************************************************************/
+
+ acpi_status
+-acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
++acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
++ u8 clear_on_enable)
+ {
+ acpi_status status = AE_OK;
+
+@@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
+
+ /* Enable on first reference */
+
++ if (clear_on_enable) {
++ (void)acpi_hw_clear_gpe(gpe_event_info);
++ }
++
+ status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
+ if (ACPI_SUCCESS(status)) {
+ status = acpi_ev_enable_gpe(gpe_event_info);
+diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
+index 328d1d6123ad..fb15e9e2373b 100644
+--- a/drivers/acpi/acpica/evgpeblk.c
++++ b/drivers/acpi/acpica/evgpeblk.c
+@@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ continue;
+ }
+
+- status = acpi_ev_add_gpe_reference(gpe_event_info);
++ status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not enable GPE 0x%02X",
+diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
+index 3df00eb6621b..279ef0557aa3 100644
+--- a/drivers/acpi/acpica/evxface.c
++++ b/drivers/acpi/acpica/evxface.c
+@@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ ACPI_GPE_DISPATCH_METHOD) ||
+ (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
+ ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
+- (void)acpi_ev_add_gpe_reference(gpe_event_info);
++ (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
+ if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
+
+ /* Poll edge triggered GPEs to handle existing events */
+diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
+index 30a083902f52..710488ec59e9 100644
+--- a/drivers/acpi/acpica/evxfgpe.c
++++ b/drivers/acpi/acpica/evxfgpe.c
+@@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
+ if (gpe_event_info) {
+ if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_NONE) {
+- status = acpi_ev_add_gpe_reference(gpe_event_info);
++ status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
+ if (ACPI_SUCCESS(status) &&
+ ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 9d687e1d4325..3bfd9da58473 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1469,7 +1469,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
+ tf->hob_lbah = buf[10];
+ tf->nsect = buf[12];
+ tf->hob_nsect = buf[13];
+- if (ata_id_has_ncq_autosense(dev->id))
++ if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
+ tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+
+ return 0;
+@@ -1716,7 +1716,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
+ memcpy(&qc->result_tf, &tf, sizeof(tf));
+ qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+- if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
++ if (dev->class == ATA_DEV_ZAC &&
++ ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
+ char sense_key, asc, ascq;
+
+ sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+@@ -1770,10 +1771,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+ }
+
+ switch (qc->dev->class) {
+- case ATA_DEV_ATA:
+ case ATA_DEV_ZAC:
+ if (stat & ATA_SENSE)
+ ata_eh_request_sense(qc, qc->scsicmd);
++ /* fall through */
++ case ATA_DEV_ATA:
+ if (err & ATA_ICRC)
+ qc->err_mask |= AC_ERR_ATA_BUS;
+ if (err & (ATA_UNC | ATA_AMNF))
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 263f82516ff4..e5e1b3a01b1a 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -579,6 +579,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
+ }
+
+ if (!strcmp(name, "dummy")) {
++ kfree(map->debugfs_name);
++
+ map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+ dummy_index);
+ name = map->debugfs_name;
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index f1025452bb39..19f57ccfbe1d 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1637,6 +1637,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len);
++ else
++ ret = -ENOTSUPP;
+
+ /* If that didn't work fall back on linearising by hand. */
+ if (ret == -ENOTSUPP) {
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index 9fb9b312ab6b..fee57f7f3821 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -2120,6 +2120,9 @@ static void setup_format_params(int track)
+ raw_cmd->kernel_data = floppy_track_buffer;
+ raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
++ if (!F_SECT_PER_TRACK)
++ return;
++
+ /* allow for about 30ms for data transport per track */
+ head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+@@ -3230,8 +3233,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
+ int cnt;
+
+ /* sanity checking for parameters. */
+- if (g->sect <= 0 ||
+- g->head <= 0 ||
++ if ((int)g->sect <= 0 ||
++ (int)g->head <= 0 ||
++ /* check for overflow in max_sector */
++ (int)(g->sect * g->head) <= 0 ||
++ /* check for zero in F_SECT_PER_TRACK */
++ (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
+ g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
+ /* check if reserved bits are set */
+ (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
+@@ -3375,6 +3382,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ return 0;
+ }
+
++static bool valid_floppy_drive_params(const short autodetect[8],
++ int native_format)
++{
++ size_t floppy_type_size = ARRAY_SIZE(floppy_type);
++ size_t i = 0;
++
++ for (i = 0; i < 8; ++i) {
++ if (autodetect[i] < 0 ||
++ autodetect[i] >= floppy_type_size)
++ return false;
++ }
++
++ if (native_format < 0 || native_format >= floppy_type_size)
++ return false;
++
++ return true;
++}
++
+ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long param)
+ {
+@@ -3501,6 +3526,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
+ SUPBOUND(size, strlen((const char *)outparam) + 1);
+ break;
+ case FDSETDRVPRM:
++ if (!valid_floppy_drive_params(inparam.dp.autodetect,
++ inparam.dp.native_format))
++ return -EINVAL;
+ *UDP = inparam.dp;
+ break;
+ case FDGETDRVPRM:
+@@ -3698,6 +3726,8 @@ static int compat_setdrvprm(int drive,
+ return -EPERM;
+ if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
++ if (!valid_floppy_drive_params(v.autodetect, v.native_format))
++ return -EINVAL;
+ mutex_lock(&floppy_mutex);
+ UDP->cmos = v.cmos;
+ UDP->max_dtr = v.max_dtr;
+diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
+index 447d635c79a2..2a4f8bc4f930 100644
+--- a/drivers/block/null_blk_main.c
++++ b/drivers/block/null_blk_main.c
+@@ -327,11 +327,12 @@ static ssize_t nullb_device_power_store(struct config_item *item,
+ set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ dev->power = newp;
+ } else if (dev->power && !newp) {
+- mutex_lock(&lock);
+- dev->power = newp;
+- null_del_dev(dev->nullb);
+- mutex_unlock(&lock);
+- clear_bit(NULLB_DEV_FL_UP, &dev->flags);
++ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
++ mutex_lock(&lock);
++ dev->power = newp;
++ null_del_dev(dev->nullb);
++ mutex_unlock(&lock);
++ }
+ clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ }
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 50aed5259c2b..6d61f5aafc78 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -264,7 +264,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
++ { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
++ { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
+
+ /* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
+index 82b13faa9422..fe2e307009f4 100644
+--- a/drivers/bluetooth/hci_bcsp.c
++++ b/drivers/bluetooth/hci_bcsp.c
+@@ -744,6 +744,11 @@ static int bcsp_close(struct hci_uart *hu)
+ skb_queue_purge(&bcsp->rel);
+ skb_queue_purge(&bcsp->unrel);
+
++ if (bcsp->rx_skb) {
++ kfree_skb(bcsp->rx_skb);
++ bcsp->rx_skb = NULL;
++ }
++
+ kfree(bcsp);
+ return 0;
+ }
+diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
+index 122a81ab8e48..01ef2fab5764 100644
+--- a/drivers/clk/imx/clk-imx8mm.c
++++ b/drivers/clk/imx/clk-imx8mm.c
+@@ -325,7 +325,7 @@ static const char *imx8mm_dsi_dbi_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+
+ static const char *imx8mm_usdhc3_sels[] = {"osc_24m", "sys_pll1_400m", "sys_pll1_800m", "sys_pll2_500m",
+- "sys_pll3_out", "sys_pll1_266m", "audio_pll2_clk", "sys_pll1_100m", };
++ "sys_pll3_out", "sys_pll1_266m", "audio_pll2_out", "sys_pll1_100m", };
+
+ static const char *imx8mm_csi1_core_sels[] = {"osc_24m", "sys_pll1_266m", "sys_pll2_250m", "sys_pll1_800m",
+ "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", };
+@@ -361,11 +361,11 @@ static const char *imx8mm_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audio_pll1_
+ "sys_pll2_1000m", "sys_pll3_out", "clk_ext3", "audio_pll2_out", };
+
+ static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_800m", "sys_pll2_1000m",
+- "audio_pll2_clk", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", };
++ "audio_pll2_out", "sys_pll2_125m", "sys_pll3_clk", "audio_pll1_out", };
+
+ static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+-static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_clk",
++static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out",
+ "vpu_pll", "sys_pll1_80m", };
+
+ static struct clk *clks[IMX8MM_CLK_END];
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index e8eab16b154b..74cb299f5089 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -206,7 +206,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
+
+ static struct clocksource mct_frc = {
+ .name = "mct-frc",
+- .rating = 400,
++ .rating = 450, /* use value higher than ARM arch timer */
+ .read = exynos4_frc_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+@@ -461,7 +461,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
+ evt->set_state_oneshot_stopped = set_state_shutdown;
+ evt->tick_resume = set_state_shutdown;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+- evt->rating = 450;
++ evt->rating = 500; /* use value higher than ARM arch timer */
+
+ exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+
+diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
+index 1e7ece279730..462be34b41c4 100644
+--- a/drivers/clocksource/timer-tegra20.c
++++ b/drivers/clocksource/timer-tegra20.c
+@@ -288,7 +288,7 @@ static int __init tegra_init_timer(struct device_node *np)
+ pr_err("%s: can't map IRQ for CPU%d\n",
+ __func__, cpu);
+ ret = -EINVAL;
+- goto out;
++ goto out_irq;
+ }
+
+ irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN);
+@@ -298,7 +298,8 @@ static int __init tegra_init_timer(struct device_node *np)
+ if (ret) {
+ pr_err("%s: cannot setup irq %d for CPU%d\n",
+ __func__, cpu_to->clkevt.irq, cpu);
+- ret = -EINVAL;
++ irq_dispose_mapping(cpu_to->clkevt.irq);
++ cpu_to->clkevt.irq = 0;
+ goto out_irq;
+ }
+ }
+@@ -318,6 +319,8 @@ out_irq:
+ irq_dispose_mapping(cpu_to->clkevt.irq);
+ }
+ }
++
++ to->of_base.base = timer_reg_base;
+ out:
+ timer_of_cleanup(to);
+ return ret;
+diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
+index 49f3e0ce242c..cbfc607282f4 100644
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -67,12 +67,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
+ }
+
+ static inline int crypto4xx_crypt(struct skcipher_request *req,
+- const unsigned int ivlen, bool decrypt)
++ const unsigned int ivlen, bool decrypt,
++ bool check_blocksize)
+ {
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
+ __le32 iv[AES_IV_SIZE];
+
++ if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
++ return -EINVAL;
++
+ if (ivlen)
+ crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
+
+@@ -81,24 +85,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
+ ctx->sa_len, 0, NULL);
+ }
+
+-int crypto4xx_encrypt_noiv(struct skcipher_request *req)
++int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
++{
++ return crypto4xx_crypt(req, 0, false, true);
++}
++
++int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
++{
++ return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
++}
++
++int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, 0, false);
++ return crypto4xx_crypt(req, 0, true, true);
+ }
+
+-int crypto4xx_encrypt_iv(struct skcipher_request *req)
++int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, AES_IV_SIZE, false);
++ return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
+ }
+
+-int crypto4xx_decrypt_noiv(struct skcipher_request *req)
++int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, 0, true);
++ return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
+ }
+
+-int crypto4xx_decrypt_iv(struct skcipher_request *req)
++int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
+ {
+- return crypto4xx_crypt(req, AES_IV_SIZE, true);
++ return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
+ }
+
+ /**
+@@ -269,8 +283,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
+ return ret;
+ }
+
+- return encrypt ? crypto4xx_encrypt_iv(req)
+- : crypto4xx_decrypt_iv(req);
++ return encrypt ? crypto4xx_encrypt_iv_stream(req)
++ : crypto4xx_decrypt_iv_stream(req);
+ }
+
+ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index 16d911aaa508..53e2ba9b0c02 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1210,8 +1210,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cbc,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_block,
++ .decrypt = crypto4xx_decrypt_iv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1222,7 +1222,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1230,8 +1230,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_stream,
++ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1243,7 +1243,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1263,7 +1263,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1290,8 +1290,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+- .encrypt = crypto4xx_encrypt_noiv,
+- .decrypt = crypto4xx_decrypt_noiv,
++ .encrypt = crypto4xx_encrypt_noiv_block,
++ .decrypt = crypto4xx_decrypt_noiv_block,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+@@ -1302,7 +1302,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+- .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_module = THIS_MODULE,
+ },
+@@ -1310,8 +1310,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_IV_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+- .encrypt = crypto4xx_encrypt_iv,
+- .decrypt = crypto4xx_decrypt_iv,
++ .encrypt = crypto4xx_encrypt_iv_stream,
++ .decrypt = crypto4xx_decrypt_iv_stream,
+ .init = crypto4xx_sk_init,
+ .exit = crypto4xx_sk_exit,
+ } },
+diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
+index ca1c25c40c23..6b6841359190 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -173,10 +173,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen);
+ int crypto4xx_encrypt_ctr(struct skcipher_request *req);
+ int crypto4xx_decrypt_ctr(struct skcipher_request *req);
+-int crypto4xx_encrypt_iv(struct skcipher_request *req);
+-int crypto4xx_decrypt_iv(struct skcipher_request *req);
+-int crypto4xx_encrypt_noiv(struct skcipher_request *req);
+-int crypto4xx_decrypt_noiv(struct skcipher_request *req);
++int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
++int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
++int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
++int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
++int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
++int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
+ int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
+ int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
+ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
+index 02a6bed3b062..f10a87e541ed 100644
+--- a/drivers/crypto/amcc/crypto4xx_trng.c
++++ b/drivers/crypto/amcc/crypto4xx_trng.c
+@@ -108,7 +108,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+ return;
+
+ err_out:
+- of_node_put(trng);
+ iounmap(dev->trng_base);
+ kfree(rng);
+ dev->trng_base = NULL;
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index c0ece44f303b..af9e473abdfd 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -999,6 +999,7 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ #ifdef DEBUG
+@@ -1023,9 +1024,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+- * ciphertext block. This is used e.g. by the CTS mode.
++ * ciphertext block when running in CBC mode.
+ */
+- if (ivsize)
++ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+ ivsize, ivsize, 0);
+
+@@ -1106,6 +1107,7 @@ static void init_aead_job(struct aead_request *req,
+ if (unlikely(req->src != req->dst)) {
+ if (!edesc->mapped_dst_nents) {
+ dst_dma = 0;
++ out_options = 0;
+ } else if (edesc->mapped_dst_nents == 1) {
+ dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
+@@ -1842,9 +1844,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+- * ciphertext block.
++ * ciphertext block when running in CBC mode.
+ */
+- if (ivsize)
++ if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+ ivsize, ivsize, 0);
+
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index d290d6b41825..116cbc81fa8d 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -1109,7 +1109,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table),
+ out_len, 0);
+- } else if (mapped_dst_nents == 1) {
++ } else if (mapped_dst_nents <= 1) {
+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+ 0);
+ } else {
+diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
+index 2b2980a8a9b9..b949944c8e55 100644
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -559,6 +559,14 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
++ } else if (!mapped_dst_nents) {
++ /*
++ * crypto engine requires the output entry to be present when
++ * "frame list" FD is used.
++ * Since engine does not support FMT=2'b11 (unused entry type),
++ * leaving out_fle zeroized is the best option.
++ */
++ goto skip_out_fle;
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+@@ -570,6 +578,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
++skip_out_fle:
+ return edesc;
+ }
+
+diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
+index 9f08f84cca59..2d9b0485141f 100644
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -18,6 +18,7 @@
+ #include "desc_constr.h"
+
+ #define PREHDR_RSLS_SHIFT 31
++#define PREHDR_ABS BIT(25)
+
+ /*
+ * Use a reasonable backlog of frames (per CPU) as congestion threshold,
+@@ -346,6 +347,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
++ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ dma_sync_single_for_device(qidev, drv_ctx->context_a,
+ sizeof(drv_ctx->sh_desc) +
+@@ -401,6 +403,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
+ */
+ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
+ num_words);
++ drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
+ memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
+ size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
+ hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
+diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
+index cc3e96c4f5fb..f79eede71c62 100644
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -32,56 +32,62 @@ struct ccp_tasklet_data {
+ };
+
+ /* Human-readable error strings */
++#define CCP_MAX_ERROR_CODE 64
+ static char *ccp_error_codes[] = {
+ "",
+- "ERR 01: ILLEGAL_ENGINE",
+- "ERR 02: ILLEGAL_KEY_ID",
+- "ERR 03: ILLEGAL_FUNCTION_TYPE",
+- "ERR 04: ILLEGAL_FUNCTION_MODE",
+- "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
+- "ERR 06: ILLEGAL_FUNCTION_SIZE",
+- "ERR 07: Zlib_MISSING_INIT_EOM",
+- "ERR 08: ILLEGAL_FUNCTION_RSVD",
+- "ERR 09: ILLEGAL_BUFFER_LENGTH",
+- "ERR 10: VLSB_FAULT",
+- "ERR 11: ILLEGAL_MEM_ADDR",
+- "ERR 12: ILLEGAL_MEM_SEL",
+- "ERR 13: ILLEGAL_CONTEXT_ID",
+- "ERR 14: ILLEGAL_KEY_ADDR",
+- "ERR 15: 0xF Reserved",
+- "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
+- "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
+- "ERR 18: CMD_TIMEOUT",
+- "ERR 19: IDMA0_AXI_SLVERR",
+- "ERR 20: IDMA0_AXI_DECERR",
+- "ERR 21: 0x15 Reserved",
+- "ERR 22: IDMA1_AXI_SLAVE_FAULT",
+- "ERR 23: IDMA1_AIXI_DECERR",
+- "ERR 24: 0x18 Reserved",
+- "ERR 25: ZLIBVHB_AXI_SLVERR",
+- "ERR 26: ZLIBVHB_AXI_DECERR",
+- "ERR 27: 0x1B Reserved",
+- "ERR 27: ZLIB_UNEXPECTED_EOM",
+- "ERR 27: ZLIB_EXTRA_DATA",
+- "ERR 30: ZLIB_BTYPE",
+- "ERR 31: ZLIB_UNDEFINED_SYMBOL",
+- "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
+- "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
+- "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
+- "ERR 35: ZLIB_UNCOMPRESSED_LEN",
+- "ERR 36: ZLIB_LIMIT_REACHED",
+- "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
+- "ERR 38: ODMA0_AXI_SLVERR",
+- "ERR 39: ODMA0_AXI_DECERR",
+- "ERR 40: 0x28 Reserved",
+- "ERR 41: ODMA1_AXI_SLVERR",
+- "ERR 42: ODMA1_AXI_DECERR",
+- "ERR 43: LSB_PARITY_ERR",
++ "ILLEGAL_ENGINE",
++ "ILLEGAL_KEY_ID",
++ "ILLEGAL_FUNCTION_TYPE",
++ "ILLEGAL_FUNCTION_MODE",
++ "ILLEGAL_FUNCTION_ENCRYPT",
++ "ILLEGAL_FUNCTION_SIZE",
++ "Zlib_MISSING_INIT_EOM",
++ "ILLEGAL_FUNCTION_RSVD",
++ "ILLEGAL_BUFFER_LENGTH",
++ "VLSB_FAULT",
++ "ILLEGAL_MEM_ADDR",
++ "ILLEGAL_MEM_SEL",
++ "ILLEGAL_CONTEXT_ID",
++ "ILLEGAL_KEY_ADDR",
++ "0xF Reserved",
++ "Zlib_ILLEGAL_MULTI_QUEUE",
++ "Zlib_ILLEGAL_JOBID_CHANGE",
++ "CMD_TIMEOUT",
++ "IDMA0_AXI_SLVERR",
++ "IDMA0_AXI_DECERR",
++ "0x15 Reserved",
++ "IDMA1_AXI_SLAVE_FAULT",
++ "IDMA1_AIXI_DECERR",
++ "0x18 Reserved",
++ "ZLIBVHB_AXI_SLVERR",
++ "ZLIBVHB_AXI_DECERR",
++ "0x1B Reserved",
++ "ZLIB_UNEXPECTED_EOM",
++ "ZLIB_EXTRA_DATA",
++ "ZLIB_BTYPE",
++ "ZLIB_UNDEFINED_SYMBOL",
++ "ZLIB_UNDEFINED_DISTANCE_S",
++ "ZLIB_CODE_LENGTH_SYMBOL",
++ "ZLIB _VHB_ILLEGAL_FETCH",
++ "ZLIB_UNCOMPRESSED_LEN",
++ "ZLIB_LIMIT_REACHED",
++ "ZLIB_CHECKSUM_MISMATCH0",
++ "ODMA0_AXI_SLVERR",
++ "ODMA0_AXI_DECERR",
++ "0x28 Reserved",
++ "ODMA1_AXI_SLVERR",
++ "ODMA1_AXI_DECERR",
+ };
+
+-void ccp_log_error(struct ccp_device *d, int e)
++void ccp_log_error(struct ccp_device *d, unsigned int e)
+ {
+- dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
++ if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
++ return;
++
++ if (e < ARRAY_SIZE(ccp_error_codes))
++ dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
++ else
++ dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
+ }
+
+ /* List of CCPs, CCP count, read-write access lock, and access functions
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index 90523a069bff..5e624920fd99 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -629,7 +629,7 @@ struct ccp5_desc {
+ void ccp_add_device(struct ccp_device *ccp);
+ void ccp_del_device(struct ccp_device *ccp);
+
+-extern void ccp_log_error(struct ccp_device *, int);
++extern void ccp_log_error(struct ccp_device *, unsigned int);
+
+ struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
+ bool ccp_queues_suspended(struct ccp_device *ccp);
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index db8de89d990f..1cbdfc08ca00 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+ int ret;
+@@ -660,9 +661,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
++ jobid = CCP_NEW_JOBID(cmd_q->ccp);
++
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+- op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
++ op.jobid = jobid;
+ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ op.init = 1;
+@@ -813,6 +816,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ final[0] = cpu_to_be64(aes->aad_len * 8);
+ final[1] = cpu_to_be64(ilen * 8);
+
++ memset(&op, 0, sizeof(op));
++ op.cmd_q = cmd_q;
++ op.jobid = jobid;
++ op.sb_key = cmd_q->sb_key; /* Pre-allocated */
++ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
++ op.init = 1;
++ op.u.aes.type = aes->type;
+ op.u.aes.mode = CCP_AES_MODE_GHASH;
+ op.u.aes.action = CCP_AES_GHASHFINAL;
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+@@ -840,7 +850,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ if (ret)
+ goto e_tag;
+
+- ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
++ ret = crypto_memneq(tag.address, final_wa.address,
++ AES_BLOCK_SIZE) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
+index de4be10b172f..ccacdcf07ffc 100644
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
+
+ struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
++ /* Number of result descriptors associated to the request */
++ unsigned int rdescs;
+ bool needs_inv;
+ };
+
+@@ -333,7 +335,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
+
+ *ret = 0;
+
+- do {
++ if (unlikely(!sreq->rdescs))
++ return 0;
++
++ while (sreq->rdescs--) {
+ rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+ if (IS_ERR(rdesc)) {
+ dev_err(priv->dev,
+@@ -346,7 +351,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+ ndesc++;
+- } while (!rdesc->last_seg);
++ }
+
+ safexcel_complete(priv, ring);
+
+@@ -501,6 +506,7 @@ cdesc_rollback:
+ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+ int ring,
+ struct crypto_async_request *base,
++ struct safexcel_cipher_req *sreq,
+ bool *should_complete, int *ret)
+ {
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
+@@ -509,7 +515,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+
+ *ret = 0;
+
+- do {
++ if (unlikely(!sreq->rdescs))
++ return 0;
++
++ while (sreq->rdescs--) {
+ rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+ if (IS_ERR(rdesc)) {
+ dev_err(priv->dev,
+@@ -522,7 +531,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+ *ret = safexcel_rdesc_check_errors(priv, rdesc);
+
+ ndesc++;
+- } while (!rdesc->last_seg);
++ }
+
+ safexcel_complete(priv, ring);
+
+@@ -564,7 +573,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+- err = safexcel_handle_inv_result(priv, ring, async,
++ err = safexcel_handle_inv_result(priv, ring, async, sreq,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async, req->src,
+@@ -587,7 +596,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
+
+ if (sreq->needs_inv) {
+ sreq->needs_inv = false;
+- err = safexcel_handle_inv_result(priv, ring, async,
++ err = safexcel_handle_inv_result(priv, ring, async, sreq,
+ should_complete, ret);
+ } else {
+ err = safexcel_handle_req_result(priv, ring, async, req->src,
+@@ -633,6 +642,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
+ ret = safexcel_send_req(async, ring, sreq, req->src,
+ req->dst, req->cryptlen, 0, 0, req->iv,
+ commands, results);
++
++ sreq->rdescs = *results;
+ return ret;
+ }
+
+@@ -655,6 +666,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
+ req->cryptlen, req->assoclen,
+ crypto_aead_authsize(tfm), req->iv,
+ commands, results);
++ sreq->rdescs = *results;
+ return ret;
+ }
+
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 8c57c5af0930..710e09e28227 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1036,7 +1036,6 @@ static void ipsec_esp_encrypt_done(struct device *dev,
+ unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
+ struct talitos_edesc *edesc;
+- struct scatterlist *sg;
+ void *icvdata;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+@@ -1050,9 +1049,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
+ else
+ icvdata = &edesc->link_tbl[edesc->src_nents +
+ edesc->dst_nents + 2];
+- sg = sg_last(areq->dst, edesc->dst_nents);
+- memcpy((char *)sg_virt(sg) + sg->length - authsize,
+- icvdata, authsize);
++ sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
++ authsize, areq->assoclen + areq->cryptlen);
+ }
+
+ dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+@@ -1070,7 +1068,6 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(authenc);
+ struct talitos_edesc *edesc;
+- struct scatterlist *sg;
+ char *oicv, *icv;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+@@ -1080,9 +1077,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+ ipsec_esp_unmap(dev, edesc, req);
+
+ if (!err) {
++ char icvdata[SHA512_DIGEST_SIZE];
++ int nents = edesc->dst_nents ? : 1;
++ unsigned int len = req->assoclen + req->cryptlen;
++
+ /* auth check */
+- sg = sg_last(req->dst, edesc->dst_nents ? : 1);
+- icv = (char *)sg_virt(sg) + sg->length - authsize;
++ if (nents > 1) {
++ sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
++ len - authsize);
++ icv = icvdata;
++ } else {
++ icv = (char *)sg_virt(req->dst) + len - authsize;
++ }
+
+ if (edesc->dma_len) {
+ if (is_sec1)
+@@ -1498,7 +1504,6 @@ static int aead_decrypt(struct aead_request *req)
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct talitos_private *priv = dev_get_drvdata(ctx->dev);
+ struct talitos_edesc *edesc;
+- struct scatterlist *sg;
+ void *icvdata;
+
+ req->cryptlen -= authsize;
+@@ -1532,9 +1537,8 @@ static int aead_decrypt(struct aead_request *req)
+ else
+ icvdata = &edesc->link_tbl[0];
+
+- sg = sg_last(req->src, edesc->src_nents ? : 1);
+-
+- memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
++ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
++ req->assoclen + req->cryptlen - authsize);
+
+ return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
+ }
+@@ -1606,11 +1610,15 @@ static void ablkcipher_done(struct device *dev,
+ int err)
+ {
+ struct ablkcipher_request *areq = context;
++ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
++ struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
++ unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
+ struct talitos_edesc *edesc;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+ common_nonsnoop_unmap(dev, edesc, areq);
++ memcpy(areq->info, ctx->iv, ivsize);
+
+ kfree(edesc);
+
+@@ -3253,7 +3261,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ alg->cra_priority = t_alg->algt.priority;
+ else
+ alg->cra_priority = TALITOS_CRA_PRIORITY;
+- alg->cra_alignmask = 0;
++ if (has_ftr_sec1(priv))
++ alg->cra_alignmask = 3;
++ else
++ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct talitos_ctx);
+ alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 464174685589..4386ea4b9b5a 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -26,7 +26,7 @@
+ static int edac_mc_log_ue = 1;
+ static int edac_mc_log_ce = 1;
+ static int edac_mc_panic_on_ue;
+-static int edac_mc_poll_msec = 1000;
++static unsigned int edac_mc_poll_msec = 1000;
+
+ /* Getter functions for above */
+ int edac_mc_get_log_ue(void)
+@@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
+ }
+
+ /* this is temporary */
+-int edac_mc_get_poll_msec(void)
++unsigned int edac_mc_get_poll_msec(void)
+ {
+ return edac_mc_poll_msec;
+ }
+
+ static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
+ {
+- unsigned long l;
++ unsigned int i;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+- ret = kstrtoul(val, 0, &l);
++ ret = kstrtouint(val, 0, &i);
+ if (ret)
+ return ret;
+
+- if (l < 1000)
++ if (i < 1000)
+ return -EINVAL;
+
+- *((unsigned long *)kp->arg) = l;
++ *((unsigned int *)kp->arg) = i;
+
+ /* notify edac_mc engine to reset the poll period */
+- edac_mc_reset_delay_period(l);
++ edac_mc_reset_delay_period(i);
+
+ return 0;
+ }
+@@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
+ module_param(edac_mc_log_ce, int, 0644);
+ MODULE_PARM_DESC(edac_mc_log_ce,
+ "Log correctable error to console: 0=off 1=on");
+-module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
++module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
+ &edac_mc_poll_msec, 0644);
+ MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
+
+@@ -404,6 +404,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
+ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ struct csrow_info *csrow, int index)
+ {
++ int err;
++
+ csrow->dev.type = &csrow_attr_type;
+ csrow->dev.groups = csrow_dev_groups;
+ device_initialize(&csrow->dev);
+@@ -415,7 +417,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ edac_dbg(0, "creating (virtual) csrow node %s\n",
+ dev_name(&csrow->dev));
+
+- return device_add(&csrow->dev);
++ err = device_add(&csrow->dev);
++ if (err)
++ put_device(&csrow->dev);
++
++ return err;
+ }
+
+ /* Create a CSROW object under specified edac_mc_device */
+@@ -443,7 +449,8 @@ error:
+ csrow = mci->csrows[i];
+ if (!nr_pages_per_csrow(csrow))
+ continue;
+- put_device(&mci->csrows[i]->dev);
++
++ device_del(&mci->csrows[i]->dev);
+ }
+
+ return err;
+@@ -645,9 +652,11 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
+ dev_set_drvdata(&dimm->dev, dimm);
+ pm_runtime_forbid(&mci->dev);
+
+- err = device_add(&dimm->dev);
++ err = device_add(&dimm->dev);
++ if (err)
++ put_device(&dimm->dev);
+
+- edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
++ edac_dbg(0, "created rank/dimm device %s\n", dev_name(&dimm->dev));
+
+ return err;
+ }
+@@ -928,6 +937,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
+ err = device_add(&mci->dev);
+ if (err < 0) {
+ edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
++ put_device(&mci->dev);
+ goto out;
+ }
+
+diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
+index dd7d0b509aa3..75528f07abd5 100644
+--- a/drivers/edac/edac_module.h
++++ b/drivers/edac/edac_module.h
+@@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void);
+ extern int edac_mc_get_log_ce(void);
+ extern int edac_mc_get_panic_on_ue(void);
+ extern int edac_get_poll_msec(void);
+-extern int edac_mc_get_poll_msec(void);
++extern unsigned int edac_mc_get_poll_msec(void);
+
+ unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned len);
+diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
+index 9276ef616430..8591c410ecaa 100644
+--- a/drivers/gpio/gpio-omap.c
++++ b/drivers/gpio/gpio-omap.c
+@@ -829,9 +829,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ bank->irq_usage &= ~(BIT(offset));
+- omap_set_gpio_irqenable(bank, offset, 0);
+- omap_clear_gpio_irqstatus(bank, offset);
+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
++ omap_clear_gpio_irqstatus(bank, offset);
++ omap_set_gpio_irqenable(bank, offset, 0);
+ if (!LINE_USED(bank->mod_usage, offset))
+ omap_clear_gpio_debounce(bank, offset);
+ omap_disable_gpio_module(bank, offset);
+@@ -867,8 +867,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+- omap_set_gpio_irqenable(bank, offset, 0);
+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
++ omap_set_gpio_irqenable(bank, offset, 0);
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+@@ -880,9 +880,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+- if (trigger)
+- omap_set_gpio_triggering(bank, offset, trigger);
+-
+ omap_set_gpio_irqenable(bank, offset, 1);
+
+ /*
+@@ -890,9 +887,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
+ * is cleared, thus after the handler has run. OMAP4 needs this done
+ * after enabling the interrupt to clear the wakeup status.
+ */
+- if (bank->level_mask & BIT(offset))
++ if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
++ trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ omap_clear_gpio_irqstatus(bank, offset);
+
++ if (trigger)
++ omap_set_gpio_triggering(bank, offset, trigger);
++
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+ }
+
+@@ -1274,13 +1275,23 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
+ {
+ struct device *dev = bank->chip.parent;
+ void __iomem *base = bank->base;
+- u32 nowake;
++ u32 mask, nowake;
+
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
+
+ if (!bank->enabled_non_wakeup_gpios)
+ goto update_gpio_context_count;
+
++ /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
++ mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
++ mask &= ~bank->context.risingdetect;
++ bank->saved_datain |= mask;
++
++ /* Check for pending EDGE_RISING, ignore EDGE_BOTH */
++ mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
++ mask &= ~bank->context.fallingdetect;
++ bank->saved_datain &= ~mask;
++
+ if (!may_lose_context)
+ goto update_gpio_context_count;
+
+@@ -1453,6 +1464,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
+ .clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
+ .irqstatus = OMAP4_GPIO_IRQSTATUS0,
+ .irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
++ .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
++ .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
+ .irqenable = OMAP4_GPIO_IRQSTATUSSET0,
+ .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
+ .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index e013d417a936..bb3104d2eb0c 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3025,7 +3025,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
+ int gpiod_get_raw_value(const struct gpio_desc *desc)
+ {
+ VALIDATE_DESC(desc);
+- /* Should be using gpio_get_value_cansleep() */
++ /* Should be using gpiod_get_raw_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
+ return gpiod_get_raw_value_commit(desc);
+ }
+@@ -3046,7 +3046,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
+ int value;
+
+ VALIDATE_DESC(desc);
+- /* Should be using gpio_get_value_cansleep() */
++ /* Should be using gpiod_get_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
+
+ value = gpiod_get_raw_value_commit(desc);
+@@ -3317,7 +3317,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+ {
+ VALIDATE_DESC_VOID(desc);
+- /* Should be using gpiod_set_value_cansleep() */
++ /* Should be using gpiod_set_raw_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
+ gpiod_set_raw_value_commit(desc, value);
+ }
+@@ -3358,6 +3358,7 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+ void gpiod_set_value(struct gpio_desc *desc, int value)
+ {
+ VALIDATE_DESC_VOID(desc);
++ /* Should be using gpiod_set_value_cansleep() */
+ WARN_ON(desc->gdev->chip->can_sleep);
+ gpiod_set_value_nocheck(desc, value);
+ }
+@@ -4244,8 +4245,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+- * provided @dflags. If the node does not have the requested GPIO
+- * property, NULL is returned.
++ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+@@ -4267,9 +4267,6 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+- /* If it is not there, just return NULL */
+- if (PTR_ERR(desc) == -ENOENT)
+- return NULL;
+ return desc;
+ }
+
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index e804ac5dec02..e9d75549cde8 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -1339,6 +1339,7 @@ MODULE_PARM_DESC(edid_fixup,
+
+ static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid);
++static int validate_displayid(u8 *displayid, int length, int idx);
+
+ static int drm_edid_block_checksum(const u8 *raw_edid)
+ {
+@@ -2922,16 +2923,46 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
+ return edid_ext;
+ }
+
+-static u8 *drm_find_cea_extension(const struct edid *edid)
+-{
+- return drm_find_edid_extension(edid, CEA_EXT);
+-}
+
+ static u8 *drm_find_displayid_extension(const struct edid *edid)
+ {
+ return drm_find_edid_extension(edid, DISPLAYID_EXT);
+ }
+
++static u8 *drm_find_cea_extension(const struct edid *edid)
++{
++ int ret;
++ int idx = 1;
++ int length = EDID_LENGTH;
++ struct displayid_block *block;
++ u8 *cea;
++ u8 *displayid;
++
++ /* Look for a top level CEA extension block */
++ cea = drm_find_edid_extension(edid, CEA_EXT);
++ if (cea)
++ return cea;
++
++ /* CEA blocks can also be found embedded in a DisplayID block */
++ displayid = drm_find_displayid_extension(edid);
++ if (!displayid)
++ return NULL;
++
++ ret = validate_displayid(displayid, length, idx);
++ if (ret)
++ return NULL;
++
++ idx += sizeof(struct displayid_hdr);
++ for_each_displayid_db(displayid, block, idx, length) {
++ if (block->tag == DATA_BLOCK_CTA) {
++ cea = (u8 *)block;
++ break;
++ }
++ }
++
++ return cea;
++}
++
+ /*
+ * Calculate the alternate clock for the CEA mode
+ * (60Hz vs. 59.94Hz etc.)
+@@ -3655,13 +3686,38 @@ cea_revision(const u8 *cea)
+ static int
+ cea_db_offsets(const u8 *cea, int *start, int *end)
+ {
+- /* Data block offset in CEA extension block */
+- *start = 4;
+- *end = cea[2];
+- if (*end == 0)
+- *end = 127;
+- if (*end < 4 || *end > 127)
+- return -ERANGE;
++ /* DisplayID CTA extension blocks and top-level CEA EDID
++ * block header definitions differ in the following bytes:
++ * 1) Byte 2 of the header specifies length differently,
++ * 2) Byte 3 is only present in the CEA top level block.
++ *
++ * The different definitions for byte 2 follow.
++ *
++ * DisplayID CTA extension block defines byte 2 as:
++ * Number of payload bytes
++ *
++ * CEA EDID block defines byte 2 as:
++ * Byte number (decimal) within this block where the 18-byte
++ * DTDs begin. If no non-DTD data is present in this extension
++ * block, the value should be set to 04h (the byte after next).
++ * If set to 00h, there are no DTDs present in this block and
++ * no non-DTD data.
++ */
++ if (cea[0] == DATA_BLOCK_CTA) {
++ *start = 3;
++ *end = *start + cea[2];
++ } else if (cea[0] == CEA_EXT) {
++ /* Data block offset in CEA extension block */
++ *start = 4;
++ *end = cea[2];
++ if (*end == 0)
++ *end = 127;
++ if (*end < 4 || *end > 127)
++ return -ERANGE;
++ } else {
++ return -ENOTSUPP;
++ }
++
+ return 0;
+ }
+
+@@ -5279,6 +5335,9 @@ static int drm_parse_display_id(struct drm_connector *connector,
+ case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+ /* handled in mode gathering code. */
+ break;
++ case DATA_BLOCK_CTA:
++ /* handled in the cea parser code. */
++ break;
+ default:
+ DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
+ break;
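+
For orientation, the new cea_db_offsets() contract is that *start and *end bound the data blocks regardless of whether the bytes are a top-level CEA extension (start = 4, end taken from the DTD offset byte) or an embedded DisplayID CTA block (start = 3, end = start + payload length). A caller then walks the blocks identically in both cases. A minimal userspace sketch of that walk, assuming the standard CEA-861 data block header (tag in bits 7:5, payload length in bits 4:0):

    #include <stdint.h>
    #include <stdio.h>

    #define CEA_DB_TAG(b)   (((b)[0] >> 5) & 0x7)
    #define CEA_DB_LEN(b)   ((b)[0] & 0x1f)

    /* Visit each data block in cea[start..end), as the drm parser does. */
    static void walk_cea_data_blocks(const uint8_t *cea, int start, int end)
    {
        int i;

        for (i = start;
             i < end && i + CEA_DB_LEN(&cea[i]) < end;
             i += CEA_DB_LEN(&cea[i]) + 1)
            printf("tag %d, %d payload bytes\n",
                   CEA_DB_TAG(&cea[i]), CEA_DB_LEN(&cea[i]));
    }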
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+index ecacb22834d7..719345074711 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+@@ -184,6 +184,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
+ return 0;
+ }
+
++static int
++nvkm_i2c_preinit(struct nvkm_subdev *subdev)
++{
++ struct nvkm_i2c *i2c = nvkm_i2c(subdev);
++ struct nvkm_i2c_bus *bus;
++ struct nvkm_i2c_pad *pad;
++
++ /*
++ * We init our i2c busses as early as possible, since they may be
++ * needed by the vbios init scripts on some cards
++ */
++ list_for_each_entry(pad, &i2c->pad, head)
++ nvkm_i2c_pad_init(pad);
++ list_for_each_entry(bus, &i2c->bus, head)
++ nvkm_i2c_bus_init(bus);
++
++ return 0;
++}
++
+ static int
+ nvkm_i2c_init(struct nvkm_subdev *subdev)
+ {
+@@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
+ static const struct nvkm_subdev_func
+ nvkm_i2c = {
+ .dtor = nvkm_i2c_dtor,
++ .preinit = nvkm_i2c_preinit,
+ .init = nvkm_i2c_init,
+ .fini = nvkm_i2c_fini,
+ .intr = nvkm_i2c_intr,
+diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
+index 89c3961f0fce..3428b0e72bc5 100644
+--- a/drivers/gpu/ipu-v3/ipu-ic.c
++++ b/drivers/gpu/ipu-v3/ipu-ic.c
+@@ -251,7 +251,7 @@ static int init_csc(struct ipu_ic *ic,
+ writel(param, base++);
+
+ param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
+- (params->sat << 9);
++ (params->sat << 10);
+ writel(param, base++);
+
+ param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 83dd3a2a7316..3299b1474d1b 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -304,6 +304,9 @@ static void wacom_feature_mapping(struct hid_device *hdev,
+ wacom_hid_usage_quirk(hdev, field, usage);
+
+ switch (equivalent_usage) {
++ case WACOM_HID_WD_TOUCH_RING_SETTING:
++ wacom->generic_has_leds = true;
++ break;
+ case HID_DG_CONTACTMAX:
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max) {
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 43f6da357165..489436503e49 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -1926,8 +1926,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+ break;
+ case WACOM_HID_WD_BUTTONCENTER:
+- wacom->generic_has_leds = true;
+- /* fall through */
+ case WACOM_HID_WD_BUTTONHOME:
+ case WACOM_HID_WD_BUTTONUP:
+ case WACOM_HID_WD_BUTTONDOWN:
+@@ -2119,14 +2117,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
+ bool active = wacom_wac->hid_data.inrange_state != 0;
+
+ /* report prox for expresskey events */
+- if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
+- wacom_wac->hid_data.pad_input_event_flag) {
++ if (wacom_wac->hid_data.pad_input_event_flag) {
+ input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
+ input_sync(input);
+ if (!active)
+ wacom_wac->hid_data.pad_input_event_flag = false;
+ }
+-
+ }
+
+ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
+@@ -2702,9 +2698,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
+ if (report->type != HID_INPUT_REPORT)
+ return -1;
+
+- if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
+- wacom_wac_pad_report(hdev, report, field);
+- else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
++ if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
+ wacom_wac_pen_report(hdev, report);
+ else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
+ wacom_wac_finger_report(hdev, report);
+@@ -2718,7 +2712,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_field *field;
+ bool pad_in_hid_field = false, pen_in_hid_field = false,
+- finger_in_hid_field = false;
++ finger_in_hid_field = false, true_pad = false;
+ int r;
+ int prev_collection = -1;
+
+@@ -2734,6 +2728,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
+ pen_in_hid_field = true;
+ if (WACOM_FINGER_FIELD(field))
+ finger_in_hid_field = true;
++ if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
++ true_pad = true;
+ }
+
+ wacom_wac_battery_pre_report(hdev, report);
+@@ -2757,6 +2753,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
+ }
+
+ wacom_wac_battery_report(hdev, report);
++
++ if (true_pad && wacom->wacom_wac.pad_input)
++ wacom_wac_pad_report(hdev, report, field);
+ }
+
+ static int wacom_bpt_touch(struct wacom_wac *wacom)
+@@ -3713,7 +3712,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
+ 0, 5920, 4, 0);
+ }
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
+- input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
++ input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
+
+ /* fall through */
+
+diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
+index cac68d1c20c5..420a19bfaae3 100644
+--- a/drivers/hid/wacom_wac.h
++++ b/drivers/hid/wacom_wac.h
+@@ -141,6 +141,7 @@
+ #define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
+ #define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
+ #define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
++#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
+ #define WACOM_HID_UP_G9 0xff090000
+ #define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
+ #define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
+diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
+index 81bb54fa3ce8..cfd48c81b9d9 100644
+--- a/drivers/hwtracing/intel_th/msu.c
++++ b/drivers/hwtracing/intel_th/msu.c
+@@ -667,7 +667,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
+ goto err_out;
+
+ ret = -ENOMEM;
+- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
++ page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
+ if (!page)
+ goto err_free_sgt;
+
+@@ -767,6 +767,30 @@ err_nomem:
+ return -ENOMEM;
+ }
+
++#ifdef CONFIG_X86
++static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_blocks)
++{
++ int i;
++
++ for (i = 0; i < nr_blocks; i++)
++ /* Set the page as uncached */
++ set_memory_uc((unsigned long)msc_win_block(win, i), 1);
++}
++
++static void msc_buffer_set_wb(struct msc_window *win)
++{
++ int i;
++
++ for (i = 0; i < win->nr_blocks; i++)
++ /* Reset the page to write-back */
++ set_memory_wb((unsigned long)msc_win_block(win, i), 1);
++}
++#else /* !X86 */
++static inline void
++msc_buffer_set_uc(struct msc_window *win, unsigned int nr_blocks) {}
++static inline void msc_buffer_set_wb(struct msc_window *win) {}
++#endif /* CONFIG_X86 */
++
+ /**
+ * msc_buffer_win_alloc() - alloc a window for a multiblock mode
+ * @msc: MSC device
+@@ -780,7 +804,7 @@ err_nomem:
+ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
+ {
+ struct msc_window *win;
+- int ret = -ENOMEM, i;
++ int ret = -ENOMEM;
+
+ if (!nr_blocks)
+ return 0;
+@@ -811,11 +835,7 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
+ if (ret < 0)
+ goto err_nomem;
+
+-#ifdef CONFIG_X86
+- for (i = 0; i < ret; i++)
+- /* Set the page as uncached */
+- set_memory_uc((unsigned long)msc_win_block(win, i), 1);
+-#endif
++ msc_buffer_set_uc(win, ret);
+
+ win->nr_blocks = ret;
+
+@@ -860,8 +880,6 @@ static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+ */
+ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+ {
+- int i;
+-
+ msc->nr_pages -= win->nr_blocks;
+
+ list_del(&win->entry);
+@@ -870,11 +888,7 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+ msc->base_addr = 0;
+ }
+
+-#ifdef CONFIG_X86
+- for (i = 0; i < win->nr_blocks; i++)
+- /* Reset the page to write-back */
+- set_memory_wb((unsigned long)msc_win_block(win, i), 1);
+-#endif
++ msc_buffer_set_wb(win);
+
+ __msc_buffer_win_free(msc, win);
+
+@@ -1386,10 +1400,9 @@ static int intel_th_msc_init(struct msc *msc)
+
+ static void msc_win_switch(struct msc *msc)
+ {
+- struct msc_window *last, *first;
++ struct msc_window *first;
+
+ first = list_first_entry(&msc->win_list, struct msc_window, entry);
+- last = list_last_entry(&msc->win_list, struct msc_window, entry);
+
+ if (msc_is_last_win(msc->cur_win))
+ msc->cur_win = first;
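+
The msu.c refactor above is also an instance of a standard kernel idiom: rather than scattering #ifdef CONFIG_X86 through the allocation and free paths, the arch-specific work moves into small helpers whose !X86 variants are empty static inlines, keeping the call sites unconditional. The shape of the idiom, sketched with an illustrative helper name:

    #ifdef CONFIG_X86
    #include <asm/set_memory.h>

    static void my_pages_set_uc(unsigned long addr, int nr_pages)
    {
        set_memory_uc(addr, nr_pages);  /* mark the range uncached */
    }
    #else /* !CONFIG_X86 */
    static inline void my_pages_set_uc(unsigned long addr, int nr_pages) {}
    #endif /* CONFIG_X86 */

Call sites then read my_pages_set_uc((unsigned long)buf, nr_pages); with no conditional compilation, and the compiler discards the no-op entirely on other architectures.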
+diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
+index f1228708f2a2..c0378c3de9a4 100644
+--- a/drivers/hwtracing/intel_th/pci.c
++++ b/drivers/hwtracing/intel_th/pci.c
+@@ -194,6 +194,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
++ {
++ /* Ice Lake NNPI */
++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
++ .driver_data = (kernel_ulong_t)&intel_th_2x,
++ },
+ { 0 },
+ };
+
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 5f4bd52121fe..7837ea67f1f8 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -91,6 +91,12 @@ void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+ up_read(&bus->lock);
+ }
+
++static struct i3c_master_controller *
++i3c_bus_to_i3c_master(struct i3c_bus *i3cbus)
++{
++ return container_of(i3cbus, struct i3c_master_controller, bus);
++}
++
+ static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+ {
+ return container_of(dev, struct i3c_master_controller, dev);
+@@ -565,20 +571,38 @@ static const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+ };
+
+-int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
++int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
++ unsigned long max_i2c_scl_rate)
+ {
+- i3cbus->mode = mode;
++ struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus);
+
+- if (!i3cbus->scl_rate.i3c)
+- i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
++ i3cbus->mode = mode;
+
+- if (!i3cbus->scl_rate.i2c) {
+- if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
+- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+- else
+- i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
++ switch (i3cbus->mode) {
++ case I3C_BUS_MODE_PURE:
++ if (!i3cbus->scl_rate.i3c)
++ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
++ break;
++ case I3C_BUS_MODE_MIXED_FAST:
++ if (!i3cbus->scl_rate.i3c)
++ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
++ if (!i3cbus->scl_rate.i2c)
++ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
++ break;
++ case I3C_BUS_MODE_MIXED_SLOW:
++ if (!i3cbus->scl_rate.i2c)
++ i3cbus->scl_rate.i2c = max_i2c_scl_rate;
++ if (!i3cbus->scl_rate.i3c ||
++ i3cbus->scl_rate.i3c > i3cbus->scl_rate.i2c)
++ i3cbus->scl_rate.i3c = i3cbus->scl_rate.i2c;
++ break;
++ default:
++ return -EINVAL;
+ }
+
++ dev_dbg(&master->dev, "i2c-scl = %ld Hz i3c-scl = %ld Hz\n",
++ i3cbus->scl_rate.i2c, i3cbus->scl_rate.i3c);
++
+ /*
+ * I3C/I2C frequency may have been overridden, check that user-provided
+ * values are not exceeding max possible frequency.
+@@ -1966,9 +1990,6 @@ of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+ /* LVR is encoded in reg[2]. */
+ boardinfo->lvr = reg[2];
+
+- if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
+- master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+-
+ list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+ of_node_get(node);
+
+@@ -2417,6 +2438,7 @@ int i3c_master_register(struct i3c_master_controller *master,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary)
+ {
++ unsigned long i2c_scl_rate = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+ struct i2c_dev_boardinfo *i2cbi;
+@@ -2466,9 +2488,12 @@ int i3c_master_register(struct i3c_master_controller *master,
+ ret = -EINVAL;
+ goto err_put_dev;
+ }
++
++ if (i2cbi->lvr & I3C_LVR_I2C_FM_MODE)
++ i2c_scl_rate = I3C_BUS_I2C_FM_SCL_RATE;
+ }
+
+- ret = i3c_bus_set_mode(i3cbus, mode);
++ ret = i3c_bus_set_mode(i3cbus, mode, i2c_scl_rate);
+ if (ret)
+ goto err_put_dev;
+
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index f962b5bbfa40..e4b13a32692a 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -151,6 +151,7 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ {
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
++ int rc;
+
+ if (mmu_notifier_range_blockable(range))
+ down_read(&per_mm->umem_rwsem);
+@@ -167,11 +168,14 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
+ return 0;
+ }
+
+- return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
+- range->end,
+- invalidate_range_start_trampoline,
+- mmu_notifier_range_blockable(range),
+- NULL);
++ rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, range->start,
++ range->end,
++ invalidate_range_start_trampoline,
++ mmu_notifier_range_blockable(range),
++ NULL);
++ if (rc)
++ up_read(&per_mm->umem_rwsem);
++ return rc;
+ }
+
+ static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 340290b883fe..a6713a3b6c80 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1043,15 +1043,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ }
+
+ if (MLX5_CAP_GEN(mdev, tag_matching)) {
+- props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ props->tm_caps.max_num_tags =
+ (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
+- props->tm_caps.flags = IB_TM_CAP_RC;
+ props->tm_caps.max_ops =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
+ }
+
++ if (MLX5_CAP_GEN(mdev, tag_matching) &&
++ MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
++ props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
++ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
++ }
++
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 9b5e11d3fb85..04ea7db08e87 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -1998,6 +1998,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
+ return err;
+
+ ivf->vf = vf;
++ memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 4305da2c9037..0b09d0cd9b3c 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -3483,13 +3483,14 @@ static const match_table_t srp_opt_tokens = {
+ * @net: [in] Network namespace.
+ * @sa: [out] Address family, IP address and port number.
+ * @addr_port_str: [in] IP address and port number.
++ * @has_port: [out] Whether or not @addr_port_str includes a port number.
+ *
+ * Parse the following address formats:
+ * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
+ * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
+ */
+ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+- const char *addr_port_str)
++ const char *addr_port_str, bool *has_port)
+ {
+ char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str;
+@@ -3498,9 +3499,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ if (!addr)
+ return -ENOMEM;
+ port_str = strrchr(addr, ':');
+- if (!port_str)
+- return -EINVAL;
+- *port_str++ = '\0';
++ if (port_str && strchr(port_str, ']'))
++ port_str = NULL;
++ if (port_str)
++ *port_str++ = '\0';
++ if (has_port)
++ *has_port = port_str != NULL;
+ ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
+ if (ret && addr[0]) {
+ addr_end = addr + strlen(addr) - 1;
+@@ -3522,6 +3526,7 @@ static int srp_parse_options(struct net *net, const char *buf,
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
++ bool has_port;
+ int opt_mask = 0;
+ int token;
+ int ret = -EINVAL;
+@@ -3620,7 +3625,8 @@ static int srp_parse_options(struct net *net, const char *buf,
+ ret = -ENOMEM;
+ goto out;
+ }
+- ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
++ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
++ NULL);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+@@ -3636,7 +3642,10 @@ static int srp_parse_options(struct net *net, const char *buf,
+ ret = -ENOMEM;
+ goto out;
+ }
+- ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
++ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
++ &has_port);
++ if (!has_port)
++ ret = -EINVAL;
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
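+
The srp_parse_in() change encodes two rules: the port is now optional (reported through *has_port), and the last ':' only counts as a port separator when no ']' follows it, since a ':' inside a bracketed IPv6 literal such as [1::2:3%4] is part of the address. A self-contained userspace sketch of the split (error handling trimmed; split_addr_port is an illustrative name):

    #include <stdlib.h>
    #include <string.h>

    /* Split "addr[:port]" in place; *port is NULL when no port given. */
    static char *split_addr_port(const char *in, char **port)
    {
        char *addr = strdup(in);
        char *p = addr ? strrchr(addr, ':') : NULL;

        if (p && strchr(p, ']'))    /* ':' inside [ipv6] literal */
            p = NULL;
        if (p)
            *p++ = '\0';
        *port = p;
        return addr;                /* caller frees */
    }

The destination parameter then rejects the no-port case explicitly, while the source parameter accepts it.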
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index 8996323ce8d9..34700eda0429 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -21,6 +21,7 @@
+
+ #include "psmouse.h"
+ #include "alps.h"
++#include "trackpoint.h"
+
+ /*
+ * Definitions for ALPS version 3 and 4 command mode protocol
+@@ -2861,6 +2862,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
+ return NULL;
+ }
+
++static bool alps_is_cs19_trackpoint(struct psmouse *psmouse)
++{
++ u8 param[2] = { 0 };
++
++ if (ps2_command(&psmouse->ps2dev,
++ param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
++ return false;
++
++ /*
++ * param[0] contains the trackpoint device variant_id while
++ * param[1] contains the firmware_id. So far all alps
++ * trackpoint-only devices have their variant_ids equal to
++ * TP_VARIANT_ALPS and their firmware_ids in the 0x20~0x2f range.
++ */
++ return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20);
++}
++
+ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
+ {
+ const struct alps_protocol_info *protocol;
+@@ -3161,6 +3179,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
+ if (error)
+ return error;
+
++ /*
++ * ALPS cs19 is a trackpoint-only device, and uses a different
++ * protocol than DualPoint ones, so we return -EINVAL here and let
++ * trackpoint.c drive this device. If the trackpoint driver is not
++ * enabled, the device will fall back to a bare PS/2 mouse.
++ * If ps2_command() fails here, we depend on the immediately
++ * following psmouse_reset() to reset the device to normal state.
++ */
++ if (alps_is_cs19_trackpoint(psmouse)) {
++ psmouse_dbg(psmouse,
++ "ALPS CS19 trackpoint-only device detected, ignoring\n");
++ return -EINVAL;
++ }
++
+ /*
+ * Reset the device to make sure it is fully operational:
+ * on some laptops, like certain Dell Latitudes, we may
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 1080c0c49815..7f8f4780b511 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -176,6 +176,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0093", /* T480 */
+ "LEN0096", /* X280 */
+ "LEN0097", /* X280 -> ALPS trackpoint */
++ "LEN009b", /* T580 */
+ "LEN200f", /* T450s */
+ "LEN2054", /* E480 */
+ "LEN2055", /* E580 */
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 4b8b9d7aa75e..35031228a6d0 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
+
+ /* Max size of a single report */
+ #define REPORT_MAX_SIZE 10
++#define MAX_COLLECTION_LEVELS 10
+
+
+ /* Bitmask whether pen is in range */
+@@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ char maintype = 'x';
+ char globtype[12];
+ int indent = 0;
+- char indentstr[10] = "";
+-
++ char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
+
+ dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
+
+@@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ case TAG_MAIN_COL_START:
+ maintype = 'S';
+
++ if (indent == MAX_COLLECTION_LEVELS) {
++ dev_err(ddev, "Collection level %d would exceed limit of %d\n",
++ indent + 1,
++ MAX_COLLECTION_LEVELS);
++ break;
++ }
++
+ if (data == 0) {
+ dev_dbg(ddev, "======>>>>>> Physical\n");
+ strcpy(globtype, "Physical");
+@@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ break;
+
+ case TAG_MAIN_COL_END:
+- dev_dbg(ddev, "<<<<<<======\n");
+ maintype = 'E';
++
++ if (indent == 0) {
++ dev_err(ddev, "Collection level already at zero\n");
++ break;
++ }
++
++ dev_dbg(ddev, "<<<<<<======\n");
++
+ indent--;
+ for (x = 0; x < indent; x++)
+ indentstr[x] = '-';
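+
The gtco.c change is a bounds fix: each collection-start tag appended one '-' to indentstr, a 10-byte stack buffer, with no depth limit, so a crafted HID descriptor with ten or more nested collections overflowed it (and an unmatched end tag could drive indent negative). The fix sizes the buffer to MAX_COLLECTION_LEVELS + 1 and clamps both directions. The invariant, sketched as a standalone counter:

    #include <stdbool.h>

    #define MAX_COLLECTION_LEVELS 10

    /* Depth stays in [0, MAX_COLLECTION_LEVELS]; rejects over/underflow. */
    static bool collection_push(int *depth)
    {
        if (*depth == MAX_COLLECTION_LEVELS)
            return false;   /* would overflow the indent buffer */
        (*depth)++;
        return true;
    }

    static bool collection_pop(int *depth)
    {
        if (*depth == 0)
            return false;   /* unbalanced collection-end tag */
        (*depth)--;
        return true;
    }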
+diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
+index 4d5a694f02c2..0fee8f7957ec 100644
+--- a/drivers/iommu/arm-smmu-v3.c
++++ b/drivers/iommu/arm-smmu-v3.c
+@@ -1884,9 +1884,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)
+
+ static void arm_smmu_disable_ats(struct arm_smmu_master *master)
+ {
++ struct arm_smmu_cmdq_ent cmd;
++
+ if (!master->ats_enabled || !dev_is_pci(master->dev))
+ return;
+
++ arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
++ arm_smmu_atc_inv_master(master, &cmd);
+ pci_disable_ats(to_pci_dev(master->dev));
+ master->ats_enabled = false;
+ }
+@@ -1906,7 +1910,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
+ master->domain = NULL;
+ arm_smmu_install_ste_for_dev(master);
+
+- /* Disabling ATS invalidates all ATC entries */
+ arm_smmu_disable_ats(master);
+ }
+
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 9f0a2844371c..30db41e9f15c 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -225,18 +225,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ pos = pos->next;
+ } else if ((start >= a) && (end <= b)) {
+ if (new->type == type)
+- goto done;
++ return 0;
+ else
+ pos = pos->next;
+ } else {
+ if (new->type == type) {
+ phys_addr_t new_start = min(a, start);
+ phys_addr_t new_end = max(b, end);
++ int ret;
+
+ list_del(&entry->list);
+ entry->start = new_start;
+ entry->length = new_end - new_start + 1;
+- iommu_insert_resv_region(entry, regions);
++ ret = iommu_insert_resv_region(entry, regions);
++ kfree(entry);
++ return ret;
+ } else {
+ pos = pos->next;
+ }
+@@ -249,7 +252,6 @@ insert:
+ return -ENOMEM;
+
+ list_add_tail(®ion->list, pos);
+-done:
+ return 0;
+ }
+
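+
The iommu_insert_resv_region() change fixes a leak and a swallowed error: the overlapping entry is unlinked, widened, and re-inserted through recursion (which allocates a fresh copy), so the unlinked template must be kfree()d and the recursive return value propagated. The same allocate-copy-and-free discipline in a self-contained userspace sketch of sorted-interval insertion with merging (simplified: it ignores region types and merges adjacent ranges):

    #include <stdlib.h>

    struct region {
        unsigned long start, end;
        struct region *next;
    };

    static void insert_region(struct region **head, unsigned long start,
                              unsigned long end)
    {
        struct region **pp = head, *n;

        /* skip nodes that end before the new range begins */
        while (*pp && (*pp)->end + 1 < start)
            pp = &(*pp)->next;

        /* absorb every overlapping or adjacent node */
        while ((n = *pp) && n->start <= end + 1) {
            if (n->start < start)
                start = n->start;
            if (n->end > end)
                end = n->end;
            *pp = n->next;
            free(n);        /* the missing free was the leak */
        }

        n = malloc(sizeof(*n));
        if (!n)
            return;
        n->start = start;
        n->end = end;
        n->next = *pp;
        *pp = n;
    }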
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 6377cb864f4c..f3e44d6d9255 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -461,8 +461,12 @@ static void gic_deactivate_unhandled(u32 irqnr)
+
+ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+ {
++ bool irqs_enabled = interrupts_enabled(regs);
+ int err;
+
++ if (irqs_enabled)
++ nmi_enter();
++
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_eoir(irqnr);
+ /*
+@@ -474,6 +478,9 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+ err = handle_domain_nmi(gic_data.domain, irqnr, regs);
+ if (err)
+ gic_deactivate_unhandled(irqnr);
++
++ if (irqs_enabled)
++ nmi_exit();
+ }
+
+ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
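+
The GICv3 change recognizes that a pseudo-NMI can fire while normal interrupts are enabled, i.e. from a context the kernel does not yet consider NMI; in that case the handler must be bracketed with nmi_enter()/nmi_exit() so RCU, lockdep and printk treat the section as NMI context. Stripped to its shape (interrupts_enabled() is the arm pt_regs helper used above):

    bool irqs_enabled = interrupts_enabled(regs);

    if (irqs_enabled)
        nmi_enter();        /* declare NMI context on entry */

    /* ... acknowledge and handle the NMI ... */

    if (irqs_enabled)
        nmi_exit();         /* must pair exactly with nmi_enter() */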
+diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
+index 8eb92eb98f54..dcdc23b9dce6 100644
+--- a/drivers/irqchip/irq-meson-gpio.c
++++ b/drivers/irqchip/irq-meson-gpio.c
+@@ -60,6 +60,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
+ { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+ { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+ { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
++ { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
+ { }
+ };
+
+diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
+index 7d555b110ecd..a600934fdd9c 100644
+--- a/drivers/lightnvm/core.c
++++ b/drivers/lightnvm/core.c
+@@ -478,7 +478,7 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
+ */
+ static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
+ {
+- struct nvm_target *t;
++ struct nvm_target *t = NULL;
+ struct nvm_dev *dev;
+
+ down_read(&nvm_lock);
+diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
+index 773537804319..f546e6f28b8a 100644
+--- a/drivers/lightnvm/pblk-core.c
++++ b/drivers/lightnvm/pblk-core.c
+@@ -323,14 +323,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
+ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
+ int nr_pages)
+ {
+- struct bio_vec bv;
+- int i;
+-
+- WARN_ON(off + nr_pages != bio->bi_vcnt);
+-
+- for (i = off; i < nr_pages + off; i++) {
+- bv = bio->bi_io_vec[i];
+- mempool_free(bv.bv_page, &pblk->page_bio_pool);
++ struct bio_vec *bv;
++ struct page *page;
++ int i, e, nbv = 0;
++
++ for (i = 0; i < bio->bi_vcnt; i++) {
++ bv = &bio->bi_io_vec[i];
++ page = bv->bv_page;
++ for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
++ if (nbv >= off)
++ mempool_free(page++, &pblk->page_bio_pool);
+ }
+ }
+
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index f8986effcb50..6f776823b9ba 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
+ struct bucket *b;
+ long r;
+
++
++ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
++ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
++ return -1;
++
+ /* fastpath */
+ if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
+ fifo_pop(&ca->free[reserve], r))
+@@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
+ {
+ int i;
+
++ /* No allocation if CACHE_SET_IO_DISABLE bit is set */
++ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
++ return -1;
++
+ lockdep_assert_held(&c->bucket_lock);
+ BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index fdf75352e16a..e30a983a68cd 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -726,8 +726,6 @@ struct cache_set {
+
+ #define BUCKET_HASH_BITS 12
+ struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
+-
+- DECLARE_HEAP(struct btree *, flush_btree);
+ };
+
+ struct bbio {
+diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
+index c25097968319..4d93f07f63e5 100644
+--- a/drivers/md/bcache/io.c
++++ b/drivers/md/bcache/io.c
+@@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
+
+ WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
+
++ /*
++ * Read-ahead requests on a degrading and recovering md raid
++ * (e.g. raid6) device might be failed immediately by md
++ * raid code, which is not a real hardware media failure. So
++ * we shouldn't count failed REQ_RAHEAD bios toward dc->io_errors.
++ */
++ if (bio->bi_opf & REQ_RAHEAD) {
++ pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
++ dc->backing_dev_name);
++ return;
++ }
++
+ errors = atomic_add_return(1, &dc->io_errors);
+ if (errors < dc->error_limit)
+ pr_err("%s: IO error on backing device, unrecoverable",
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 12dae9348147..cae2aff5e27a 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -268,7 +268,7 @@ bsearch:
+ struct journal_replay,
+ list)->j.seq;
+
+- return ret;
++ return 0;
+ #undef read_bucket
+ }
+
+@@ -391,12 +391,6 @@ err:
+ }
+
+ /* Journalling */
+-#define journal_max_cmp(l, r) \
+- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
+- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
+-#define journal_min_cmp(l, r) \
+- (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
+- fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
+
+ static void btree_flush_write(struct cache_set *c)
+ {
+@@ -404,35 +398,25 @@ static void btree_flush_write(struct cache_set *c)
+ * Try to find the btree node that references the oldest journal
+ * entry, best is our current candidate and is locked if non NULL:
+ */
+- struct btree *b;
+- int i;
++ struct btree *b, *best;
++ unsigned int i;
+
+ atomic_long_inc(&c->flush_write);
+-
+ retry:
+- spin_lock(&c->journal.lock);
+- if (heap_empty(&c->flush_btree)) {
+- for_each_cached_btree(b, c, i)
+- if (btree_current_write(b)->journal) {
+- if (!heap_full(&c->flush_btree))
+- heap_add(&c->flush_btree, b,
+- journal_max_cmp);
+- else if (journal_max_cmp(b,
+- heap_peek(&c->flush_btree))) {
+- c->flush_btree.data[0] = b;
+- heap_sift(&c->flush_btree, 0,
+- journal_max_cmp);
+- }
++ best = NULL;
++
++ for_each_cached_btree(b, c, i)
++ if (btree_current_write(b)->journal) {
++ if (!best)
++ best = b;
++ else if (journal_pin_cmp(c,
++ btree_current_write(best)->journal,
++ btree_current_write(b)->journal)) {
++ best = b;
+ }
++ }
+
+- for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
+- heap_sift(&c->flush_btree, i, journal_min_cmp);
+- }
+-
+- b = NULL;
+- heap_pop(&c->flush_btree, b, journal_min_cmp);
+- spin_unlock(&c->journal.lock);
+-
++ b = best;
+ if (b) {
+ mutex_lock(&b->write_lock);
+ if (!btree_current_write(b)->journal) {
+@@ -811,6 +795,10 @@ atomic_t *bch_journal(struct cache_set *c,
+ struct journal_write *w;
+ atomic_t *ret;
+
++ /* No journaling if CACHE_SET_IO_DISABLE set already */
++ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
++ return NULL;
++
+ if (!CACHE_SYNC(&c->sb))
+ return NULL;
+
+@@ -855,7 +843,6 @@ void bch_journal_free(struct cache_set *c)
+ free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
+ free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
+ free_fifo(&c->journal.pin);
+- free_heap(&c->flush_btree);
+ }
+
+ int bch_journal_alloc(struct cache_set *c)
+@@ -870,8 +857,7 @@ int bch_journal_alloc(struct cache_set *c)
+ j->w[0].c = c;
+ j->w[1].c = c;
+
+- if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
+- !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
++ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
+ !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
+ !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+ return -ENOMEM;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 1b63ac876169..6daf777105fb 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -40,6 +40,7 @@ static const char invalid_uuid[] = {
+
+ static struct kobject *bcache_kobj;
+ struct mutex bch_register_lock;
++bool bcache_is_reboot;
+ LIST_HEAD(bch_cache_sets);
+ static LIST_HEAD(uncached_devices);
+
+@@ -49,6 +50,7 @@ static wait_queue_head_t unregister_wait;
+ struct workqueue_struct *bcache_wq;
+ struct workqueue_struct *bch_journal_wq;
+
++
+ #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
+ /* limitation of partitions number on single bcache device */
+ #define BCACHE_MINORS 128
+@@ -1190,18 +1192,16 @@ static void cached_dev_free(struct closure *cl)
+ {
+ struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+
+- mutex_lock(&bch_register_lock);
+-
+ if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+ cancel_writeback_rate_update_dwork(dc);
+
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ kthread_stop(dc->writeback_thread);
+- if (dc->writeback_write_wq)
+- destroy_workqueue(dc->writeback_write_wq);
+ if (!IS_ERR_OR_NULL(dc->status_update_thread))
+ kthread_stop(dc->status_update_thread);
+
++ mutex_lock(&bch_register_lock);
++
+ if (atomic_read(&dc->running))
+ bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ bcache_device_free(&dc->disk);
+@@ -1437,8 +1437,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
+
+ bool bch_cached_dev_error(struct cached_dev *dc)
+ {
+- struct cache_set *c;
+-
+ if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
+ return false;
+
+@@ -1449,21 +1447,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
+ pr_err("stop %s: too many IO errors on backing device %s\n",
+ dc->disk.disk->disk_name, dc->backing_dev_name);
+
+- /*
+- * If the cached device is still attached to a cache set,
+- * even dc->io_disable is true and no more I/O requests
+- * accepted, cache device internal I/O (writeback scan or
+- * garbage collection) may still prevent bcache device from
+- * being stopped. So here CACHE_SET_IO_DISABLE should be
+- * set to c->flags too, to make the internal I/O to cache
+- * device rejected and stopped immediately.
+- * If c is NULL, that means the bcache device is not attached
+- * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
+- */
+- c = dc->disk.c;
+- if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+- pr_info("CACHE_SET_IO_DISABLE already set");
+-
+ bcache_device_stop(&dc->disk);
+ return true;
+ }
+@@ -1564,7 +1547,7 @@ static void cache_set_flush(struct closure *cl)
+ kobject_put(&c->internal);
+ kobject_del(&c->kobj);
+
+- if (c->gc_thread)
++ if (!IS_ERR_OR_NULL(c->gc_thread))
+ kthread_stop(c->gc_thread);
+
+ if (!IS_ERR_OR_NULL(c->root))
+@@ -2301,6 +2284,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ if (!try_module_get(THIS_MODULE))
+ return -EBUSY;
+
++ /* For latest state of bcache_is_reboot */
++ smp_mb();
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ path = kstrndup(buffer, size, GFP_KERNEL);
+ if (!path)
+ goto err;
+@@ -2380,6 +2368,9 @@ err:
+
+ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+ {
++ if (bcache_is_reboot)
++ return NOTIFY_DONE;
++
+ if (code == SYS_DOWN ||
+ code == SYS_HALT ||
+ code == SYS_POWER_OFF) {
+@@ -2392,19 +2383,45 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
+
+ mutex_lock(&bch_register_lock);
+
++ if (bcache_is_reboot)
++ goto out;
++
++ /* New registration is rejected since now */
++ bcache_is_reboot = true;
++ /*
++ * Make a registering caller (if any) on another CPU core
++ * see that bcache_is_reboot was set to true earlier
++ */
++ smp_mb();
++
+ if (list_empty(&bch_cache_sets) &&
+ list_empty(&uncached_devices))
+ goto out;
+
++ mutex_unlock(&bch_register_lock);
++
+ pr_info("Stopping all devices:");
+
++ /*
++ * The reason bch_register_lock is not held to call
++ * bch_cache_set_stop() and bcache_device_stop() is to
++ * avoid potential deadlock during reboot, because cache
++ * set or bcache device stopping process will acquire
++ * bch_register_lock too.
++ *
++ * We are safe here because bcache_is_reboot is already set
++ * to true, so register_bcache() will reject new
++ * registrations now. bcache_is_reboot also makes sure
++ * bcache_reboot() won't be re-entered by another thread,
++ * so there is no race in following list iteration by
++ * list_for_each_entry_safe().
++ */
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ bch_cache_set_stop(c);
+
+ list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
+ bcache_device_stop(&dc->disk);
+
+- mutex_unlock(&bch_register_lock);
+
+ /*
+ * Give an early chance for other kthreads and
+@@ -2531,6 +2548,8 @@ static int __init bcache_init(void)
+ bch_debug_init();
+ closure_debug_init();
+
++ bcache_is_reboot = false;
++
+ return 0;
+ err:
+ bcache_exit();
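+
The two smp_mb() calls around bcache_is_reboot pair across CPUs: the reboot path publishes the flag before it starts stopping devices, and register_bcache() orders its read so a racing registration either observes the flag and bails with -EBUSY or completes before teardown. The pairing in minimal form (writer and reader on different CPUs):

    /* reboot notifier (writer) */
    bcache_is_reboot = true;
    smp_mb();           /* publish the flag before stopping devices */
    /* ... stop cache sets and devices ... */

    /* register_bcache() (reader) */
    smp_mb();           /* read the latest bcache_is_reboot */
    if (bcache_is_reboot)
        return -EBUSY;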
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index bfb437ffb13c..54cd1727d20c 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -16,6 +16,8 @@
+ #include <linux/sort.h>
+ #include <linux/sched/clock.h>
+
++extern bool bcache_is_reboot;
++
+ /* Default is 0 ("writethrough") */
+ static const char * const bch_cache_modes[] = {
+ "writethrough",
+@@ -180,7 +182,7 @@ SHOW(__bch_cached_dev)
+ var_print(writeback_percent);
+ sysfs_hprint(writeback_rate,
+ wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
+- sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
++ sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
+ sysfs_printf(io_error_limit, "%i", dc->error_limit);
+ sysfs_printf(io_disable, "%i", dc->io_disable);
+ var_print(writeback_rate_update_seconds);
+@@ -271,6 +273,10 @@ STORE(__cached_dev)
+ struct cache_set *c;
+ struct kobj_uevent_env *env;
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ #define d_strtoul(var) sysfs_strtoul(var, dc->var)
+ #define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
+ #define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
+@@ -408,6 +414,10 @@ STORE(bch_cached_dev)
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ mutex_lock(&bch_register_lock);
+ size = __cached_dev_store(kobj, attr, buf, size);
+
+@@ -464,7 +474,7 @@ static struct attribute *bch_cached_dev_files[] = {
+ &sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_minimum,
+ &sysfs_writeback_rate_debug,
+- &sysfs_errors,
++ &sysfs_io_errors,
+ &sysfs_io_error_limit,
+ &sysfs_io_disable,
+ &sysfs_dirty_data,
+@@ -511,6 +521,10 @@ STORE(__bch_flash_dev)
+ kobj);
+ struct uuid_entry *u = &d->c->uuids[d->id];
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ sysfs_strtoul(data_csum, d->data_csum);
+
+ if (attr == &sysfs_size) {
+@@ -746,6 +760,10 @@ STORE(__bch_cache_set)
+ struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ ssize_t v;
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ if (attr == &sysfs_unregister)
+ bch_cache_set_unregister(c);
+
+@@ -865,6 +883,10 @@ STORE(bch_cache_set_internal)
+ {
+ struct cache_set *c = container_of(kobj, struct cache_set, internal);
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ return bch_cache_set_store(&c->kobj, attr, buf, size);
+ }
+
+@@ -1050,6 +1072,10 @@ STORE(__bch_cache)
+ struct cache *ca = container_of(kobj, struct cache, kobj);
+ ssize_t v;
+
++ /* no user space access if system is rebooting */
++ if (bcache_is_reboot)
++ return -EBUSY;
++
+ if (attr == &sysfs_discard) {
+ bool v = strtoul_or_return(buf);
+
+diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
+index 1fbced94e4cc..c029f7443190 100644
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -113,8 +113,6 @@ do { \
+
+ #define heap_full(h) ((h)->used == (h)->size)
+
+-#define heap_empty(h) ((h)->used == 0)
+-
+ #define DECLARE_FIFO(type, name) \
+ struct { \
+ size_t front, back, size, mask; \
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 73f0efac2b9f..e9ffcea1ca50 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -735,6 +735,10 @@ static int bch_writeback_thread(void *arg)
+ }
+ }
+
++ if (dc->writeback_write_wq) {
++ flush_workqueue(dc->writeback_write_wq);
++ destroy_workqueue(dc->writeback_write_wq);
++ }
+ cached_dev_put(dc);
+ wait_for_kthread_stop();
+
+@@ -830,6 +834,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread)) {
+ cached_dev_put(dc);
++ destroy_workqueue(dc->writeback_write_wq);
+ return PTR_ERR(dc->writeback_thread);
+ }
+ dc->writeback_running = true;
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 2a48ea3f1b30..b6b5acc92ca2 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1599,9 +1599,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+ unsigned long freed;
+
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+- if (sc->gfp_mask & __GFP_FS)
+- dm_bufio_lock(c);
+- else if (!dm_bufio_trylock(c))
++ if (!dm_bufio_trylock(c))
+ return SHRINK_STOP;
+
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 7f0840601737..4c68a7b93d5e 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -2046,16 +2046,19 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+
+ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+ {
+- int r;
++ int r = -EINVAL;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ pmd_write_lock(pmd);
++ if (pmd->fail_io)
++ goto out;
++
+ pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+ r = superblock_lock(pmd, &sblock);
+ if (r) {
+- DMERR("couldn't read superblock");
++ DMERR("couldn't lock superblock");
+ goto out;
+ }
+
+diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
+index d8334cd45d7c..4cdde7a02e94 100644
+--- a/drivers/md/dm-zoned-metadata.c
++++ b/drivers/md/dm-zoned-metadata.c
+@@ -1593,30 +1593,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
+ return zone;
+ }
+
+-/*
+- * Activate a zone (increment its reference count).
+- */
+-void dmz_activate_zone(struct dm_zone *zone)
+-{
+- set_bit(DMZ_ACTIVE, &zone->flags);
+- atomic_inc(&zone->refcount);
+-}
+-
+-/*
+- * Deactivate a zone. This decrement the zone reference counter
+- * and clears the active state of the zone once the count reaches 0,
+- * indicating that all BIOs to the zone have completed. Returns
+- * true if the zone was deactivated.
+- */
+-void dmz_deactivate_zone(struct dm_zone *zone)
+-{
+- if (atomic_dec_and_test(&zone->refcount)) {
+- WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
+- clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
+- smp_mb__after_atomic();
+- }
+-}
+-
+ /*
+ * Get the zone mapping a chunk, if the chunk is mapped already.
+ * If no mapping exist and the operation is WRITE, a zone is
+diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
+index 12419f0bfe78..ed8de49c9a08 100644
+--- a/drivers/md/dm-zoned.h
++++ b/drivers/md/dm-zoned.h
+@@ -115,7 +115,6 @@ enum {
+ DMZ_BUF,
+
+ /* Zone internal state */
+- DMZ_ACTIVE,
+ DMZ_RECLAIM,
+ DMZ_SEQ_WRITE_ERR,
+ };
+@@ -128,7 +127,6 @@ enum {
+ #define dmz_is_empty(z) ((z)->wp_block == 0)
+ #define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
+ #define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
+-#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags)
+ #define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
+ #define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+
+@@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
+ unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
+ unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+
+-void dmz_activate_zone(struct dm_zone *zone);
+-void dmz_deactivate_zone(struct dm_zone *zone);
++/*
++ * Activate a zone (increment its reference count).
++ */
++static inline void dmz_activate_zone(struct dm_zone *zone)
++{
++ atomic_inc(&zone->refcount);
++}
++
++/*
++ * Deactivate a zone. This decrements the zone reference counter,
++ * indicating that all BIOs to the zone have completed when the count is 0.
++ */
++static inline void dmz_deactivate_zone(struct dm_zone *zone)
++{
++ atomic_dec(&zone->refcount);
++}
++
++/*
++ * Test if a zone is active, that is, has a refcount > 0.
++ */
++static inline bool dmz_is_active(struct dm_zone *zone)
++{
++ return atomic_read(&zone->refcount);
++}
+
+ int dmz_lock_zone_reclaim(struct dm_zone *zone);
+ void dmz_unlock_zone_reclaim(struct dm_zone *zone);
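+
With DMZ_ACTIVE gone, a zone's liveness is carried entirely by the atomic refcount: non-zero means BIOs are in flight, zero means idle, and the helpers shrink to inlines. Typical use around I/O, sketched (submit_zone_bio() and reclaim_zone() are illustrative stand-ins, not dm-zoned functions):

    dmz_activate_zone(zone);    /* refcount++ before issuing the BIO */
    submit_zone_bio(zone, bio);
    /* ... in the BIO completion path ... */
    dmz_deactivate_zone(zone);  /* refcount--; zone idle at zero */

    /* reclaim only touches zones with no I/O in flight */
    if (!dmz_is_active(zone))
        reclaim_zone(zone);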
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b83bce2beb66..da94cbaa1a9e 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -7672,7 +7672,7 @@ abort:
+ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ struct r5conf *conf = mddev->private;
+- int err = -EEXIST;
++ int ret, err = -EEXIST;
+ int disk;
+ struct disk_info *p;
+ int first = 0;
+@@ -7687,7 +7687,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+ * The array is in readonly mode if journal is missing, so no
+ * write requests running. We should be safe
+ */
+- log_init(conf, rdev, false);
++ ret = log_init(conf, rdev, false);
++ if (ret)
++ return ret;
++
++ ret = r5l_start(conf->log);
++ if (ret)
++ return ret;
++
+ return 0;
+ }
+ if (mddev->recovery_disabled == conf->recovery_disabled)
+diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
+index 3cf25abf5807..cfccee87909a 100644
+--- a/drivers/media/common/videobuf2/videobuf2-core.c
++++ b/drivers/media/common/videobuf2/videobuf2-core.c
+@@ -207,6 +207,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
+
++ /* Did it wrap around? */
++ if (size < vb->planes[plane].length)
++ goto free;
++
+ mem_priv = call_ptr_memop(vb, alloc,
+ q->alloc_devs[plane] ? : q->dev,
+ q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+index 4a4c49d6085c..0f06f08346ba 100644
+--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
++++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+@@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+ {
+ unsigned int last_page = 0;
+- int size = buf->size;
++ unsigned long size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
+index f7c3e6be8e4d..2483f614d0e7 100644
+--- a/drivers/media/dvb-frontends/tua6100.c
++++ b/drivers/media/dvb-frontends/tua6100.c
+@@ -67,8 +67,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
+ struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
+ struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
+
+-#define _R 4
+-#define _P 32
++#define _R_VAL 4
++#define _P_VAL 32
+ #define _ri 4000000
+
+ // setup register 0
+@@ -83,14 +83,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
+ else
+ reg1[1] = 0x0c;
+
+- if (_P == 64)
++ if (_P_VAL == 64)
+ reg1[1] |= 0x40;
+ if (c->frequency >= 1525000)
+ reg1[1] |= 0x80;
+
+ // register 2
+- reg2[1] = (_R >> 8) & 0x03;
+- reg2[2] = _R;
++ reg2[1] = (_R_VAL >> 8) & 0x03;
++ reg2[2] = _R_VAL;
+ if (c->frequency < 1455000)
+ reg2[1] |= 0x1c;
+ else if (c->frequency < 1630000)
+@@ -102,18 +102,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
+ * The N divisor ratio (note: c->frequency is in kHz, but we
+ * need it in Hz)
+ */
+- prediv = (c->frequency * _R) / (_ri / 1000);
+- div = prediv / _P;
++ prediv = (c->frequency * _R_VAL) / (_ri / 1000);
++ div = prediv / _P_VAL;
+ reg1[1] |= (div >> 9) & 0x03;
+ reg1[2] = div >> 1;
+ reg1[3] = (div << 7);
+- priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
++ priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
+
+ // Finally, calculate and store the value for A
+- reg1[3] |= (prediv - (div*_P)) & 0x7f;
++ reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
+
+-#undef _R
+-#undef _P
++#undef _R_VAL
++#undef _P_VAL
+ #undef _ri
+
+ if (fe->ops.i2c_gate_ctrl)
+diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
+index d8ad9dad495d..fd4ea86dedd5 100644
+--- a/drivers/media/i2c/Makefile
++++ b/drivers/media/i2c/Makefile
+@@ -35,7 +35,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
+ obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
+ obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
+ obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
+-obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
++obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
+ obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
+ obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
+ obj-$(CONFIG_VIDEO_BT819) += bt819.o
+diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
+new file mode 100644
+index 000000000000..2ad6bdf1a9fc
+--- /dev/null
++++ b/drivers/media/i2c/adv7511-v4l2.c
+@@ -0,0 +1,1997 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Analog Devices ADV7511 HDMI Transmitter Device Driver
++ *
++ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
++ */
++
++/*
++ * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
++ * Device ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511).
++ */
++
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/i2c.h>
++#include <linux/delay.h>
++#include <linux/videodev2.h>
++#include <linux/gpio.h>
++#include <linux/workqueue.h>
++#include <linux/hdmi.h>
++#include <linux/v4l2-dv-timings.h>
++#include <media/v4l2-device.h>
++#include <media/v4l2-common.h>
++#include <media/v4l2-ctrls.h>
++#include <media/v4l2-dv-timings.h>
++#include <media/i2c/adv7511.h>
++#include <media/cec.h>
++
++static int debug;
++module_param(debug, int, 0644);
++MODULE_PARM_DESC(debug, "debug level (0-2)");
++
++MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
++MODULE_AUTHOR("Hans Verkuil");
++MODULE_LICENSE("GPL v2");
++
++#define MASK_ADV7511_EDID_RDY_INT 0x04
++#define MASK_ADV7511_MSEN_INT 0x40
++#define MASK_ADV7511_HPD_INT 0x80
++
++#define MASK_ADV7511_HPD_DETECT 0x40
++#define MASK_ADV7511_MSEN_DETECT 0x20
++#define MASK_ADV7511_EDID_RDY 0x10
++
++#define EDID_MAX_RETRIES (8)
++#define EDID_DELAY 250
++#define EDID_MAX_SEGM 8
++
++#define ADV7511_MAX_WIDTH 1920
++#define ADV7511_MAX_HEIGHT 1200
++#define ADV7511_MIN_PIXELCLOCK 20000000
++#define ADV7511_MAX_PIXELCLOCK 225000000
++
++#define ADV7511_MAX_ADDRS (3)
++
++/*
++**********************************************************************
++*
++* Arrays with configuration parameters for the ADV7511
++*
++**********************************************************************
++*/
++
++struct i2c_reg_value {
++ unsigned char reg;
++ unsigned char value;
++};
++
++struct adv7511_state_edid {
++ /* total number of blocks */
++ u32 blocks;
++ /* Number of segments read */
++ u32 segments;
++ u8 data[EDID_MAX_SEGM * 256];
++ /* Number of EDID read retries left */
++ unsigned read_retries;
++ bool complete;
++};
++
++struct adv7511_state {
++ struct adv7511_platform_data pdata;
++ struct v4l2_subdev sd;
++ struct media_pad pad;
++ struct v4l2_ctrl_handler hdl;
++ int chip_revision;
++ u8 i2c_edid_addr;
++ u8 i2c_pktmem_addr;
++ u8 i2c_cec_addr;
++
++ struct i2c_client *i2c_cec;
++ struct cec_adapter *cec_adap;
++ u8 cec_addr[ADV7511_MAX_ADDRS];
++ u8 cec_valid_addrs;
++ bool cec_enabled_adap;
++
++ /* Is the adv7511 powered on? */
++ bool power_on;
++ /* Did we receive hotplug and rx-sense signals? */
++ bool have_monitor;
++ bool enabled_irq;
++ /* timings from s_dv_timings */
++ struct v4l2_dv_timings dv_timings;
++ u32 fmt_code;
++ u32 colorspace;
++ u32 ycbcr_enc;
++ u32 quantization;
++ u32 xfer_func;
++ u32 content_type;
++ /* controls */
++ struct v4l2_ctrl *hdmi_mode_ctrl;
++ struct v4l2_ctrl *hotplug_ctrl;
++ struct v4l2_ctrl *rx_sense_ctrl;
++ struct v4l2_ctrl *have_edid0_ctrl;
++ struct v4l2_ctrl *rgb_quantization_range_ctrl;
++ struct v4l2_ctrl *content_type_ctrl;
++ struct i2c_client *i2c_edid;
++ struct i2c_client *i2c_pktmem;
++ struct adv7511_state_edid edid;
++ /* Running counter of the number of detected EDIDs (for debugging) */
++ unsigned edid_detect_counter;
++ struct workqueue_struct *work_queue;
++ struct delayed_work edid_handler; /* work entry */
++};
++
++static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
++static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
++static void adv7511_setup(struct v4l2_subdev *sd);
++static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
++static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
++
++
++static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
++ .type = V4L2_DV_BT_656_1120,
++ /* keep this initialization for compatibility with GCC < 4.4.6 */
++ .reserved = { 0 },
++ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
++ ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
++ V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
++ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
++ V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
++ V4L2_DV_BT_CAP_CUSTOM)
++};
++
++static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
++{
++ return container_of(sd, struct adv7511_state, sd);
++}
++
++static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
++{
++ return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
++}
++
++/* ------------------------ I2C ----------------------------------------------- */
++
++static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
++ u8 command, bool check)
++{
++ union i2c_smbus_data data;
++
++ if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
++ I2C_SMBUS_READ, command,
++ I2C_SMBUS_BYTE_DATA, &data))
++ return data.byte;
++ if (check)
++ v4l_err(client, "error reading %02x, %02x\n",
++ client->addr, command);
++ return -1;
++}
++
++static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
++{
++ int i;
++ for (i = 0; i < 3; i++) {
++ int ret = adv_smbus_read_byte_data_check(client, command, true);
++ if (ret >= 0) {
++ if (i)
++ v4l_err(client, "read ok after %d retries\n", i);
++ return ret;
++ }
++ }
++ v4l_err(client, "read failed\n");
++ return -1;
++}
++
++static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++
++ return adv_smbus_read_byte_data(client, reg);
++}
++
++static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int ret;
++ int i;
++
++ for (i = 0; i < 3; i++) {
++ ret = i2c_smbus_write_byte_data(client, reg, val);
++ if (ret == 0)
++ return 0;
++ }
++ v4l2_err(sd, "%s: i2c write error\n", __func__);
++ return ret;
++}
++
++/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
++ and then the value-mask (to be OR-ed). */
++static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
++{
++ adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
++}
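
adv7511_wr_and_or() is the workhorse for every partial register update in
this driver: clr_mask selects which bits survive the AND, val_mask supplies
the bits to OR in. A freestanding illustration of the same convention, with
a hypothetical register value:

    #include <stdio.h>

    /* Same convention as adv7511_wr_and_or(): keep (reg & clr), then OR val. */
    static unsigned char wr_and_or(unsigned char reg, unsigned char clr_mask,
                                   unsigned char val_mask)
    {
        return (reg & clr_mask) | val_mask;
    }

    int main(void)
    {
        unsigned char reg = 0xa5;       /* hypothetical current value */

        /* Set bits 7:5 to 010 while preserving bits 4:0. */
        printf("0x%02x -> 0x%02x\n", reg, wr_and_or(reg, 0x1f, 0x40));
        return 0;
    }
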
++
++static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
++ u8 command, unsigned length, u8 *values)
++{
++ union i2c_smbus_data data;
++ int ret;
++
++ if (length > I2C_SMBUS_BLOCK_MAX)
++ length = I2C_SMBUS_BLOCK_MAX;
++ data.block[0] = length;
++
++ ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
++ I2C_SMBUS_READ, command,
++ I2C_SMBUS_I2C_BLOCK_DATA, &data);
++ memcpy(values, data.block + 1, length);
++ return ret;
++}
++
++static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ int i;
++ int err = 0;
++
++ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
++
++ for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
++ err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
++ I2C_SMBUS_BLOCK_MAX, buf + i);
++ if (err)
++ v4l2_err(sd, "%s: i2c read error\n", __func__);
++}
++
++static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ return i2c_smbus_read_byte_data(state->i2c_cec, reg);
++}
++
++static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ int ret;
++ int i;
++
++ for (i = 0; i < 3; i++) {
++ ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
++ if (ret == 0)
++ return 0;
++ }
++ v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
++ return ret;
++}
++
++static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
++ u8 val)
++{
++ return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
++}
++
++static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
++}
++
++static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ int ret;
++ int i;
++
++ for (i = 0; i < 3; i++) {
++ ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
++ if (ret == 0)
++ return 0;
++ }
++ v4l2_err(sd, "%s: i2c write error\n", __func__);
++ return ret;
++}
++
++/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
++ and then the value-mask (to be OR-ed). */
++static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
++{
++ adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
++}
++
++static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
++{
++ return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
++}
++
++static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
++{
++ return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
++}
++
++static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
++{
++ adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
++}
++
++static void adv7511_csc_coeff(struct v4l2_subdev *sd,
++ u16 A1, u16 A2, u16 A3, u16 A4,
++ u16 B1, u16 B2, u16 B3, u16 B4,
++ u16 C1, u16 C2, u16 C3, u16 C4)
++{
++ /* A */
++ adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
++ adv7511_wr(sd, 0x19, A1);
++ adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
++ adv7511_wr(sd, 0x1B, A2);
++ adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
++ adv7511_wr(sd, 0x1d, A3);
++ adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
++ adv7511_wr(sd, 0x1f, A4);
++
++ /* B */
++ adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
++ adv7511_wr(sd, 0x21, B1);
++ adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
++ adv7511_wr(sd, 0x23, B2);
++ adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
++ adv7511_wr(sd, 0x25, B3);
++ adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
++ adv7511_wr(sd, 0x27, B4);
++
++ /* C */
++ adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
++ adv7511_wr(sd, 0x29, C1);
++ adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
++ adv7511_wr(sd, 0x2B, C2);
++ adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
++ adv7511_wr(sd, 0x2D, C3);
++ adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
++ adv7511_wr(sd, 0x2F, C4);
++}
++
++static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
++{
++ if (enable) {
++ u8 csc_mode = 0;
++ adv7511_csc_conversion_mode(sd, csc_mode);
++ adv7511_csc_coeff(sd,
++ 4096-564, 0, 0, 256,
++ 0, 4096-564, 0, 256,
++ 0, 0, 4096-564, 256);
++ /* enable CSC */
++ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
++ /* AVI infoframe: Limited range RGB (16-235) */
++ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
++ } else {
++ /* disable CSC */
++ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
++ /* AVI infoframe: Full range RGB (0-255) */
++ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
++ }
++}
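
A rough sanity check of the coefficients above, assuming they are 4096-based
fixed point with the fourth column acting as an additive offset on the same
scale (an assumption about the register format, not something the patch
states): the diagonal gain (4096-564)/4096 is about 0.862, close to the
219/255 compression that limited range requires, and the 256/4096 = 1/16
offset matches the +16 black level:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed model: out = in * (4096-564)/4096 + 256/4096, full scale 1.0 */
        double gain = (4096.0 - 564.0) / 4096.0;
        double offset = 256.0 / 4096.0;
        int in;

        for (in = 0; in <= 255; in += 255)  /* check the two extremes */
            printf("in=%3d -> out=%6.1f (8-bit scale)\n",
                   in, (in / 255.0 * gain + offset) * 255.0);
        return 0;
    }

Under that assumption the two extremes land near 16 and 236, i.e. roughly
the nominal limited-range interval.
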
++
++static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ /* Only makes sense for RGB formats */
++ if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
++ /* so just keep quantization */
++ adv7511_csc_rgb_full2limit(sd, false);
++ return;
++ }
++
++ switch (ctrl->val) {
++ case V4L2_DV_RGB_RANGE_AUTO:
++ /* automatic */
++ if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
++ /* CE format, RGB limited range (16-235) */
++ adv7511_csc_rgb_full2limit(sd, true);
++ } else {
++ /* not CE format, RGB full range (0-255) */
++ adv7511_csc_rgb_full2limit(sd, false);
++ }
++ break;
++ case V4L2_DV_RGB_RANGE_LIMITED:
++ /* RGB limited range (16-235) */
++ adv7511_csc_rgb_full2limit(sd, true);
++ break;
++ case V4L2_DV_RGB_RANGE_FULL:
++ /* RGB full range (0-255) */
++ adv7511_csc_rgb_full2limit(sd, false);
++ break;
++ }
++}
++
++/* ------------------------------ CTRL OPS ------------------------------ */
++
++static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
++{
++ struct v4l2_subdev *sd = to_sd(ctrl);
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
++
++ if (state->hdmi_mode_ctrl == ctrl) {
++ /* Set HDMI or DVI-D */
++ adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
++ return 0;
++ }
++ if (state->rgb_quantization_range_ctrl == ctrl) {
++ adv7511_set_rgb_quantization_mode(sd, ctrl);
++ return 0;
++ }
++ if (state->content_type_ctrl == ctrl) {
++ u8 itc, cn;
++
++ state->content_type = ctrl->val;
++ itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
++ cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
++ adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
++ adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
++ return 0;
++ }
++
++ return -EINVAL;
++}
++
++static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
++ .s_ctrl = adv7511_s_ctrl,
++};
++
++/* ---------------------------- CORE OPS ------------------------------------------- */
++
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++static void adv7511_inv_register(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ v4l2_info(sd, "0x000-0x0ff: Main Map\n");
++ if (state->i2c_cec)
++ v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
++}
++
++static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ reg->size = 1;
++ switch (reg->reg >> 8) {
++ case 0:
++ reg->val = adv7511_rd(sd, reg->reg & 0xff);
++ break;
++ case 1:
++ if (state->i2c_cec) {
++ reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
++ break;
++ }
++ /* fall through */
++ default:
++ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
++ adv7511_inv_register(sd);
++ break;
++ }
++ return 0;
++}
++
++static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ switch (reg->reg >> 8) {
++ case 0:
++ adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
++ break;
++ case 1:
++ if (state->i2c_cec) {
++ adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
++ break;
++ }
++ /* fall through */
++ default:
++ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
++ adv7511_inv_register(sd);
++ break;
++ }
++ return 0;
++}
++#endif
++
++struct adv7511_cfg_read_infoframe {
++ const char *desc;
++ u8 present_reg;
++ u8 present_mask;
++ u8 header[3];
++ u16 payload_addr;
++};
++
++static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
++{
++ u8 csum = 0;
++ size_t i;
++
++ /* compute checksum */
++ for (i = 0; i < size; i++)
++ csum += ptr[i];
++
++ return 256 - csum;
++}
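
hdmi_infoframe_checksum() returns the byte that makes the whole infoframe
(header, payload, and the checksum byte itself) sum to zero modulo 256; this
is why log_infoframe() below zeroes buffer[3] before calling it, so a stale
checksum cannot contribute. A standalone check of that property, with
made-up payload bytes:

    #include <stdio.h>
    #include <stddef.h>

    static unsigned char checksum(const unsigned char *p, size_t n)
    {
        unsigned char sum = 0;
        size_t i;

        for (i = 0; i < n; i++)
            sum += p[i];
        return 256 - sum;               /* value that zeroes the total */
    }

    int main(void)
    {
        unsigned char frame[7] = { 0x82, 0x02, 0x0d, 0x00, 0x12, 0x34, 0x56 };
        unsigned char total = 0;
        size_t i;

        frame[3] = checksum(frame, sizeof(frame));
        for (i = 0; i < sizeof(frame); i++)
            total += frame[i];
        printf("checksum=0x%02x, total mod 256 = %u\n", frame[3], total);
        return 0;
    }
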
++
++static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
++{
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ struct device *dev = &client->dev;
++ union hdmi_infoframe frame;
++ u8 buffer[32];
++ u8 len;
++ int i;
++
++ if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
++ v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
++ return;
++ }
++
++ memcpy(buffer, cri->header, sizeof(cri->header));
++
++ len = buffer[2];
++
++ if (len + 4 > sizeof(buffer)) {
++ v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
++ return;
++ }
++
++ if (cri->payload_addr >= 0x100) {
++ for (i = 0; i < len; i++)
++ buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
++ } else {
++ for (i = 0; i < len; i++)
++ buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
++ }
++ buffer[3] = 0;
++ buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
++
++ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
++ v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
++ return;
++ }
++
++ hdmi_infoframe_log(KERN_INFO, dev, &frame);
++}
++
++static void adv7511_log_infoframes(struct v4l2_subdev *sd)
++{
++ static const struct adv7511_cfg_read_infoframe cri[] = {
++ { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
++ { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
++ { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
++ };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(cri); i++)
++ log_infoframe(sd, &cri[i]);
++}
++
++static int adv7511_log_status(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct adv7511_state_edid *edid = &state->edid;
++ int i;
++
++ static const char * const states[] = {
++ "in reset",
++ "reading EDID",
++ "idle",
++ "initializing HDCP",
++ "HDCP enabled",
++ "initializing HDCP repeater",
++ "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
++ };
++ static const char * const errors[] = {
++ "no error",
++ "bad receiver BKSV",
++ "Ri mismatch",
++ "Pj mismatch",
++ "i2c error",
++ "timed out",
++ "max repeater cascade exceeded",
++ "hash check failed",
++ "too many devices",
++ "9", "A", "B", "C", "D", "E", "F"
++ };
++
++ v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
++ v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
++ (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
++ (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
++ edid->segments ? "found" : "no",
++ edid->blocks);
++ v4l2_info(sd, "%s output %s\n",
++ (adv7511_rd(sd, 0xaf) & 0x02) ?
++ "HDMI" : "DVI-D",
++ (adv7511_rd(sd, 0xa1) & 0x3c) ?
++ "disabled" : "enabled");
++ v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
++ states[adv7511_rd(sd, 0xc8) & 0xf],
++ errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
++ adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
++ v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
++ if (adv7511_rd(sd, 0xaf) & 0x02) {
++ /* HDMI only */
++ u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
++ u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
++ adv7511_rd(sd, 0x02) << 8 |
++ adv7511_rd(sd, 0x03);
++ u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
++ u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
++ u32 CTS;
++
++ if (manual_cts)
++ CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
++ adv7511_rd(sd, 0x08) << 8 |
++ adv7511_rd(sd, 0x09);
++ else
++ CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
++ adv7511_rd(sd, 0x05) << 8 |
++ adv7511_rd(sd, 0x06);
++ v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
++ manual_cts ? "manual" : "automatic", N, CTS);
++ v4l2_info(sd, "VIC: detected %d, sent %d\n",
++ vic_detect, vic_sent);
++ adv7511_log_infoframes(sd);
++ }
++ if (state->dv_timings.type == V4L2_DV_BT_656_1120)
++ v4l2_print_dv_timings(sd->name, "timings: ",
++ &state->dv_timings, false);
++ else
++ v4l2_info(sd, "no timings set\n");
++ v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
++
++ if (state->i2c_cec == NULL)
++ return 0;
++
++ v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
++
++ v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
++ "enabled" : "disabled");
++ if (state->cec_enabled_adap) {
++ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
++ bool is_valid = state->cec_valid_addrs & (1 << i);
++
++ if (is_valid)
++ v4l2_info(sd, "CEC Logical Address: 0x%x\n",
++ state->cec_addr[i]);
++ }
++ }
++ v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
++ return 0;
++}
++
++/* Power up/down adv7511 */
++static int adv7511_s_power(struct v4l2_subdev *sd, int on)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ const int retries = 20;
++ int i;
++
++ v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
++
++ state->power_on = on;
++
++ if (!on) {
++ /* Power down */
++ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
++ return true;
++ }
++
++ /* Power up */
++ /* The adv7511 does not always come up immediately.
++ Retry multiple times. */
++ for (i = 0; i < retries; i++) {
++ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
++ if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
++ break;
++ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
++ msleep(10);
++ }
++ if (i == retries) {
++ v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
++ adv7511_s_power(sd, 0);
++ return false;
++ }
++ if (i > 1)
++ v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
++
++ /* Reserved registers that must be set */
++ adv7511_wr(sd, 0x98, 0x03);
++ adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
++ adv7511_wr(sd, 0x9c, 0x30);
++ adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
++ adv7511_wr(sd, 0xa2, 0xa4);
++ adv7511_wr(sd, 0xa3, 0xa4);
++ adv7511_wr(sd, 0xe0, 0xd0);
++ adv7511_wr(sd, 0xf9, 0x00);
++
++ adv7511_wr(sd, 0x43, state->i2c_edid_addr);
++ adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
++
++ /* Set number of attempts to read the EDID */
++ adv7511_wr(sd, 0xc9, 0xf);
++ return true;
++}
++
++#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
++static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
++{
++ struct adv7511_state *state = cec_get_drvdata(adap);
++ struct v4l2_subdev *sd = &state->sd;
++
++ if (state->i2c_cec == NULL)
++ return -EIO;
++
++ if (!state->cec_enabled_adap && enable) {
++ /* power up cec section */
++ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
++ /* legacy mode and clear all rx buffers */
++ adv7511_cec_write(sd, 0x4a, 0x00);
++ adv7511_cec_write(sd, 0x4a, 0x07);
++ adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
++ /* enabled irqs: */
++ /* tx: ready */
++ /* tx: arbitration lost */
++ /* tx: retry timeout */
++ /* rx: ready 1 */
++ if (state->enabled_irq)
++ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
++ } else if (state->cec_enabled_adap && !enable) {
++ if (state->enabled_irq)
++ adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
++ /* disable address mask 1-3 */
++ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
++ /* power down cec section */
++ adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
++ state->cec_valid_addrs = 0;
++ }
++ state->cec_enabled_adap = enable;
++ return 0;
++}
++
++static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
++{
++ struct adv7511_state *state = cec_get_drvdata(adap);
++ struct v4l2_subdev *sd = &state->sd;
++ unsigned int i, free_idx = ADV7511_MAX_ADDRS;
++
++ if (!state->cec_enabled_adap)
++ return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
++
++ if (addr == CEC_LOG_ADDR_INVALID) {
++ adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
++ state->cec_valid_addrs = 0;
++ return 0;
++ }
++
++ for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
++ bool is_valid = state->cec_valid_addrs & (1 << i);
++
++ if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
++ free_idx = i;
++ if (is_valid && state->cec_addr[i] == addr)
++ return 0;
++ }
++ if (i == ADV7511_MAX_ADDRS) {
++ i = free_idx;
++ if (i == ADV7511_MAX_ADDRS)
++ return -ENXIO;
++ }
++ state->cec_addr[i] = addr;
++ state->cec_valid_addrs |= 1 << i;
++
++ switch (i) {
++ case 0:
++ /* enable address mask 0 */
++ adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
++ /* set address for mask 0 */
++ adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
++ break;
++ case 1:
++ /* enable address mask 1 */
++ adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
++ /* set address for mask 1 */
++ adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
++ break;
++ case 2:
++ /* enable address mask 2 */
++ adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
++ /* set address for mask 2 */
++ adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
++ break;
++ }
++ return 0;
++}
++
++static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
++ u32 signal_free_time, struct cec_msg *msg)
++{
++ struct adv7511_state *state = cec_get_drvdata(adap);
++ struct v4l2_subdev *sd = &state->sd;
++ u8 len = msg->len;
++ unsigned int i;
++
++ v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
++
++ if (len > 16) {
++ v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
++ return -EINVAL;
++ }
++
++ /*
++ * The hardware takes the number of retries, i.e. attempts - 1. It's
++ * not clear whether a value of 0 is allowed, so program at least one
++ * retry.
++ */
++ adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
++
++ /* clear cec tx irq status */
++ adv7511_wr(sd, 0x97, 0x38);
++
++ /* write data */
++ for (i = 0; i < len; i++)
++ adv7511_cec_write(sd, i, msg->msg[i]);
++
++ /* set length (data + header) */
++ adv7511_cec_write(sd, 0x10, len);
++ /* start transmit, enable tx */
++ adv7511_cec_write(sd, 0x11, 0x01);
++ return 0;
++}
++
++static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
++ v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
++ return;
++ }
++
++ if (tx_raw_status & 0x10) {
++ v4l2_dbg(1, debug, sd,
++ "%s: tx raw: arbitration lost\n", __func__);
++ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
++ 1, 0, 0, 0);
++ return;
++ }
++ if (tx_raw_status & 0x08) {
++ u8 status;
++ u8 nack_cnt;
++ u8 low_drive_cnt;
++
++ v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
++ /*
++ * We set this status bit since this hardware performs
++ * retransmissions.
++ */
++ status = CEC_TX_STATUS_MAX_RETRIES;
++ nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
++ if (nack_cnt)
++ status |= CEC_TX_STATUS_NACK;
++ low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
++ if (low_drive_cnt)
++ status |= CEC_TX_STATUS_LOW_DRIVE;
++ cec_transmit_done(state->cec_adap, status,
++ 0, nack_cnt, low_drive_cnt, 0);
++ return;
++ }
++ if (tx_raw_status & 0x20) {
++ v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
++ cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
++ return;
++ }
++}
++
++static const struct cec_adap_ops adv7511_cec_adap_ops = {
++ .adap_enable = adv7511_cec_adap_enable,
++ .adap_log_addr = adv7511_cec_adap_log_addr,
++ .adap_transmit = adv7511_cec_adap_transmit,
++};
++#endif
++
++/* Enable interrupts */
++static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
++ u8 irqs_rd;
++ int retries = 100;
++
++ v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
++
++ if (state->enabled_irq == enable)
++ return;
++ state->enabled_irq = enable;
++
++ /* The datasheet says that the EDID ready interrupt should be
++ disabled if there is no hotplug. */
++ if (!enable)
++ irqs = 0;
++ else if (adv7511_have_hotplug(sd))
++ irqs |= MASK_ADV7511_EDID_RDY_INT;
++
++ /*
++ * This i2c write can fail (approx. 1 in 1000 writes). But it
++ * is essential that this register is correct, so retry it
++ * multiple times.
++ *
++ * Note that the i2c write does not report an error, but the readback
++ * clearly shows the wrong value.
++ */
++ do {
++ adv7511_wr(sd, 0x94, irqs);
++ irqs_rd = adv7511_rd(sd, 0x94);
++ } while (retries-- && irqs_rd != irqs);
++
++ if (irqs_rd != irqs)
++ v4l2_err(sd, "Could not set interrupts: hw failure?\n");
++
++ adv7511_wr_and_or(sd, 0x95, 0xc0,
++ (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
++}
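
The loop above only trusts the readback: per the comment, the write can
complete without a bus error yet leave the register unchanged. The same
write-then-verify pattern in isolation, with stub accessors standing in for
the driver's register I/O (hypothetical, not from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned char fake_reg;                      /* stand-in for reg 0x94 */
    static void wr(unsigned char v) { fake_reg = v; }   /* stub: always sticks */
    static unsigned char rd(void) { return fake_reg; }

    static bool write_verified(void (*w)(unsigned char),
                               unsigned char (*r)(void),
                               unsigned char val, int retries)
    {
        do {
            w(val);
            if (r() == val)
                return true;        /* only the readback proves success */
        } while (retries-- > 0);
        return false;
    }

    int main(void)
    {
        printf("ok=%d\n", write_verified(wr, rd, 0x39, 100));
        return 0;
    }
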
++
++/* Interrupt handler */
++static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
++{
++ u8 irq_status;
++ u8 cec_irq;
++
++ /* disable interrupts to prevent a race condition */
++ adv7511_set_isr(sd, false);
++ irq_status = adv7511_rd(sd, 0x96);
++ cec_irq = adv7511_rd(sd, 0x97);
++ /* clear detected interrupts */
++ adv7511_wr(sd, 0x96, irq_status);
++ adv7511_wr(sd, 0x97, cec_irq);
++
++ v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
++ irq_status, cec_irq);
++
++ if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
++ adv7511_check_monitor_present_status(sd);
++ if (irq_status & MASK_ADV7511_EDID_RDY_INT)
++ adv7511_check_edid_status(sd);
++
++#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
++ if (cec_irq & 0x38)
++ adv_cec_tx_raw_status(sd, cec_irq);
++
++ if (cec_irq & 1) {
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct cec_msg msg;
++
++ msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
++
++ v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
++ msg.len);
++
++ if (msg.len > 16)
++ msg.len = 16;
++
++ if (msg.len) {
++ u8 i;
++
++ for (i = 0; i < msg.len; i++)
++ msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
++
++ adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
++ adv7511_cec_write(sd, 0x4a, 1);
++ cec_received_msg(state->cec_adap, &msg);
++ }
++ }
++#endif
++
++ /* enable interrupts */
++ adv7511_set_isr(sd, true);
++
++ if (handled)
++ *handled = true;
++ return 0;
++}
++
++static const struct v4l2_subdev_core_ops adv7511_core_ops = {
++ .log_status = adv7511_log_status,
++#ifdef CONFIG_VIDEO_ADV_DEBUG
++ .g_register = adv7511_g_register,
++ .s_register = adv7511_s_register,
++#endif
++ .s_power = adv7511_s_power,
++ .interrupt_service_routine = adv7511_isr,
++};
++
++/* ------------------------------ VIDEO OPS ------------------------------ */
++
++/* Enable/disable adv7511 output */
++static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
++ adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
++ if (enable) {
++ adv7511_check_monitor_present_status(sd);
++ } else {
++ adv7511_s_power(sd, 0);
++ state->have_monitor = false;
++ }
++ return 0;
++}
++
++static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
++ struct v4l2_dv_timings *timings)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct v4l2_bt_timings *bt = &timings->bt;
++ u32 fps;
++
++ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
++
++ /* quick sanity check */
++ if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
++ return -EINVAL;
++
++ /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
++ if the format is one of the CEA or DMT timings. */
++ v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
++
++ /* save timings */
++ state->dv_timings = *timings;
++
++ /* set h/vsync polarities */
++ adv7511_wr_and_or(sd, 0x17, 0x9f,
++ ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
++ ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
++
++ fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
++ switch (fps) {
++ case 24:
++ adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
++ break;
++ case 25:
++ adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
++ break;
++ case 30:
++ adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
++ break;
++ default:
++ adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
++ break;
++ }
++
++ /* update quantization range based on new dv_timings */
++ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
++
++ return 0;
++}
++
++static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
++ struct v4l2_dv_timings *timings)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
++
++ if (!timings)
++ return -EINVAL;
++
++ *timings = state->dv_timings;
++
++ return 0;
++}
++
++static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
++ struct v4l2_enum_dv_timings *timings)
++{
++ if (timings->pad != 0)
++ return -EINVAL;
++
++ return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
++}
++
++static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
++ struct v4l2_dv_timings_cap *cap)
++{
++ if (cap->pad != 0)
++ return -EINVAL;
++
++ *cap = adv7511_timings_cap;
++ return 0;
++}
++
++static const struct v4l2_subdev_video_ops adv7511_video_ops = {
++ .s_stream = adv7511_s_stream,
++ .s_dv_timings = adv7511_s_dv_timings,
++ .g_dv_timings = adv7511_g_dv_timings,
++};
++
++/* ------------------------------ AUDIO OPS ------------------------------ */
++static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
++{
++ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
++
++ if (enable)
++ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
++ else
++ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
++
++ return 0;
++}
++
++static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
++{
++ u32 N;
++
++ switch (freq) {
++ case 32000: N = 4096; break;
++ case 44100: N = 6272; break;
++ case 48000: N = 6144; break;
++ case 88200: N = 12544; break;
++ case 96000: N = 12288; break;
++ case 176400: N = 25088; break;
++ case 192000: N = 24576; break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Set N (used with CTS to regenerate the audio clock) */
++ adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
++ adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
++ adv7511_wr(sd, 0x03, N & 0xff);
++
++ return 0;
++}
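
The N values in the switch above are the HDMI specification's recommended
audio clock regeneration values: the sink reconstructs the sample rate from
N and CTS as fs = f_TMDS * N / (128 * CTS). For the 32/48 kHz families N
reduces to 128 * fs / 1000; the 44.1 kHz family (6272, 12544, 25088) is
tabulated separately. A quick check of the regular cases:

    #include <stdio.h>

    int main(void)
    {
        /* 32/48 kHz families: N = 128 * fs / 1000 (44.1 kHz is tabulated). */
        unsigned int fs[] = { 32000, 48000, 96000, 192000 };
        unsigned int i;

        for (i = 0; i < sizeof(fs) / sizeof(fs[0]); i++)
            printf("fs=%-6u N=%u\n", fs[i], 128 * fs[i] / 1000);
        return 0;
    }
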
++
++static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
++{
++ u32 i2s_sf;
++
++ switch (freq) {
++ case 32000: i2s_sf = 0x30; break;
++ case 44100: i2s_sf = 0x00; break;
++ case 48000: i2s_sf = 0x20; break;
++ case 88200: i2s_sf = 0x80; break;
++ case 96000: i2s_sf = 0xa0; break;
++ case 176400: i2s_sf = 0xc0; break;
++ case 192000: i2s_sf = 0xe0; break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Set sampling frequency for I2S audio */
++ adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
++
++ return 0;
++}
++
++static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
++{
++ /* Only 2 channels in use for application */
++ adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
++ /* Speaker mapping */
++ adv7511_wr(sd, 0x76, 0x00);
++
++ /* 16 bit audio word length */
++ adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
++
++ return 0;
++}
++
++static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
++ .s_stream = adv7511_s_audio_stream,
++ .s_clock_freq = adv7511_s_clock_freq,
++ .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
++ .s_routing = adv7511_s_routing,
++};
++
++/* ---------------------------- PAD OPS ------------------------------------- */
++
++static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ memset(edid->reserved, 0, sizeof(edid->reserved));
++
++ if (edid->pad != 0)
++ return -EINVAL;
++
++ if (edid->start_block == 0 && edid->blocks == 0) {
++ edid->blocks = state->edid.segments * 2;
++ return 0;
++ }
++
++ if (state->edid.segments == 0)
++ return -ENODATA;
++
++ if (edid->start_block >= state->edid.segments * 2)
++ return -EINVAL;
++
++ if (edid->start_block + edid->blocks > state->edid.segments * 2)
++ edid->blocks = state->edid.segments * 2 - edid->start_block;
++
++ memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
++ 128 * edid->blocks);
++
++ return 0;
++}
++
++static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
++ struct v4l2_subdev_pad_config *cfg,
++ struct v4l2_subdev_mbus_code_enum *code)
++{
++ if (code->pad != 0)
++ return -EINVAL;
++
++ switch (code->index) {
++ case 0:
++ code->code = MEDIA_BUS_FMT_RGB888_1X24;
++ break;
++ case 1:
++ code->code = MEDIA_BUS_FMT_YUYV8_1X16;
++ break;
++ case 2:
++ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static void adv7511_fill_format(struct adv7511_state *state,
++ struct v4l2_mbus_framefmt *format)
++{
++ format->width = state->dv_timings.bt.width;
++ format->height = state->dv_timings.bt.height;
++ format->field = V4L2_FIELD_NONE;
++}
++
++static int adv7511_get_fmt(struct v4l2_subdev *sd,
++ struct v4l2_subdev_pad_config *cfg,
++ struct v4l2_subdev_format *format)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ if (format->pad != 0)
++ return -EINVAL;
++
++ memset(&format->format, 0, sizeof(format->format));
++ adv7511_fill_format(state, &format->format);
++
++ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
++ struct v4l2_mbus_framefmt *fmt;
++
++ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
++ format->format.code = fmt->code;
++ format->format.colorspace = fmt->colorspace;
++ format->format.ycbcr_enc = fmt->ycbcr_enc;
++ format->format.quantization = fmt->quantization;
++ format->format.xfer_func = fmt->xfer_func;
++ } else {
++ format->format.code = state->fmt_code;
++ format->format.colorspace = state->colorspace;
++ format->format.ycbcr_enc = state->ycbcr_enc;
++ format->format.quantization = state->quantization;
++ format->format.xfer_func = state->xfer_func;
++ }
++
++ return 0;
++}
++
++static int adv7511_set_fmt(struct v4l2_subdev *sd,
++ struct v4l2_subdev_pad_config *cfg,
++ struct v4l2_subdev_format *format)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ /*
++ * Bitfield names come from the CEA-861-F standard, table 8 "Auxiliary
++ * Video Information (AVI) InfoFrame Format"
++ *
++ * c = Colorimetry
++ * ec = Extended Colorimetry
++ * y = RGB or YCbCr
++ * q = RGB Quantization Range
++ * yq = YCC Quantization Range
++ */
++ u8 c = HDMI_COLORIMETRY_NONE;
++ u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
++ u8 y = HDMI_COLORSPACE_RGB;
++ u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
++ u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
++ u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
++ u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
++
++ if (format->pad != 0)
++ return -EINVAL;
++ switch (format->format.code) {
++ case MEDIA_BUS_FMT_UYVY8_1X16:
++ case MEDIA_BUS_FMT_YUYV8_1X16:
++ case MEDIA_BUS_FMT_RGB888_1X24:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ adv7511_fill_format(state, &format->format);
++ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
++ struct v4l2_mbus_framefmt *fmt;
++
++ fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
++ fmt->code = format->format.code;
++ fmt->colorspace = format->format.colorspace;
++ fmt->ycbcr_enc = format->format.ycbcr_enc;
++ fmt->quantization = format->format.quantization;
++ fmt->xfer_func = format->format.xfer_func;
++ return 0;
++ }
++
++ switch (format->format.code) {
++ case MEDIA_BUS_FMT_UYVY8_1X16:
++ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
++ adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
++ y = HDMI_COLORSPACE_YUV422;
++ break;
++ case MEDIA_BUS_FMT_YUYV8_1X16:
++ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
++ adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
++ y = HDMI_COLORSPACE_YUV422;
++ break;
++ case MEDIA_BUS_FMT_RGB888_1X24:
++ default:
++ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
++ adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
++ break;
++ }
++ state->fmt_code = format->format.code;
++ state->colorspace = format->format.colorspace;
++ state->ycbcr_enc = format->format.ycbcr_enc;
++ state->quantization = format->format.quantization;
++ state->xfer_func = format->format.xfer_func;
++
++ switch (format->format.colorspace) {
++ case V4L2_COLORSPACE_OPRGB:
++ c = HDMI_COLORIMETRY_EXTENDED;
++ ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
++ HDMI_EXTENDED_COLORIMETRY_OPRGB;
++ break;
++ case V4L2_COLORSPACE_SMPTE170M:
++ c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
++ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
++ c = HDMI_COLORIMETRY_EXTENDED;
++ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
++ }
++ break;
++ case V4L2_COLORSPACE_REC709:
++ c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
++ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
++ c = HDMI_COLORIMETRY_EXTENDED;
++ ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
++ }
++ break;
++ case V4L2_COLORSPACE_SRGB:
++ c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
++ ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
++ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
++ break;
++ case V4L2_COLORSPACE_BT2020:
++ c = HDMI_COLORIMETRY_EXTENDED;
++ if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
++ ec = 5; /* Not yet available in hdmi.h */
++ else
++ ec = 6; /* Not yet available in hdmi.h */
++ break;
++ default:
++ break;
++ }
++
++ /*
++ * CEA-861-F says that for RGB formats the YCC range must match the
++ * RGB range, although sources should ignore the YCC range.
++ *
++ * The RGB quantization range shouldn't be non-zero if the EDID doesn't
++ * have the Q bit set in the Video Capabilities Data Block, however this
++ * isn't checked at the moment. The assumption is that the application
++ * knows the EDID and can detect this.
++ *
++ * The same is true for the YCC quantization range: non-standard YCC
++ * quantization ranges should only be sent if the EDID has the YQ bit
++ * set in the Video Capabilities Data Block.
++ */
++ switch (format->format.quantization) {
++ case V4L2_QUANTIZATION_FULL_RANGE:
++ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
++ HDMI_QUANTIZATION_RANGE_FULL;
++ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
++ break;
++ case V4L2_QUANTIZATION_LIM_RANGE:
++ q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
++ HDMI_QUANTIZATION_RANGE_LIMITED;
++ yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
++ break;
++ }
++
++ adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
++ adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
++ adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
++ adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
++ adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
++ adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
++ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
++
++ return 0;
++}
++
++static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
++ .get_edid = adv7511_get_edid,
++ .enum_mbus_code = adv7511_enum_mbus_code,
++ .get_fmt = adv7511_get_fmt,
++ .set_fmt = adv7511_set_fmt,
++ .enum_dv_timings = adv7511_enum_dv_timings,
++ .dv_timings_cap = adv7511_dv_timings_cap,
++};
++
++/* --------------------- SUBDEV OPS --------------------------------------- */
++
++static const struct v4l2_subdev_ops adv7511_ops = {
++ .core = &adv7511_core_ops,
++ .pad = &adv7511_pad_ops,
++ .video = &adv7511_video_ops,
++ .audio = &adv7511_audio_ops,
++};
++
++/* ----------------------------------------------------------------------- */
++static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
++{
++ if (debug >= lvl) {
++ int i, j;
++ v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
++ for (i = 0; i < 256; i += 16) {
++ u8 b[128];
++ u8 *bp = b;
++ if (i == 128)
++ v4l2_dbg(lvl, debug, sd, "\n");
++ for (j = i; j < i + 16; j++) {
++ sprintf(bp, "0x%02x, ", buf[j]);
++ bp += 6;
++ }
++ bp[0] = '\0';
++ v4l2_dbg(lvl, debug, sd, "%s\n", b);
++ }
++ }
++}
++
++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct adv7511_edid_detect ed;
++
++ /* We failed to read the EDID, so send an event for this. */
++ ed.present = false;
++ ed.segment = adv7511_rd(sd, 0xc4);
++ ed.phys_addr = CEC_PHYS_ADDR_INVALID;
++ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
++ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
++}
++
++static void adv7511_edid_handler(struct work_struct *work)
++{
++ struct delayed_work *dwork = to_delayed_work(work);
++ struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
++ struct v4l2_subdev *sd = &state->sd;
++
++ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
++
++ if (adv7511_check_edid_status(sd)) {
++ /* Return if we received the EDID. */
++ return;
++ }
++
++ if (adv7511_have_hotplug(sd)) {
++ /* We must retry reading the EDID several times; it is possible
++ * that initially the EDID couldn't be read due to i2c errors
++ * (DVI connectors are particularly prone to this problem). */
++ if (state->edid.read_retries) {
++ state->edid.read_retries--;
++ v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
++ state->have_monitor = false;
++ adv7511_s_power(sd, false);
++ adv7511_s_power(sd, true);
++ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
++ return;
++ }
++ }
++
++ /* We failed to read the EDID, so send an event for this. */
++ adv7511_notify_no_edid(sd);
++ v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
++}
++
++static void adv7511_audio_setup(struct v4l2_subdev *sd)
++{
++ v4l2_dbg(1, debug, sd, "%s\n", __func__);
++
++ adv7511_s_i2s_clock_freq(sd, 48000);
++ adv7511_s_clock_freq(sd, 48000);
++ adv7511_s_routing(sd, 0, 0, 0);
++}
++
++/* Configure hdmi transmitter. */
++static void adv7511_setup(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ v4l2_dbg(1, debug, sd, "%s\n", __func__);
++
++ /* Input format: RGB 4:4:4 */
++ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
++ /* Output format: RGB 4:4:4 */
++ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
++ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
++ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
++ /* Disable pixel repetition */
++ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
++ /* Disable CSC */
++ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
++ /* Output format: RGB 4:4:4, Active Format Information is valid,
++ * underscanned */
++ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
++ /* AVI InfoFrame packet enable, Audio InfoFrame disable */
++ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
++ /* Colorimetry, Active format aspect ratio: same as picture. */
++ adv7511_wr(sd, 0x56, 0xa8);
++ /* No encryption */
++ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
++
++ /* Positive clk edge capture for input video clock */
++ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
++
++ adv7511_audio_setup(sd);
++
++ v4l2_ctrl_handler_setup(&state->hdl);
++}
++
++static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
++{
++ struct adv7511_monitor_detect mdt;
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ mdt.present = state->have_monitor;
++ v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
++}
++
++static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ /* read hotplug and rx-sense state */
++ u8 status = adv7511_rd(sd, 0x42);
++
++ v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
++ __func__,
++ status,
++ status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
++ status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
++
++ /* update read only ctrls */
++ v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
++ v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
++
++ if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
++ v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
++ if (!state->have_monitor) {
++ v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
++ state->have_monitor = true;
++ adv7511_set_isr(sd, true);
++ if (!adv7511_s_power(sd, true)) {
++ v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
++ return;
++ }
++ adv7511_setup(sd);
++ adv7511_notify_monitor_detect(sd);
++ state->edid.read_retries = EDID_MAX_RETRIES;
++ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
++ }
++ } else if (status & MASK_ADV7511_HPD_DETECT) {
++ v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
++ state->edid.read_retries = EDID_MAX_RETRIES;
++ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
++ } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
++ v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
++ if (state->have_monitor) {
++ v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
++ state->have_monitor = false;
++ adv7511_notify_monitor_detect(sd);
++ }
++ adv7511_s_power(sd, false);
++ memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
++ adv7511_notify_no_edid(sd);
++ }
++}
++
++static bool edid_block_verify_crc(u8 *edid_block)
++{
++ u8 sum = 0;
++ int i;
++
++ for (i = 0; i < 128; i++)
++ sum += edid_block[i];
++ return sum == 0;
++}
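
Despite the "crc" in its name, the check above is the plain EDID checksum
rule: every 128-byte block must sum to zero modulo 256, the last byte of
each block being chosen to balance the other 127. The same check standalone,
with a block built from the EDID header magic:

    #include <stdio.h>

    static int edid_block_ok(const unsigned char *blk)
    {
        unsigned char sum = 0;
        int i;

        for (i = 0; i < 128; i++)
            sum += blk[i];
        return sum == 0;
    }

    int main(void)
    {
        unsigned char blk[128] = { 0x00, 0xff, 0xff, 0xff,
                                   0xff, 0xff, 0xff, 0x00 };
        unsigned char sum = 0;
        int i;

        for (i = 0; i < 127; i++)
            sum += blk[i];
        blk[127] = (unsigned char)(256 - sum);  /* checksum byte */
        printf("valid=%d\n", edid_block_ok(blk));
        return 0;
    }
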
++
++static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ u32 blocks = state->edid.blocks;
++ u8 *data = state->edid.data;
++
++ if (!edid_block_verify_crc(&data[segment * 256]))
++ return false;
++ if ((segment + 1) * 2 <= blocks)
++ return edid_block_verify_crc(&data[segment * 256 + 128]);
++ return true;
++}
++
++static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
++{
++ static const u8 hdmi_header[] = {
++ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
++ };
++ struct adv7511_state *state = get_adv7511_state(sd);
++ u8 *data = state->edid.data;
++
++ if (segment != 0)
++ return true;
++ return !memcmp(data, hdmi_header, sizeof(hdmi_header));
++}
++
++static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ u8 edidRdy = adv7511_rd(sd, 0xc5);
++
++ v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
++ __func__, EDID_MAX_RETRIES - state->edid.read_retries);
++
++ if (state->edid.complete)
++ return true;
++
++ if (edidRdy & MASK_ADV7511_EDID_RDY) {
++ int segment = adv7511_rd(sd, 0xc4);
++ struct adv7511_edid_detect ed;
++
++ if (segment >= EDID_MAX_SEGM) {
++ v4l2_err(sd, "edid segment number too big\n");
++ return false;
++ }
++ v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
++ adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
++ adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
++ if (segment == 0) {
++ state->edid.blocks = state->edid.data[0x7e] + 1;
++ v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
++ }
++ if (!edid_verify_crc(sd, segment) ||
++ !edid_verify_header(sd, segment)) {
++ /* edid crc error, force reread of edid segment */
++ v4l2_err(sd, "%s: edid crc or header error\n", __func__);
++ state->have_monitor = false;
++ adv7511_s_power(sd, false);
++ adv7511_s_power(sd, true);
++ return false;
++ }
++ /* one more segment read ok */
++ state->edid.segments = segment + 1;
++ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
++ if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
++ /* Request next EDID segment */
++ v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
++ adv7511_wr(sd, 0xc9, 0xf);
++ adv7511_wr(sd, 0xc4, state->edid.segments);
++ state->edid.read_retries = EDID_MAX_RETRIES;
++ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
++ return false;
++ }
++
++ v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
++ state->edid.complete = true;
++ ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
++ state->edid.segments * 256,
++ NULL);
++ /* Report only when all segments have been read, and attribute
++ * the event to segment 0.
++ */
++ ed.present = true;
++ ed.segment = 0;
++ state->edid_detect_counter++;
++ cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
++ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++ return ed.present;
++ }
++
++ return false;
++}
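
The chip hands over EDID data one 256-byte segment (two 128-byte blocks) at
a time. Byte 0x7e of block 0 holds the extension-block count, so the
function above derives blocks = data[0x7e] + 1 and keeps requesting segments
until (data[0x7e] >> 1) + 1 of them have arrived. The segment arithmetic in
isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ext;   /* EDID byte 0x7e: number of extension blocks */

        for (ext = 0; ext <= 4; ext++)
            printf("ext=%u blocks=%u segments=%u\n",
                   ext, ext + 1, (ext >> 1) + 1);
        return 0;
    }
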
++
++static int adv7511_registered(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct i2c_client *client = v4l2_get_subdevdata(sd);
++ int err;
++
++ err = cec_register_adapter(state->cec_adap, &client->dev);
++ if (err)
++ cec_delete_adapter(state->cec_adap);
++ return err;
++}
++
++static void adv7511_unregistered(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ cec_unregister_adapter(state->cec_adap);
++}
++
++static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
++ .registered = adv7511_registered,
++ .unregistered = adv7511_unregistered,
++};
++
++/* ----------------------------------------------------------------------- */
++/* Setup ADV7511 */
++static void adv7511_init_setup(struct v4l2_subdev *sd)
++{
++ struct adv7511_state *state = get_adv7511_state(sd);
++ struct adv7511_state_edid *edid = &state->edid;
++ u32 cec_clk = state->pdata.cec_clk;
++ u8 ratio;
++
++ v4l2_dbg(1, debug, sd, "%s\n", __func__);
++
++ /* clear all interrupts */
++ adv7511_wr(sd, 0x96, 0xff);
++ adv7511_wr(sd, 0x97, 0xff);
++ /*
++ * Stop HPD from resetting a lot of registers.
++ * It might leave the chip in a partly uninitialized state, in
++ * particular with regard to hotplug bounces.
++ */
++ adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
++ memset(edid, 0, sizeof(struct adv7511_state_edid));
++ state->have_monitor = false;
++ adv7511_set_isr(sd, false);
++ adv7511_s_stream(sd, false);
++ adv7511_s_audio_stream(sd, false);
++
++ if (state->i2c_cec == NULL)
++ return;
++
++ v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
++
++ /* cec soft reset */
++ adv7511_cec_write(sd, 0x50, 0x01);
++ adv7511_cec_write(sd, 0x50, 0x00);
++
++ /* legacy mode */
++ adv7511_cec_write(sd, 0x4a, 0x00);
++ adv7511_cec_write(sd, 0x4a, 0x07);
++
++ if (cec_clk % 750000 != 0)
++ v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
++ __func__, cec_clk);
++
++ ratio = (cec_clk / 750000) - 1;
++ adv7511_cec_write(sd, 0x4e, ratio << 2);
++}
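
The ratio written to register 0x4e is cec_clk / 750 kHz minus one,
presumably the divider that derives the CEC block's internal timing tick
from the supplied clock (hence the warning when cec_clk is not an exact
multiple of 750 kHz). For a hypothetical 12 MHz cec_clk:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cec_clk = 12000000;    /* hypothetical, Hz */
        unsigned char ratio;

        if (cec_clk % 750000 != 0)
            printf("warning: %u Hz is not a multiple of 750 kHz\n", cec_clk);
        ratio = cec_clk / 750000 - 1;       /* 12 MHz -> 15 */
        printf("reg 0x4e value: 0x%02x\n", (unsigned int)(ratio << 2));
        return 0;
    }
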
++
++static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
++{
++ struct adv7511_state *state;
++ struct adv7511_platform_data *pdata = client->dev.platform_data;
++ struct v4l2_ctrl_handler *hdl;
++ struct v4l2_subdev *sd;
++ u8 chip_id[2];
++ int err = -EIO;
++
++ /* Check if the adapter supports the needed features */
++ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
++ return -EIO;
++
++ state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
++ if (!state)
++ return -ENOMEM;
++
++ /* Platform data */
++ if (!pdata) {
++ v4l_err(client, "No platform data!\n");
++ return -ENODEV;
++ }
++ memcpy(&state->pdata, pdata, sizeof(state->pdata));
++ state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
++ state->colorspace = V4L2_COLORSPACE_SRGB;
++
++ sd = &state->sd;
++
++ v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
++ client->addr << 1);
++
++ v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
++ sd->internal_ops = &adv7511_int_ops;
++
++ hdl = &state->hdl;
++ v4l2_ctrl_handler_init(hdl, 10);
++ /* add in ascending ID order */
++ state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
++ V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
++ 0, V4L2_DV_TX_MODE_DVI_D);
++ state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
++ V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
++ state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
++ V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
++ state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
++ V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
++ state->rgb_quantization_range_ctrl =
++ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
++ V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
++ 0, V4L2_DV_RGB_RANGE_AUTO);
++ state->content_type_ctrl =
++ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
++ V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
++ 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
++ sd->ctrl_handler = hdl;
++ if (hdl->error) {
++ err = hdl->error;
++ goto err_hdl;
++ }
++ state->pad.flags = MEDIA_PAD_FL_SINK;
++ sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
++ err = media_entity_pads_init(&sd->entity, 1, &state->pad);
++ if (err)
++ goto err_hdl;
++
++ /* EDID and CEC i2c addr */
++ state->i2c_edid_addr = state->pdata.i2c_edid << 1;
++ state->i2c_cec_addr = state->pdata.i2c_cec << 1;
++ state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
++
++ state->chip_revision = adv7511_rd(sd, 0x0);
++ chip_id[0] = adv7511_rd(sd, 0xf5);
++ chip_id[1] = adv7511_rd(sd, 0xf6);
++ if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
++ v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
++ chip_id[1]);
++ err = -EIO;
++ goto err_entity;
++ }
++
++ state->i2c_edid = i2c_new_dummy(client->adapter,
++ state->i2c_edid_addr >> 1);
++ if (state->i2c_edid == NULL) {
++ v4l2_err(sd, "failed to register edid i2c client\n");
++ err = -ENOMEM;
++ goto err_entity;
++ }
++
++ adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
++ if (state->pdata.cec_clk < 3000000 ||
++ state->pdata.cec_clk > 100000000) {
++ v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
++ __func__, state->pdata.cec_clk);
++ state->pdata.cec_clk = 0;
++ }
++
++ if (state->pdata.cec_clk) {
++ state->i2c_cec = i2c_new_dummy(client->adapter,
++ state->i2c_cec_addr >> 1);
++ if (state->i2c_cec == NULL) {
++ v4l2_err(sd, "failed to register cec i2c client\n");
++ err = -ENOMEM;
++ goto err_unreg_edid;
++ }
++ adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
++ } else {
++ adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
++ }
++
++ state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
++ if (state->i2c_pktmem == NULL) {
++ v4l2_err(sd, "failed to register pktmem i2c client\n");
++ err = -ENOMEM;
++ goto err_unreg_cec;
++ }
++
++ state->work_queue = create_singlethread_workqueue(sd->name);
++ if (state->work_queue == NULL) {
++ v4l2_err(sd, "could not create workqueue\n");
++ err = -ENOMEM;
++ goto err_unreg_pktmem;
++ }
++
++ INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
++
++ adv7511_init_setup(sd);
++
++#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
++ state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
++ state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
++ ADV7511_MAX_ADDRS);
++ err = PTR_ERR_OR_ZERO(state->cec_adap);
++ if (err) {
++ destroy_workqueue(state->work_queue);
++ goto err_unreg_pktmem;
++ }
++#endif
++
++ adv7511_set_isr(sd, true);
++ adv7511_check_monitor_present_status(sd);
++
++ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
++ client->addr << 1, client->adapter->name);
++ return 0;
++
++err_unreg_pktmem:
++ i2c_unregister_device(state->i2c_pktmem);
++err_unreg_cec:
++ if (state->i2c_cec)
++ i2c_unregister_device(state->i2c_cec);
++err_unreg_edid:
++ i2c_unregister_device(state->i2c_edid);
++err_entity:
++ media_entity_cleanup(&sd->entity);
++err_hdl:
++ v4l2_ctrl_handler_free(&state->hdl);
++ return err;
++}
++
++/* ----------------------------------------------------------------------- */
++
++static int adv7511_remove(struct i2c_client *client)
++{
++ struct v4l2_subdev *sd = i2c_get_clientdata(client);
++ struct adv7511_state *state = get_adv7511_state(sd);
++
++ state->chip_revision = -1;
++
++ v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
++ client->addr << 1, client->adapter->name);
++
++ adv7511_set_isr(sd, false);
++ adv7511_init_setup(sd);
++ cancel_delayed_work(&state->edid_handler);
++ i2c_unregister_device(state->i2c_edid);
++ if (state->i2c_cec)
++ i2c_unregister_device(state->i2c_cec);
++ i2c_unregister_device(state->i2c_pktmem);
++ destroy_workqueue(state->work_queue);
++ v4l2_device_unregister_subdev(sd);
++ media_entity_cleanup(&sd->entity);
++ v4l2_ctrl_handler_free(sd->ctrl_handler);
++ return 0;
++}
++
++/* ----------------------------------------------------------------------- */
++
++static const struct i2c_device_id adv7511_id[] = {
++ { "adv7511", 0 },
++ { }
++};
++MODULE_DEVICE_TABLE(i2c, adv7511_id);
++
++static struct i2c_driver adv7511_driver = {
++ .driver = {
++ .name = "adv7511",
++ },
++ .probe = adv7511_probe,
++ .remove = adv7511_remove,
++ .id_table = adv7511_id,
++};
++
++module_i2c_driver(adv7511_driver);
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+deleted file mode 100644
+index cec5ebb1c9e6..000000000000
+--- a/drivers/media/i2c/adv7511.c
++++ /dev/null
+@@ -1,1992 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Analog Devices ADV7511 HDMI Transmitter Device Driver
+- *
+- * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+- */
+-
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/slab.h>
+-#include <linux/i2c.h>
+-#include <linux/delay.h>
+-#include <linux/videodev2.h>
+-#include <linux/gpio.h>
+-#include <linux/workqueue.h>
+-#include <linux/hdmi.h>
+-#include <linux/v4l2-dv-timings.h>
+-#include <media/v4l2-device.h>
+-#include <media/v4l2-common.h>
+-#include <media/v4l2-ctrls.h>
+-#include <media/v4l2-dv-timings.h>
+-#include <media/i2c/adv7511.h>
+-#include <media/cec.h>
+-
+-static int debug;
+-module_param(debug, int, 0644);
+-MODULE_PARM_DESC(debug, "debug level (0-2)");
+-
+-MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
+-MODULE_AUTHOR("Hans Verkuil");
+-MODULE_LICENSE("GPL v2");
+-
+-#define MASK_ADV7511_EDID_RDY_INT 0x04
+-#define MASK_ADV7511_MSEN_INT 0x40
+-#define MASK_ADV7511_HPD_INT 0x80
+-
+-#define MASK_ADV7511_HPD_DETECT 0x40
+-#define MASK_ADV7511_MSEN_DETECT 0x20
+-#define MASK_ADV7511_EDID_RDY 0x10
+-
+-#define EDID_MAX_RETRIES (8)
+-#define EDID_DELAY 250
+-#define EDID_MAX_SEGM 8
+-
+-#define ADV7511_MAX_WIDTH 1920
+-#define ADV7511_MAX_HEIGHT 1200
+-#define ADV7511_MIN_PIXELCLOCK 20000000
+-#define ADV7511_MAX_PIXELCLOCK 225000000
+-
+-#define ADV7511_MAX_ADDRS (3)
+-
+-/*
+-**********************************************************************
+-*
+-* Arrays with configuration parameters for the ADV7511
+-*
+-**********************************************************************
+-*/
+-
+-struct i2c_reg_value {
+- unsigned char reg;
+- unsigned char value;
+-};
+-
+-struct adv7511_state_edid {
+- /* total number of blocks */
+- u32 blocks;
+- /* Number of segments read */
+- u32 segments;
+- u8 data[EDID_MAX_SEGM * 256];
+- /* Number of EDID read retries left */
+- unsigned read_retries;
+- bool complete;
+-};
+-
+-struct adv7511_state {
+- struct adv7511_platform_data pdata;
+- struct v4l2_subdev sd;
+- struct media_pad pad;
+- struct v4l2_ctrl_handler hdl;
+- int chip_revision;
+- u8 i2c_edid_addr;
+- u8 i2c_pktmem_addr;
+- u8 i2c_cec_addr;
+-
+- struct i2c_client *i2c_cec;
+- struct cec_adapter *cec_adap;
+- u8 cec_addr[ADV7511_MAX_ADDRS];
+- u8 cec_valid_addrs;
+- bool cec_enabled_adap;
+-
+- /* Is the adv7511 powered on? */
+- bool power_on;
+- /* Did we receive hotplug and rx-sense signals? */
+- bool have_monitor;
+- bool enabled_irq;
+- /* timings from s_dv_timings */
+- struct v4l2_dv_timings dv_timings;
+- u32 fmt_code;
+- u32 colorspace;
+- u32 ycbcr_enc;
+- u32 quantization;
+- u32 xfer_func;
+- u32 content_type;
+- /* controls */
+- struct v4l2_ctrl *hdmi_mode_ctrl;
+- struct v4l2_ctrl *hotplug_ctrl;
+- struct v4l2_ctrl *rx_sense_ctrl;
+- struct v4l2_ctrl *have_edid0_ctrl;
+- struct v4l2_ctrl *rgb_quantization_range_ctrl;
+- struct v4l2_ctrl *content_type_ctrl;
+- struct i2c_client *i2c_edid;
+- struct i2c_client *i2c_pktmem;
+- struct adv7511_state_edid edid;
+- /* Running counter of the number of detected EDIDs (for debugging) */
+- unsigned edid_detect_counter;
+- struct workqueue_struct *work_queue;
+- struct delayed_work edid_handler; /* work entry */
+-};
+-
+-static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
+-static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
+-static void adv7511_setup(struct v4l2_subdev *sd);
+-static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+-static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+-
+-
+-static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
+- .type = V4L2_DV_BT_656_1120,
+- /* keep this initialization for compatibility with GCC < 4.4.6 */
+- .reserved = { 0 },
+- V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
+- ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+- V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+- V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+- V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+- V4L2_DV_BT_CAP_CUSTOM)
+-};
+-
+-static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
+-{
+- return container_of(sd, struct adv7511_state, sd);
+-}
+-
+-static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+-{
+- return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
+-}
+-
+-/* ------------------------ I2C ----------------------------------------------- */
+-
+-static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
+- u8 command, bool check)
+-{
+- union i2c_smbus_data data;
+-
+- if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+- I2C_SMBUS_READ, command,
+- I2C_SMBUS_BYTE_DATA, &data))
+- return data.byte;
+- if (check)
+- v4l_err(client, "error reading %02x, %02x\n",
+- client->addr, command);
+- return -1;
+-}
+-
+-static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
+-{
+- int i;
+- for (i = 0; i < 3; i++) {
+- int ret = adv_smbus_read_byte_data_check(client, command, true);
+- if (ret >= 0) {
+- if (i)
+- v4l_err(client, "read ok after %d retries\n", i);
+- return ret;
+- }
+- }
+- v4l_err(client, "read failed\n");
+- return -1;
+-}
+-
+-static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(sd);
+-
+- return adv_smbus_read_byte_data(client, reg);
+-}
+-
+-static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(sd);
+- int ret;
+- int i;
+-
+- for (i = 0; i < 3; i++) {
+- ret = i2c_smbus_write_byte_data(client, reg, val);
+- if (ret == 0)
+- return 0;
+- }
+- v4l2_err(sd, "%s: i2c write error\n", __func__);
+- return ret;
+-}
+-
+-/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
+- and then the value-mask (to be OR-ed). */
+-static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
+-{
+- adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
+-}
+-
+-static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
+- u8 command, unsigned length, u8 *values)
+-{
+- union i2c_smbus_data data;
+- int ret;
+-
+- if (length > I2C_SMBUS_BLOCK_MAX)
+- length = I2C_SMBUS_BLOCK_MAX;
+- data.block[0] = length;
+-
+- ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+- I2C_SMBUS_READ, command,
+- I2C_SMBUS_I2C_BLOCK_DATA, &data);
+- memcpy(values, data.block + 1, length);
+- return ret;
+-}
+-
+-static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- int i;
+- int err = 0;
+-
+- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+-
+- for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
+- err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
+- I2C_SMBUS_BLOCK_MAX, buf + i);
+- if (err)
+- v4l2_err(sd, "%s: i2c read error\n", __func__);
+-}
+-
+-static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- return i2c_smbus_read_byte_data(state->i2c_cec, reg);
+-}
+-
+-static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- int ret;
+- int i;
+-
+- for (i = 0; i < 3; i++) {
+- ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
+- if (ret == 0)
+- return 0;
+- }
+- v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
+- return ret;
+-}
+-
+-static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
+- u8 val)
+-{
+- return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
+-}
+-
+-static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
+-}
+-
+-static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- int ret;
+- int i;
+-
+- for (i = 0; i < 3; i++) {
+- ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
+- if (ret == 0)
+- return 0;
+- }
+- v4l2_err(sd, "%s: i2c write error\n", __func__);
+- return ret;
+-}
+-
+-/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
+- and then the value-mask (to be OR-ed). */
+-static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
+-{
+- adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
+-}
+-
+-static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
+-{
+- return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
+-}
+-
+-static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
+-{
+- return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
+-}
+-
+-static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
+-{
+- adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
+-}
+-
+-static void adv7511_csc_coeff(struct v4l2_subdev *sd,
+- u16 A1, u16 A2, u16 A3, u16 A4,
+- u16 B1, u16 B2, u16 B3, u16 B4,
+- u16 C1, u16 C2, u16 C3, u16 C4)
+-{
+- /* A */
+- adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
+- adv7511_wr(sd, 0x19, A1);
+- adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
+- adv7511_wr(sd, 0x1B, A2);
+- adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
+- adv7511_wr(sd, 0x1d, A3);
+- adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
+- adv7511_wr(sd, 0x1f, A4);
+-
+- /* B */
+- adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
+- adv7511_wr(sd, 0x21, B1);
+- adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
+- adv7511_wr(sd, 0x23, B2);
+- adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
+- adv7511_wr(sd, 0x25, B3);
+- adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
+- adv7511_wr(sd, 0x27, B4);
+-
+- /* C */
+- adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
+- adv7511_wr(sd, 0x29, C1);
+- adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
+- adv7511_wr(sd, 0x2B, C2);
+- adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
+- adv7511_wr(sd, 0x2D, C3);
+- adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
+- adv7511_wr(sd, 0x2F, C4);
+-}
+-
+-static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
+-{
+- if (enable) {
+- u8 csc_mode = 0;
+- adv7511_csc_conversion_mode(sd, csc_mode);
+- adv7511_csc_coeff(sd,
+- 4096-564, 0, 0, 256,
+- 0, 4096-564, 0, 256,
+- 0, 0, 4096-564, 256);
+- /* enable CSC */
+- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+- /* AVI infoframe: Limited range RGB (16-235) */
+- adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+- } else {
+- /* disable CSC */
+- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+- /* AVI infoframe: Full range RGB (0-255) */
+- adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
+- }
+-}
+-
+-static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- /* Only makes sense for RGB formats */
+- if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
+- /* so just keep quantization */
+- adv7511_csc_rgb_full2limit(sd, false);
+- return;
+- }
+-
+- switch (ctrl->val) {
+- case V4L2_DV_RGB_RANGE_AUTO:
+- /* automatic */
+- if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
+- /* CE format, RGB limited range (16-235) */
+- adv7511_csc_rgb_full2limit(sd, true);
+- } else {
+- /* not CE format, RGB full range (0-255) */
+- adv7511_csc_rgb_full2limit(sd, false);
+- }
+- break;
+- case V4L2_DV_RGB_RANGE_LIMITED:
+- /* RGB limited range (16-235) */
+- adv7511_csc_rgb_full2limit(sd, true);
+- break;
+- case V4L2_DV_RGB_RANGE_FULL:
+- /* RGB full range (0-255) */
+- adv7511_csc_rgb_full2limit(sd, false);
+- break;
+- }
+-}
+-
+-/* ------------------------------ CTRL OPS ------------------------------ */
+-
+-static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
+-{
+- struct v4l2_subdev *sd = to_sd(ctrl);
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+-
+- if (state->hdmi_mode_ctrl == ctrl) {
+- /* Set HDMI or DVI-D */
+- adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+- return 0;
+- }
+- if (state->rgb_quantization_range_ctrl == ctrl) {
+- adv7511_set_rgb_quantization_mode(sd, ctrl);
+- return 0;
+- }
+- if (state->content_type_ctrl == ctrl) {
+- u8 itc, cn;
+-
+- state->content_type = ctrl->val;
+- itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+- cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
+- adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
+- adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
+- return 0;
+- }
+-
+- return -EINVAL;
+-}
+-
+-static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
+- .s_ctrl = adv7511_s_ctrl,
+-};
+-
+-/* ---------------------------- CORE OPS ------------------------------------------- */
+-
+-#ifdef CONFIG_VIDEO_ADV_DEBUG
+-static void adv7511_inv_register(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- v4l2_info(sd, "0x000-0x0ff: Main Map\n");
+- if (state->i2c_cec)
+- v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
+-}
+-
+-static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- reg->size = 1;
+- switch (reg->reg >> 8) {
+- case 0:
+- reg->val = adv7511_rd(sd, reg->reg & 0xff);
+- break;
+- case 1:
+- if (state->i2c_cec) {
+- reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
+- break;
+- }
+- /* fall through */
+- default:
+- v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+- adv7511_inv_register(sd);
+- break;
+- }
+- return 0;
+-}
+-
+-static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- switch (reg->reg >> 8) {
+- case 0:
+- adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
+- break;
+- case 1:
+- if (state->i2c_cec) {
+- adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
+- break;
+- }
+- /* fall through */
+- default:
+- v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+- adv7511_inv_register(sd);
+- break;
+- }
+- return 0;
+-}
+-#endif
+-
+-struct adv7511_cfg_read_infoframe {
+- const char *desc;
+- u8 present_reg;
+- u8 present_mask;
+- u8 header[3];
+- u16 payload_addr;
+-};
+-
+-static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
+-{
+- u8 csum = 0;
+- size_t i;
+-
+- /* compute checksum */
+- for (i = 0; i < size; i++)
+- csum += ptr[i];
+-
+- return 256 - csum;
+-}
+-
+-static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
+-{
+- struct i2c_client *client = v4l2_get_subdevdata(sd);
+- struct device *dev = &client->dev;
+- union hdmi_infoframe frame;
+- u8 buffer[32];
+- u8 len;
+- int i;
+-
+- if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
+- v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
+- return;
+- }
+-
+- memcpy(buffer, cri->header, sizeof(cri->header));
+-
+- len = buffer[2];
+-
+- if (len + 4 > sizeof(buffer)) {
+- v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
+- return;
+- }
+-
+- if (cri->payload_addr >= 0x100) {
+- for (i = 0; i < len; i++)
+- buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
+- } else {
+- for (i = 0; i < len; i++)
+- buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
+- }
+- buffer[3] = 0;
+- buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
+-
+- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
+- v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
+- return;
+- }
+-
+- hdmi_infoframe_log(KERN_INFO, dev, &frame);
+-}
+-
+-static void adv7511_log_infoframes(struct v4l2_subdev *sd)
+-{
+- static const struct adv7511_cfg_read_infoframe cri[] = {
+- { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
+- { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
+- { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
+- };
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(cri); i++)
+- log_infoframe(sd, &cri[i]);
+-}
+-
+-static int adv7511_log_status(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct adv7511_state_edid *edid = &state->edid;
+- int i;
+-
+- static const char * const states[] = {
+- "in reset",
+- "reading EDID",
+- "idle",
+- "initializing HDCP",
+- "HDCP enabled",
+- "initializing HDCP repeater",
+- "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
+- };
+- static const char * const errors[] = {
+- "no error",
+- "bad receiver BKSV",
+- "Ri mismatch",
+- "Pj mismatch",
+- "i2c error",
+- "timed out",
+- "max repeater cascade exceeded",
+- "hash check failed",
+- "too many devices",
+- "9", "A", "B", "C", "D", "E", "F"
+- };
+-
+- v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
+- v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
+- (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
+- (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
+- edid->segments ? "found" : "no",
+- edid->blocks);
+- v4l2_info(sd, "%s output %s\n",
+- (adv7511_rd(sd, 0xaf) & 0x02) ?
+- "HDMI" : "DVI-D",
+- (adv7511_rd(sd, 0xa1) & 0x3c) ?
+- "disabled" : "enabled");
+- v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
+- states[adv7511_rd(sd, 0xc8) & 0xf],
+- errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
+- adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
+- v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+- if (adv7511_rd(sd, 0xaf) & 0x02) {
+- /* HDMI only */
+- u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
+- u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
+- adv7511_rd(sd, 0x02) << 8 |
+- adv7511_rd(sd, 0x03);
+- u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
+- u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
+- u32 CTS;
+-
+- if (manual_cts)
+- CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
+- adv7511_rd(sd, 0x08) << 8 |
+- adv7511_rd(sd, 0x09);
+- else
+- CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
+- adv7511_rd(sd, 0x05) << 8 |
+- adv7511_rd(sd, 0x06);
+- v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
+- manual_cts ? "manual" : "automatic", N, CTS);
+- v4l2_info(sd, "VIC: detected %d, sent %d\n",
+- vic_detect, vic_sent);
+- adv7511_log_infoframes(sd);
+- }
+- if (state->dv_timings.type == V4L2_DV_BT_656_1120)
+- v4l2_print_dv_timings(sd->name, "timings: ",
+- &state->dv_timings, false);
+- else
+- v4l2_info(sd, "no timings set\n");
+- v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
+-
+- if (state->i2c_cec == NULL)
+- return 0;
+-
+- v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
+-
+- v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
+- "enabled" : "disabled");
+- if (state->cec_enabled_adap) {
+- for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+- bool is_valid = state->cec_valid_addrs & (1 << i);
+-
+- if (is_valid)
+- v4l2_info(sd, "CEC Logical Address: 0x%x\n",
+- state->cec_addr[i]);
+- }
+- }
+- v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
+- return 0;
+-}
+-
+-/* Power up/down adv7511 */
+-static int adv7511_s_power(struct v4l2_subdev *sd, int on)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- const int retries = 20;
+- int i;
+-
+- v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
+-
+- state->power_on = on;
+-
+- if (!on) {
+- /* Power down */
+- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+- return true;
+- }
+-
+- /* Power up */
+- /* The adv7511 does not always come up immediately.
+- Retry multiple times. */
+- for (i = 0; i < retries; i++) {
+- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
+- if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
+- break;
+- adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+- msleep(10);
+- }
+- if (i == retries) {
+- v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
+- adv7511_s_power(sd, 0);
+- return false;
+- }
+- if (i > 1)
+- v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
+-
+- /* Reserved registers that must be set */
+- adv7511_wr(sd, 0x98, 0x03);
+- adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
+- adv7511_wr(sd, 0x9c, 0x30);
+- adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
+- adv7511_wr(sd, 0xa2, 0xa4);
+- adv7511_wr(sd, 0xa3, 0xa4);
+- adv7511_wr(sd, 0xe0, 0xd0);
+- adv7511_wr(sd, 0xf9, 0x00);
+-
+- adv7511_wr(sd, 0x43, state->i2c_edid_addr);
+- adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
+-
+- /* Set number of attempts to read the EDID */
+- adv7511_wr(sd, 0xc9, 0xf);
+- return true;
+-}
+-
+-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+-{
+- struct adv7511_state *state = cec_get_drvdata(adap);
+- struct v4l2_subdev *sd = &state->sd;
+-
+- if (state->i2c_cec == NULL)
+- return -EIO;
+-
+- if (!state->cec_enabled_adap && enable) {
+- /* power up cec section */
+- adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
+- /* legacy mode and clear all rx buffers */
+- adv7511_cec_write(sd, 0x4a, 0x00);
+- adv7511_cec_write(sd, 0x4a, 0x07);
+- adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
+- /* enabled irqs: */
+- /* tx: ready */
+- /* tx: arbitration lost */
+- /* tx: retry timeout */
+- /* rx: ready 1 */
+- if (state->enabled_irq)
+- adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
+- } else if (state->cec_enabled_adap && !enable) {
+- if (state->enabled_irq)
+- adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
+- /* disable address mask 1-3 */
+- adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
+- /* power down cec section */
+- adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
+- state->cec_valid_addrs = 0;
+- }
+- state->cec_enabled_adap = enable;
+- return 0;
+-}
+-
+-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+-{
+- struct adv7511_state *state = cec_get_drvdata(adap);
+- struct v4l2_subdev *sd = &state->sd;
+- unsigned int i, free_idx = ADV7511_MAX_ADDRS;
+-
+- if (!state->cec_enabled_adap)
+- return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
+-
+- if (addr == CEC_LOG_ADDR_INVALID) {
+- adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
+- state->cec_valid_addrs = 0;
+- return 0;
+- }
+-
+- for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
+- bool is_valid = state->cec_valid_addrs & (1 << i);
+-
+- if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
+- free_idx = i;
+- if (is_valid && state->cec_addr[i] == addr)
+- return 0;
+- }
+- if (i == ADV7511_MAX_ADDRS) {
+- i = free_idx;
+- if (i == ADV7511_MAX_ADDRS)
+- return -ENXIO;
+- }
+- state->cec_addr[i] = addr;
+- state->cec_valid_addrs |= 1 << i;
+-
+- switch (i) {
+- case 0:
+- /* enable address mask 0 */
+- adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
+- /* set address for mask 0 */
+- adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
+- break;
+- case 1:
+- /* enable address mask 1 */
+- adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
+- /* set address for mask 1 */
+- adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
+- break;
+- case 2:
+- /* enable address mask 2 */
+- adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
+- /* set address for mask 1 */
+- adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
+- break;
+- }
+- return 0;
+-}
+-
+-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+- u32 signal_free_time, struct cec_msg *msg)
+-{
+- struct adv7511_state *state = cec_get_drvdata(adap);
+- struct v4l2_subdev *sd = &state->sd;
+- u8 len = msg->len;
+- unsigned int i;
+-
+- v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
+-
+- if (len > 16) {
+- v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
+- return -EINVAL;
+- }
+-
+- /*
+- * The number of retries is the number of attempts - 1, but retry
+- * at least once. It's not clear if a value of 0 is allowed, so
+- * let's do at least one retry.
+- */
+- adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
+-
+- /* clear cec tx irq status */
+- adv7511_wr(sd, 0x97, 0x38);
+-
+- /* write data */
+- for (i = 0; i < len; i++)
+- adv7511_cec_write(sd, i, msg->msg[i]);
+-
+- /* set length (data + header) */
+- adv7511_cec_write(sd, 0x10, len);
+- /* start transmit, enable tx */
+- adv7511_cec_write(sd, 0x11, 0x01);
+- return 0;
+-}
+-
+-static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
+- v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
+- return;
+- }
+-
+- if (tx_raw_status & 0x10) {
+- v4l2_dbg(1, debug, sd,
+- "%s: tx raw: arbitration lost\n", __func__);
+- cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
+- 1, 0, 0, 0);
+- return;
+- }
+- if (tx_raw_status & 0x08) {
+- u8 status;
+- u8 nack_cnt;
+- u8 low_drive_cnt;
+-
+- v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
+- /*
+- * We set this status bit since this hardware performs
+- * retransmissions.
+- */
+- status = CEC_TX_STATUS_MAX_RETRIES;
+- nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
+- if (nack_cnt)
+- status |= CEC_TX_STATUS_NACK;
+- low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
+- if (low_drive_cnt)
+- status |= CEC_TX_STATUS_LOW_DRIVE;
+- cec_transmit_done(state->cec_adap, status,
+- 0, nack_cnt, low_drive_cnt, 0);
+- return;
+- }
+- if (tx_raw_status & 0x20) {
+- v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
+- cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+- return;
+- }
+-}
+-
+-static const struct cec_adap_ops adv7511_cec_adap_ops = {
+- .adap_enable = adv7511_cec_adap_enable,
+- .adap_log_addr = adv7511_cec_adap_log_addr,
+- .adap_transmit = adv7511_cec_adap_transmit,
+-};
+-#endif
+-
+-/* Enable interrupts */
+-static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
+- u8 irqs_rd;
+- int retries = 100;
+-
+- v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
+-
+- if (state->enabled_irq == enable)
+- return;
+- state->enabled_irq = enable;
+-
+- /* The datasheet says that the EDID ready interrupt should be
+- disabled if there is no hotplug. */
+- if (!enable)
+- irqs = 0;
+- else if (adv7511_have_hotplug(sd))
+- irqs |= MASK_ADV7511_EDID_RDY_INT;
+-
+- /*
+- * This i2c write can fail (approx. 1 in 1000 writes). But it
+- * is essential that this register is correct, so retry it
+- * multiple times.
+- *
+- * Note that the i2c write does not report an error, but the readback
+- * clearly shows the wrong value.
+- */
+- do {
+- adv7511_wr(sd, 0x94, irqs);
+- irqs_rd = adv7511_rd(sd, 0x94);
+- } while (retries-- && irqs_rd != irqs);
+-
+- if (irqs_rd != irqs)
+- v4l2_err(sd, "Could not set interrupts: hw failure?\n");
+-
+- adv7511_wr_and_or(sd, 0x95, 0xc0,
+- (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
+-}
+-
+-/* Interrupt handler */
+-static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+-{
+- u8 irq_status;
+- u8 cec_irq;
+-
+- /* disable interrupts to prevent a race condition */
+- adv7511_set_isr(sd, false);
+- irq_status = adv7511_rd(sd, 0x96);
+- cec_irq = adv7511_rd(sd, 0x97);
+- /* clear detected interrupts */
+- adv7511_wr(sd, 0x96, irq_status);
+- adv7511_wr(sd, 0x97, cec_irq);
+-
+- v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
+- irq_status, cec_irq);
+-
+- if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
+- adv7511_check_monitor_present_status(sd);
+- if (irq_status & MASK_ADV7511_EDID_RDY_INT)
+- adv7511_check_edid_status(sd);
+-
+-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+- if (cec_irq & 0x38)
+- adv_cec_tx_raw_status(sd, cec_irq);
+-
+- if (cec_irq & 1) {
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct cec_msg msg;
+-
+- msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
+-
+- v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
+- msg.len);
+-
+- if (msg.len > 16)
+- msg.len = 16;
+-
+- if (msg.len) {
+- u8 i;
+-
+- for (i = 0; i < msg.len; i++)
+- msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
+-
+- adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
+- adv7511_cec_write(sd, 0x4a, 1);
+- cec_received_msg(state->cec_adap, &msg);
+- }
+- }
+-#endif
+-
+- /* enable interrupts */
+- adv7511_set_isr(sd, true);
+-
+- if (handled)
+- *handled = true;
+- return 0;
+-}
+-
+-static const struct v4l2_subdev_core_ops adv7511_core_ops = {
+- .log_status = adv7511_log_status,
+-#ifdef CONFIG_VIDEO_ADV_DEBUG
+- .g_register = adv7511_g_register,
+- .s_register = adv7511_s_register,
+-#endif
+- .s_power = adv7511_s_power,
+- .interrupt_service_routine = adv7511_isr,
+-};
+-
+-/* ------------------------------ VIDEO OPS ------------------------------ */
+-
+-/* Enable/disable adv7511 output */
+-static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+- adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
+- if (enable) {
+- adv7511_check_monitor_present_status(sd);
+- } else {
+- adv7511_s_power(sd, 0);
+- state->have_monitor = false;
+- }
+- return 0;
+-}
+-
+-static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
+- struct v4l2_dv_timings *timings)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct v4l2_bt_timings *bt = &timings->bt;
+- u32 fps;
+-
+- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+-
+- /* quick sanity check */
+- if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
+- return -EINVAL;
+-
+- /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
+- if the format is one of the CEA or DMT timings. */
+- v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
+-
+- /* save timings */
+- state->dv_timings = *timings;
+-
+- /* set h/vsync polarities */
+- adv7511_wr_and_or(sd, 0x17, 0x9f,
+- ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
+- ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
+-
+- fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
+- switch (fps) {
+- case 24:
+- adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
+- break;
+- case 25:
+- adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
+- break;
+- case 30:
+- adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
+- break;
+- default:
+- adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
+- break;
+- }
+-
+- /* update quantization range based on new dv_timings */
+- adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
+-
+- return 0;
+-}
+-
+-static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
+- struct v4l2_dv_timings *timings)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+-
+- if (!timings)
+- return -EINVAL;
+-
+- *timings = state->dv_timings;
+-
+- return 0;
+-}
+-
+-static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
+- struct v4l2_enum_dv_timings *timings)
+-{
+- if (timings->pad != 0)
+- return -EINVAL;
+-
+- return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
+-}
+-
+-static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
+- struct v4l2_dv_timings_cap *cap)
+-{
+- if (cap->pad != 0)
+- return -EINVAL;
+-
+- *cap = adv7511_timings_cap;
+- return 0;
+-}
+-
+-static const struct v4l2_subdev_video_ops adv7511_video_ops = {
+- .s_stream = adv7511_s_stream,
+- .s_dv_timings = adv7511_s_dv_timings,
+- .g_dv_timings = adv7511_g_dv_timings,
+-};
+-
+-/* ------------------------------ AUDIO OPS ------------------------------ */
+-static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
+-{
+- v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+-
+- if (enable)
+- adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
+- else
+- adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
+-
+- return 0;
+-}
+-
+-static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+-{
+- u32 N;
+-
+- switch (freq) {
+- case 32000: N = 4096; break;
+- case 44100: N = 6272; break;
+- case 48000: N = 6144; break;
+- case 88200: N = 12544; break;
+- case 96000: N = 12288; break;
+- case 176400: N = 25088; break;
+- case 192000: N = 24576; break;
+- default:
+- return -EINVAL;
+- }
+-
+- /* Set N (used with CTS to regenerate the audio clock) */
+- adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
+- adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
+- adv7511_wr(sd, 0x03, N & 0xff);
+-
+- return 0;
+-}
+-
+-static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+-{
+- u32 i2s_sf;
+-
+- switch (freq) {
+- case 32000: i2s_sf = 0x30; break;
+- case 44100: i2s_sf = 0x00; break;
+- case 48000: i2s_sf = 0x20; break;
+- case 88200: i2s_sf = 0x80; break;
+- case 96000: i2s_sf = 0xa0; break;
+- case 176400: i2s_sf = 0xc0; break;
+- case 192000: i2s_sf = 0xe0; break;
+- default:
+- return -EINVAL;
+- }
+-
+- /* Set sampling frequency for I2S audio to 48 kHz */
+- adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
+-
+- return 0;
+-}
+-
+-static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
+-{
+- /* Only 2 channels in use for application */
+- adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
+- /* Speaker mapping */
+- adv7511_wr(sd, 0x76, 0x00);
+-
+- /* 16 bit audio word length */
+- adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
+-
+- return 0;
+-}
+-
+-static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
+- .s_stream = adv7511_s_audio_stream,
+- .s_clock_freq = adv7511_s_clock_freq,
+- .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
+- .s_routing = adv7511_s_routing,
+-};
+-
+-/* ---------------------------- PAD OPS ------------------------------------- */
+-
+-static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- memset(edid->reserved, 0, sizeof(edid->reserved));
+-
+- if (edid->pad != 0)
+- return -EINVAL;
+-
+- if (edid->start_block == 0 && edid->blocks == 0) {
+- edid->blocks = state->edid.segments * 2;
+- return 0;
+- }
+-
+- if (state->edid.segments == 0)
+- return -ENODATA;
+-
+- if (edid->start_block >= state->edid.segments * 2)
+- return -EINVAL;
+-
+- if (edid->start_block + edid->blocks > state->edid.segments * 2)
+- edid->blocks = state->edid.segments * 2 - edid->start_block;
+-
+- memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
+- 128 * edid->blocks);
+-
+- return 0;
+-}
+-
+-static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
+- struct v4l2_subdev_pad_config *cfg,
+- struct v4l2_subdev_mbus_code_enum *code)
+-{
+- if (code->pad != 0)
+- return -EINVAL;
+-
+- switch (code->index) {
+- case 0:
+- code->code = MEDIA_BUS_FMT_RGB888_1X24;
+- break;
+- case 1:
+- code->code = MEDIA_BUS_FMT_YUYV8_1X16;
+- break;
+- case 2:
+- code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+- break;
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+-static void adv7511_fill_format(struct adv7511_state *state,
+- struct v4l2_mbus_framefmt *format)
+-{
+- format->width = state->dv_timings.bt.width;
+- format->height = state->dv_timings.bt.height;
+- format->field = V4L2_FIELD_NONE;
+-}
+-
+-static int adv7511_get_fmt(struct v4l2_subdev *sd,
+- struct v4l2_subdev_pad_config *cfg,
+- struct v4l2_subdev_format *format)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- if (format->pad != 0)
+- return -EINVAL;
+-
+- memset(&format->format, 0, sizeof(format->format));
+- adv7511_fill_format(state, &format->format);
+-
+- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+- struct v4l2_mbus_framefmt *fmt;
+-
+- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+- format->format.code = fmt->code;
+- format->format.colorspace = fmt->colorspace;
+- format->format.ycbcr_enc = fmt->ycbcr_enc;
+- format->format.quantization = fmt->quantization;
+- format->format.xfer_func = fmt->xfer_func;
+- } else {
+- format->format.code = state->fmt_code;
+- format->format.colorspace = state->colorspace;
+- format->format.ycbcr_enc = state->ycbcr_enc;
+- format->format.quantization = state->quantization;
+- format->format.xfer_func = state->xfer_func;
+- }
+-
+- return 0;
+-}
+-
+-static int adv7511_set_fmt(struct v4l2_subdev *sd,
+- struct v4l2_subdev_pad_config *cfg,
+- struct v4l2_subdev_format *format)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- /*
+- * Bitfield namings come the CEA-861-F standard, table 8 "Auxiliary
+- * Video Information (AVI) InfoFrame Format"
+- *
+- * c = Colorimetry
+- * ec = Extended Colorimetry
+- * y = RGB or YCbCr
+- * q = RGB Quantization Range
+- * yq = YCC Quantization Range
+- */
+- u8 c = HDMI_COLORIMETRY_NONE;
+- u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+- u8 y = HDMI_COLORSPACE_RGB;
+- u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
+- u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+- u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
+- u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
+-
+- if (format->pad != 0)
+- return -EINVAL;
+- switch (format->format.code) {
+- case MEDIA_BUS_FMT_UYVY8_1X16:
+- case MEDIA_BUS_FMT_YUYV8_1X16:
+- case MEDIA_BUS_FMT_RGB888_1X24:
+- break;
+- default:
+- return -EINVAL;
+- }
+-
+- adv7511_fill_format(state, &format->format);
+- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+- struct v4l2_mbus_framefmt *fmt;
+-
+- fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
+- fmt->code = format->format.code;
+- fmt->colorspace = format->format.colorspace;
+- fmt->ycbcr_enc = format->format.ycbcr_enc;
+- fmt->quantization = format->format.quantization;
+- fmt->xfer_func = format->format.xfer_func;
+- return 0;
+- }
+-
+- switch (format->format.code) {
+- case MEDIA_BUS_FMT_UYVY8_1X16:
+- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+- adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
+- y = HDMI_COLORSPACE_YUV422;
+- break;
+- case MEDIA_BUS_FMT_YUYV8_1X16:
+- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
+- adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
+- y = HDMI_COLORSPACE_YUV422;
+- break;
+- case MEDIA_BUS_FMT_RGB888_1X24:
+- default:
+- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
+- adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
+- break;
+- }
+- state->fmt_code = format->format.code;
+- state->colorspace = format->format.colorspace;
+- state->ycbcr_enc = format->format.ycbcr_enc;
+- state->quantization = format->format.quantization;
+- state->xfer_func = format->format.xfer_func;
+-
+- switch (format->format.colorspace) {
+- case V4L2_COLORSPACE_OPRGB:
+- c = HDMI_COLORIMETRY_EXTENDED;
+- ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
+- HDMI_EXTENDED_COLORIMETRY_OPRGB;
+- break;
+- case V4L2_COLORSPACE_SMPTE170M:
+- c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
+- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
+- c = HDMI_COLORIMETRY_EXTENDED;
+- ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+- }
+- break;
+- case V4L2_COLORSPACE_REC709:
+- c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
+- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
+- c = HDMI_COLORIMETRY_EXTENDED;
+- ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+- }
+- break;
+- case V4L2_COLORSPACE_SRGB:
+- c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
+- ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
+- HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+- break;
+- case V4L2_COLORSPACE_BT2020:
+- c = HDMI_COLORIMETRY_EXTENDED;
+- if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
+- ec = 5; /* Not yet available in hdmi.h */
+- else
+- ec = 6; /* Not yet available in hdmi.h */
+- break;
+- default:
+- break;
+- }
+-
+- /*
+- * CEA-861-F says that for RGB formats the YCC range must match the
+- * RGB range, although sources should ignore the YCC range.
+- *
+- * The RGB quantization range shouldn't be non-zero if the EDID doesn't
+- * have the Q bit set in the Video Capabilities Data Block, however this
+- * isn't checked at the moment. The assumption is that the application
+- * knows the EDID and can detect this.
+- *
+- * The same is true for the YCC quantization range: non-standard YCC
+- * quantization ranges should only be sent if the EDID has the YQ bit
+- * set in the Video Capabilities Data Block.
+- */
+- switch (format->format.quantization) {
+- case V4L2_QUANTIZATION_FULL_RANGE:
+- q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+- HDMI_QUANTIZATION_RANGE_FULL;
+- yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
+- break;
+- case V4L2_QUANTIZATION_LIM_RANGE:
+- q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
+- HDMI_QUANTIZATION_RANGE_LIMITED;
+- yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+- break;
+- }
+-
+- adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
+- adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
+- adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
+- adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
+- adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
+- adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
+- adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
+-
+- return 0;
+-}
+-
+-static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
+- .get_edid = adv7511_get_edid,
+- .enum_mbus_code = adv7511_enum_mbus_code,
+- .get_fmt = adv7511_get_fmt,
+- .set_fmt = adv7511_set_fmt,
+- .enum_dv_timings = adv7511_enum_dv_timings,
+- .dv_timings_cap = adv7511_dv_timings_cap,
+-};
+-
+-/* --------------------- SUBDEV OPS --------------------------------------- */
+-
+-static const struct v4l2_subdev_ops adv7511_ops = {
+- .core = &adv7511_core_ops,
+- .pad = &adv7511_pad_ops,
+- .video = &adv7511_video_ops,
+- .audio = &adv7511_audio_ops,
+-};
+-
+-/* ----------------------------------------------------------------------- */
+-static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
+-{
+- if (debug >= lvl) {
+- int i, j;
+- v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
+- for (i = 0; i < 256; i += 16) {
+- u8 b[128];
+- u8 *bp = b;
+- if (i == 128)
+- v4l2_dbg(lvl, debug, sd, "\n");
+- for (j = i; j < i + 16; j++) {
+- sprintf(bp, "0x%02x, ", buf[j]);
+- bp += 6;
+- }
+- bp[0] = '\0';
+- v4l2_dbg(lvl, debug, sd, "%s\n", b);
+- }
+- }
+-}
+-
+-static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct adv7511_edid_detect ed;
+-
+- /* We failed to read the EDID, so send an event for this. */
+- ed.present = false;
+- ed.segment = adv7511_rd(sd, 0xc4);
+- ed.phys_addr = CEC_PHYS_ADDR_INVALID;
+- cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
+- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
+-}
+-
+-static void adv7511_edid_handler(struct work_struct *work)
+-{
+- struct delayed_work *dwork = to_delayed_work(work);
+- struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+- struct v4l2_subdev *sd = &state->sd;
+-
+- v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+-
+- if (adv7511_check_edid_status(sd)) {
+- /* Return if we received the EDID. */
+- return;
+- }
+-
+- if (adv7511_have_hotplug(sd)) {
+- /* We must retry reading the EDID several times, it is possible
+- * that initially the EDID couldn't be read due to i2c errors
+- * (DVI connectors are particularly prone to this problem). */
+- if (state->edid.read_retries) {
+- state->edid.read_retries--;
+- v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
+- state->have_monitor = false;
+- adv7511_s_power(sd, false);
+- adv7511_s_power(sd, true);
+- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+- return;
+- }
+- }
+-
+- /* We failed to read the EDID, so send an event for this. */
+- adv7511_notify_no_edid(sd);
+- v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+-}
+-
+-static void adv7511_audio_setup(struct v4l2_subdev *sd)
+-{
+- v4l2_dbg(1, debug, sd, "%s\n", __func__);
+-
+- adv7511_s_i2s_clock_freq(sd, 48000);
+- adv7511_s_clock_freq(sd, 48000);
+- adv7511_s_routing(sd, 0, 0, 0);
+-}
+-
+-/* Configure hdmi transmitter. */
+-static void adv7511_setup(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- v4l2_dbg(1, debug, sd, "%s\n", __func__);
+-
+- /* Input format: RGB 4:4:4 */
+- adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+- /* Output format: RGB 4:4:4 */
+- adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+- /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+- adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+- /* Disable pixel repetition */
+- adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+- /* Disable CSC */
+- adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+- /* Output format: RGB 4:4:4, Active Format Information is valid,
+- * underscanned */
+- adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+- /* AVI Info frame packet enable, Audio Info frame disable */
+- adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+- /* Colorimetry, Active format aspect ratio: same as picure. */
+- adv7511_wr(sd, 0x56, 0xa8);
+- /* No encryption */
+- adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+-
+- /* Positive clk edge capture for input video clock */
+- adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+-
+- adv7511_audio_setup(sd);
+-
+- v4l2_ctrl_handler_setup(&state->hdl);
+-}
+-
+-static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
+-{
+- struct adv7511_monitor_detect mdt;
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- mdt.present = state->have_monitor;
+- v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
+-}
+-
+-static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- /* read hotplug and rx-sense state */
+- u8 status = adv7511_rd(sd, 0x42);
+-
+- v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
+- __func__,
+- status,
+- status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
+- status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
+-
+- /* update read only ctrls */
+- v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+- v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+-
+- if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+- v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+- if (!state->have_monitor) {
+- v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
+- state->have_monitor = true;
+- adv7511_set_isr(sd, true);
+- if (!adv7511_s_power(sd, true)) {
+- v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
+- return;
+- }
+- adv7511_setup(sd);
+- adv7511_notify_monitor_detect(sd);
+- state->edid.read_retries = EDID_MAX_RETRIES;
+- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+- }
+- } else if (status & MASK_ADV7511_HPD_DETECT) {
+- v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+- state->edid.read_retries = EDID_MAX_RETRIES;
+- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+- } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
+- v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
+- if (state->have_monitor) {
+- v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
+- state->have_monitor = false;
+- adv7511_notify_monitor_detect(sd);
+- }
+- adv7511_s_power(sd, false);
+- memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+- adv7511_notify_no_edid(sd);
+- }
+-}
+-
+-static bool edid_block_verify_crc(u8 *edid_block)
+-{
+- u8 sum = 0;
+- int i;
+-
+- for (i = 0; i < 128; i++)
+- sum += edid_block[i];
+- return sum == 0;
+-}
+-
+-static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- u32 blocks = state->edid.blocks;
+- u8 *data = state->edid.data;
+-
+- if (!edid_block_verify_crc(&data[segment * 256]))
+- return false;
+- if ((segment + 1) * 2 <= blocks)
+- return edid_block_verify_crc(&data[segment * 256 + 128]);
+- return true;
+-}
+-
+-static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
+-{
+- static const u8 hdmi_header[] = {
+- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
+- };
+- struct adv7511_state *state = get_adv7511_state(sd);
+- u8 *data = state->edid.data;
+-
+- if (segment != 0)
+- return true;
+- return !memcmp(data, hdmi_header, sizeof(hdmi_header));
+-}
+-
+-static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- u8 edidRdy = adv7511_rd(sd, 0xc5);
+-
+- v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
+- __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+-
+- if (state->edid.complete)
+- return true;
+-
+- if (edidRdy & MASK_ADV7511_EDID_RDY) {
+- int segment = adv7511_rd(sd, 0xc4);
+- struct adv7511_edid_detect ed;
+-
+- if (segment >= EDID_MAX_SEGM) {
+- v4l2_err(sd, "edid segment number too big\n");
+- return false;
+- }
+- v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
+- adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
+- adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
+- if (segment == 0) {
+- state->edid.blocks = state->edid.data[0x7e] + 1;
+- v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
+- }
+- if (!edid_verify_crc(sd, segment) ||
+- !edid_verify_header(sd, segment)) {
+- /* edid crc error, force reread of edid segment */
+- v4l2_err(sd, "%s: edid crc or header error\n", __func__);
+- state->have_monitor = false;
+- adv7511_s_power(sd, false);
+- adv7511_s_power(sd, true);
+- return false;
+- }
+- /* one more segment read ok */
+- state->edid.segments = segment + 1;
+- v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
+- if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+- /* Request next EDID segment */
+- v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+- adv7511_wr(sd, 0xc9, 0xf);
+- adv7511_wr(sd, 0xc4, state->edid.segments);
+- state->edid.read_retries = EDID_MAX_RETRIES;
+- queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+- return false;
+- }
+-
+- v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
+- state->edid.complete = true;
+- ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
+- state->edid.segments * 256,
+- NULL);
+- /* report when we have all segments
+- but report only for segment 0
+- */
+- ed.present = true;
+- ed.segment = 0;
+- state->edid_detect_counter++;
+- cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
+- v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+- return ed.present;
+- }
+-
+- return false;
+-}
+-
+-static int adv7511_registered(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct i2c_client *client = v4l2_get_subdevdata(sd);
+- int err;
+-
+- err = cec_register_adapter(state->cec_adap, &client->dev);
+- if (err)
+- cec_delete_adapter(state->cec_adap);
+- return err;
+-}
+-
+-static void adv7511_unregistered(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- cec_unregister_adapter(state->cec_adap);
+-}
+-
+-static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
+- .registered = adv7511_registered,
+- .unregistered = adv7511_unregistered,
+-};
+-
+-/* ----------------------------------------------------------------------- */
+-/* Setup ADV7511 */
+-static void adv7511_init_setup(struct v4l2_subdev *sd)
+-{
+- struct adv7511_state *state = get_adv7511_state(sd);
+- struct adv7511_state_edid *edid = &state->edid;
+- u32 cec_clk = state->pdata.cec_clk;
+- u8 ratio;
+-
+- v4l2_dbg(1, debug, sd, "%s\n", __func__);
+-
+- /* clear all interrupts */
+- adv7511_wr(sd, 0x96, 0xff);
+- adv7511_wr(sd, 0x97, 0xff);
+- /*
+- * Stop HPD from resetting a lot of registers.
+- * It might leave the chip in a partly un-initialized state,
+- * in particular with regards to hotplug bounces.
+- */
+- adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
+- memset(edid, 0, sizeof(struct adv7511_state_edid));
+- state->have_monitor = false;
+- adv7511_set_isr(sd, false);
+- adv7511_s_stream(sd, false);
+- adv7511_s_audio_stream(sd, false);
+-
+- if (state->i2c_cec == NULL)
+- return;
+-
+- v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
+-
+- /* cec soft reset */
+- adv7511_cec_write(sd, 0x50, 0x01);
+- adv7511_cec_write(sd, 0x50, 0x00);
+-
+- /* legacy mode */
+- adv7511_cec_write(sd, 0x4a, 0x00);
+- adv7511_cec_write(sd, 0x4a, 0x07);
+-
+- if (cec_clk % 750000 != 0)
+- v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
+- __func__, cec_clk);
+-
+- ratio = (cec_clk / 750000) - 1;
+- adv7511_cec_write(sd, 0x4e, ratio << 2);
+-}
+-
+-static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
+-{
+- struct adv7511_state *state;
+- struct adv7511_platform_data *pdata = client->dev.platform_data;
+- struct v4l2_ctrl_handler *hdl;
+- struct v4l2_subdev *sd;
+- u8 chip_id[2];
+- int err = -EIO;
+-
+- /* Check if the adapter supports the needed features */
+- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+- return -EIO;
+-
+- state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
+- if (!state)
+- return -ENOMEM;
+-
+- /* Platform data */
+- if (!pdata) {
+- v4l_err(client, "No platform data!\n");
+- return -ENODEV;
+- }
+- memcpy(&state->pdata, pdata, sizeof(state->pdata));
+- state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+- state->colorspace = V4L2_COLORSPACE_SRGB;
+-
+- sd = &state->sd;
+-
+- v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
+- client->addr << 1);
+-
+- v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+- sd->internal_ops = &adv7511_int_ops;
+-
+- hdl = &state->hdl;
+- v4l2_ctrl_handler_init(hdl, 10);
+- /* add in ascending ID order */
+- state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+- V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
+- 0, V4L2_DV_TX_MODE_DVI_D);
+- state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+- V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
+- state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+- V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
+- state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+- V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
+- state->rgb_quantization_range_ctrl =
+- v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+- V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+- 0, V4L2_DV_RGB_RANGE_AUTO);
+- state->content_type_ctrl =
+- v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+- V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
+- 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
+- sd->ctrl_handler = hdl;
+- if (hdl->error) {
+- err = hdl->error;
+- goto err_hdl;
+- }
+- state->pad.flags = MEDIA_PAD_FL_SINK;
+- sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
+- err = media_entity_pads_init(&sd->entity, 1, &state->pad);
+- if (err)
+- goto err_hdl;
+-
+- /* EDID and CEC i2c addr */
+- state->i2c_edid_addr = state->pdata.i2c_edid << 1;
+- state->i2c_cec_addr = state->pdata.i2c_cec << 1;
+- state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
+-
+- state->chip_revision = adv7511_rd(sd, 0x0);
+- chip_id[0] = adv7511_rd(sd, 0xf5);
+- chip_id[1] = adv7511_rd(sd, 0xf6);
+- if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
+- v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
+- chip_id[1]);
+- err = -EIO;
+- goto err_entity;
+- }
+-
+- state->i2c_edid = i2c_new_dummy(client->adapter,
+- state->i2c_edid_addr >> 1);
+- if (state->i2c_edid == NULL) {
+- v4l2_err(sd, "failed to register edid i2c client\n");
+- err = -ENOMEM;
+- goto err_entity;
+- }
+-
+- adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
+- if (state->pdata.cec_clk < 3000000 ||
+- state->pdata.cec_clk > 100000000) {
+- v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
+- __func__, state->pdata.cec_clk);
+- state->pdata.cec_clk = 0;
+- }
+-
+- if (state->pdata.cec_clk) {
+- state->i2c_cec = i2c_new_dummy(client->adapter,
+- state->i2c_cec_addr >> 1);
+- if (state->i2c_cec == NULL) {
+- v4l2_err(sd, "failed to register cec i2c client\n");
+- err = -ENOMEM;
+- goto err_unreg_edid;
+- }
+- adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
+- } else {
+- adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
+- }
+-
+- state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
+- if (state->i2c_pktmem == NULL) {
+- v4l2_err(sd, "failed to register pktmem i2c client\n");
+- err = -ENOMEM;
+- goto err_unreg_cec;
+- }
+-
+- state->work_queue = create_singlethread_workqueue(sd->name);
+- if (state->work_queue == NULL) {
+- v4l2_err(sd, "could not create workqueue\n");
+- err = -ENOMEM;
+- goto err_unreg_pktmem;
+- }
+-
+- INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+-
+- adv7511_init_setup(sd);
+-
+-#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
+- state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
+- state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
+- ADV7511_MAX_ADDRS);
+- err = PTR_ERR_OR_ZERO(state->cec_adap);
+- if (err) {
+- destroy_workqueue(state->work_queue);
+- goto err_unreg_pktmem;
+- }
+-#endif
+-
+- adv7511_set_isr(sd, true);
+- adv7511_check_monitor_present_status(sd);
+-
+- v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+- client->addr << 1, client->adapter->name);
+- return 0;
+-
+-err_unreg_pktmem:
+- i2c_unregister_device(state->i2c_pktmem);
+-err_unreg_cec:
+- if (state->i2c_cec)
+- i2c_unregister_device(state->i2c_cec);
+-err_unreg_edid:
+- i2c_unregister_device(state->i2c_edid);
+-err_entity:
+- media_entity_cleanup(&sd->entity);
+-err_hdl:
+- v4l2_ctrl_handler_free(&state->hdl);
+- return err;
+-}
+-
+-/* ----------------------------------------------------------------------- */
+-
+-static int adv7511_remove(struct i2c_client *client)
+-{
+- struct v4l2_subdev *sd = i2c_get_clientdata(client);
+- struct adv7511_state *state = get_adv7511_state(sd);
+-
+- state->chip_revision = -1;
+-
+- v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
+- client->addr << 1, client->adapter->name);
+-
+- adv7511_set_isr(sd, false);
+- adv7511_init_setup(sd);
+- cancel_delayed_work(&state->edid_handler);
+- i2c_unregister_device(state->i2c_edid);
+- if (state->i2c_cec)
+- i2c_unregister_device(state->i2c_cec);
+- i2c_unregister_device(state->i2c_pktmem);
+- destroy_workqueue(state->work_queue);
+- v4l2_device_unregister_subdev(sd);
+- media_entity_cleanup(&sd->entity);
+- v4l2_ctrl_handler_free(sd->ctrl_handler);
+- return 0;
+-}
+-
+-/* ----------------------------------------------------------------------- */
+-
+-static const struct i2c_device_id adv7511_id[] = {
+- { "adv7511", 0 },
+- { }
+-};
+-MODULE_DEVICE_TABLE(i2c, adv7511_id);
+-
+-static struct i2c_driver adv7511_driver = {
+- .driver = {
+- .name = "adv7511",
+- },
+- .probe = adv7511_probe,
+- .remove = adv7511_remove,
+- .id_table = adv7511_id,
+-};
+-
+-module_i2c_driver(adv7511_driver);
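
The probe code removed above derives the CEC clock divider written to register 0x4e from cec_clk: the clock must sit between 3 MHz and 100 MHz, should be a multiple of 750 kHz, and the divider field is (cec_clk / 750000 - 1) shifted left by two. A minimal standalone sketch of that arithmetic follows; the register number and bounds come from the code above, everything else (function names, the 12 MHz example) is illustrative. For a 12 MHz CEC clock it yields 0x3c.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the 0x4e divider computation from the removed init code.
     * Returns -1 when cec_clk is outside the 3 MHz..100 MHz window the
     * driver accepts; the driver would then disable CEC entirely. */
    static int cec_ratio_reg(uint32_t cec_clk)
    {
            if (cec_clk < 3000000 || cec_clk > 100000000)
                    return -1;
            if (cec_clk % 750000 != 0)
                    fprintf(stderr, "cec_clk %u not a multiple of 750 kHz\n",
                            cec_clk);
            return ((cec_clk / 750000) - 1) << 2;
    }

    int main(void)
    {
            /* 12 MHz -> divider 15 -> register value 0x3c */
            printf("0x%02x\n", cec_ratio_reg(12000000));
            return 0;
    }
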
+diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
+index 362c3b93636e..5a642b5ad076 100644
+--- a/drivers/media/i2c/mt9m111.c
++++ b/drivers/media/i2c/mt9m111.c
+@@ -1245,9 +1245,11 @@ static int mt9m111_probe(struct i2c_client *client,
+ if (!mt9m111)
+ return -ENOMEM;
+
+- ret = mt9m111_probe_fw(client, mt9m111);
+- if (ret)
+- return ret;
++ if (dev_fwnode(&client->dev)) {
++ ret = mt9m111_probe_fw(client, mt9m111);
++ if (ret)
++ return ret;
++ }
+
+ mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
+ if (IS_ERR(mt9m111->clk))
+diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
+index 54e80a60aa57..63011d4b4738 100644
+--- a/drivers/media/i2c/ov7740.c
++++ b/drivers/media/i2c/ov7740.c
+@@ -785,7 +785,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
+
+ fsize++;
+ }
+-
++ if (i >= ARRAY_SIZE(ov7740_framesizes)) {
++ fsize = &ov7740_framesizes[0];
++ fmt->width = fsize->width;
++ fmt->height = fsize->height;
++ }
+ if (ret_frmsize != NULL)
+ *ret_frmsize = fsize;
+
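
The ov7740 hunk adds a fallback for when the requested size matches nothing in ov7740_framesizes: once the loop runs off the end of the table, the first entry is used and the caller's width/height are clamped to it. Below is a standalone sketch of the lookup-with-fallback pattern, with a made-up two-entry table; the match criterion is simplified and not the driver's exact test.

    #include <stddef.h>
    #include <stdio.h>

    struct framesize { unsigned width, height; };

    /* Hypothetical table standing in for ov7740_framesizes. */
    static const struct framesize sizes[] = {
            { 640, 480 },
            { 320, 240 },
    };

    /* Pick the first entry that fits the request; if nothing matches,
     * fall back to sizes[0] and clamp the request to it, which is the
     * shape of the fix above. */
    static const struct framesize *pick_size(unsigned *w, unsigned *h)
    {
            size_t i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    if (sizes[i].width <= *w && sizes[i].height <= *h)
                            return &sizes[i];

            *w = sizes[0].width;
            *h = sizes[0].height;
            return &sizes[0];
    }

    int main(void)
    {
            unsigned w = 1, h = 1;
            const struct framesize *fs = pick_size(&w, &h);

            printf("1x1 -> %ux%u\n", fs->width, fs->height);
            return 0;
    }
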
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index 9ae481ddd975..e19df5165e78 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -494,6 +494,7 @@ static long media_device_enum_links32(struct media_device *mdev,
+ {
+ struct media_links_enum links;
+ compat_uptr_t pads_ptr, links_ptr;
++ int ret;
+
+ memset(&links, 0, sizeof(links));
+
+@@ -505,7 +506,14 @@ static long media_device_enum_links32(struct media_device *mdev,
+ links.pads = compat_ptr(pads_ptr);
+ links.links = compat_ptr(links_ptr);
+
+- return media_device_enum_links(mdev, &links);
++ ret = media_device_enum_links(mdev, &links);
++ if (ret)
++ return ret;
++
++ if (copy_to_user(ulinks->reserved, links.reserved,
++ sizeof(ulinks->reserved)))
++ return -EFAULT;
++ return 0;
+ }
+
+ #define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
+diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
+index c594aff92e70..9ae04e18e6c6 100644
+--- a/drivers/media/pci/saa7164/saa7164-core.c
++++ b/drivers/media/pci/saa7164/saa7164-core.c
+@@ -1112,16 +1112,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
+ return 0;
+ }
+
++static struct proc_dir_entry *saa7164_pe;
++
+ static int saa7164_proc_create(void)
+ {
+- struct proc_dir_entry *pe;
+-
+- pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show);
+- if (!pe)
++ saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
++ if (!saa7164_pe)
+ return -ENOMEM;
+
+ return 0;
+ }
++
++static void saa7164_proc_destroy(void)
++{
++ if (saa7164_pe)
++ remove_proc_entry("saa7164", NULL);
++}
++#else
++static int saa7164_proc_create(void) { return 0; }
++static void saa7164_proc_destroy(void) {}
+ #endif
+
+ static int saa7164_thread_function(void *data)
+@@ -1493,19 +1502,21 @@ static struct pci_driver saa7164_pci_driver = {
+
+ static int __init saa7164_init(void)
+ {
+- printk(KERN_INFO "saa7164 driver loaded\n");
++ int ret = pci_register_driver(&saa7164_pci_driver);
++
++ if (ret)
++ return ret;
+
+-#ifdef CONFIG_PROC_FS
+ saa7164_proc_create();
+-#endif
+- return pci_register_driver(&saa7164_pci_driver);
++
++ pr_info("saa7164 driver loaded\n");
++
++ return 0;
+ }
+
+ static void __exit saa7164_fini(void)
+ {
+-#ifdef CONFIG_PROC_FS
+- remove_proc_entry("saa7164", NULL);
+-#endif
++ saa7164_proc_destroy();
+ pci_unregister_driver(&saa7164_pci_driver);
+ }
+
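
The saa7164 change replaces the open-coded #ifdef CONFIG_PROC_FS regions in init/exit with a create/destroy helper pair plus empty stubs, so the call sites compile unchanged whether procfs is configured in or not. A compile-time sketch of the stub idiom, with FEATURE standing in for the config option:

    #include <stdio.h>

    #ifdef FEATURE
    static int feature_create(void)
    {
            puts("feature registered");
            return 0;
    }

    static void feature_destroy(void)
    {
            puts("feature removed");
    }
    #else
    /* Stubs keep the call sites free of #ifdef clutter. */
    static int feature_create(void) { return 0; }
    static void feature_destroy(void) {}
    #endif

    int main(void)
    {
            if (feature_create())
                    return 1;
            feature_destroy();
            return 0;
    }
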
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index 8144fe36ad48..de0f192afa8b 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -187,6 +187,7 @@ enum {
+ VIDEO_STREAMING,
+ VIDEO_FRAME_INPRG,
+ VIDEO_STOPPED,
++ VIDEO_CLOCKS_ON,
+ };
+
+ struct aspeed_video_addr {
+@@ -483,19 +484,29 @@ static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
+
+ static void aspeed_video_off(struct aspeed_video *video)
+ {
++ if (!test_bit(VIDEO_CLOCKS_ON, &video->flags))
++ return;
++
+ /* Disable interrupts */
+ aspeed_video_write(video, VE_INTERRUPT_CTRL, 0);
+
+ /* Turn off the relevant clocks */
+ clk_disable_unprepare(video->vclk);
+ clk_disable_unprepare(video->eclk);
++
++ clear_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+
+ static void aspeed_video_on(struct aspeed_video *video)
+ {
++ if (test_bit(VIDEO_CLOCKS_ON, &video->flags))
++ return;
++
+ /* Turn on the relevant clocks */
+ clk_prepare_enable(video->eclk);
+ clk_prepare_enable(video->vclk);
++
++ set_bit(VIDEO_CLOCKS_ON, &video->flags);
+ }
+
+ static void aspeed_video_bufs_done(struct aspeed_video *video,
+@@ -1589,8 +1600,9 @@ static int aspeed_video_init(struct aspeed_video *video)
+ return -ENODEV;
+ }
+
+- rc = devm_request_irq(dev, irq, aspeed_video_irq, IRQF_SHARED,
+- DEVICE_NAME, video);
++ rc = devm_request_threaded_irq(dev, irq, NULL, aspeed_video_irq,
++ IRQF_ONESHOT | IRQF_SHARED, DEVICE_NAME,
++ video);
+ if (rc < 0) {
+ dev_err(dev, "Unable to request IRQ %d\n", irq);
+ return rc;
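
The new VIDEO_CLOCKS_ON bit makes aspeed_video_on() and aspeed_video_off() idempotent, so an unbalanced second call can no longer skew the clk_prepare_enable()/clk_disable_unprepare() counts. A userspace model of the guard (a plain bool here; the driver uses atomic bitops on video->flags):

    #include <stdbool.h>
    #include <stdio.h>

    static bool clocks_on;
    static int enable_count;        /* models the clk framework's refcount */

    static void video_on(void)
    {
            if (clocks_on)
                    return;         /* already on: second call is a no-op */
            enable_count++;
            clocks_on = true;
    }

    static void video_off(void)
    {
            if (!clocks_on)
                    return;         /* never enabled: nothing to undo */
            enable_count--;
            clocks_on = false;
    }

    int main(void)
    {
            video_off();            /* harmless before video_on() */
            video_on();
            video_on();             /* no double enable */
            video_off();
            printf("enable_count=%d\n", enable_count);  /* balanced: 0 */
            return 0;
    }
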
+diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
+index 976f6aa69f41..8f918eb7eb77 100644
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -1739,6 +1739,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ return ret;
+ }
++ ctx->sequence_offset = ~0U;
+ ctx->initialized = 1;
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+@@ -2146,12 +2147,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
+ else if (ctx->display_idx < 0)
+ ctx->hold = true;
+ } else if (decoded_idx == -2) {
++ if (ctx->display_idx >= 0 &&
++ ctx->display_idx < ctx->num_internal_frames)
++ ctx->sequence_offset++;
+ /* no frame was decoded, we still return remaining buffers */
+ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "decoded frame index out of range: %d\n", decoded_idx);
+ } else {
+- val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
++ val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
++ if (ctx->sequence_offset == -1)
++ ctx->sequence_offset = val;
+ val -= ctx->sequence_offset;
+ spin_lock(&ctx->buffer_meta_lock);
+ if (!list_empty(&ctx->buffer_meta_list)) {
+@@ -2304,7 +2310,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+- mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
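
Priming sequence_offset to ~0U and capturing it lazily from the first CODA_RET_DEC_PIC_FRAME_NUM readout makes the reported sequence numbers start at zero no matter where the firmware's frame counter happens to begin. A small model of the lazy-offset idea:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t seq_offset = ~0U;       /* "not captured yet" marker */

    /* Convert a raw firmware frame number into a zero-based sequence,
     * pinning the offset to the first value we ever see. */
    static uint32_t to_sequence(uint32_t fw_frame_num)
    {
            if (seq_offset == ~0U)
                    seq_offset = fw_frame_num;
            return fw_frame_num - seq_offset;
    }

    int main(void)
    {
            /* Firmware counters often don't start at 0 or 1. */
            printf("%u %u %u\n", to_sequence(7), to_sequence(8),
                   to_sequence(9));         /* prints: 0 1 2 */
            return 0;
    }
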
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 6238047273f2..68a585d3af91 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -1024,6 +1024,8 @@ static int coda_encoder_cmd(struct file *file, void *fh,
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
++ flush_work(&ctx->pic_run_work);
++
+ /* If there is no buffer in flight, wake up */
+ if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
+index 61809d2050fa..f0f7ef638c56 100644
+--- a/drivers/media/platform/davinci/vpif_capture.c
++++ b/drivers/media/platform/davinci/vpif_capture.c
+@@ -1376,6 +1376,14 @@ vpif_init_free_channel_objects:
+ return err;
+ }
+
++static inline void free_vpif_objs(void)
++{
++ int i;
++
++ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
++ kfree(vpif_obj.dev[i]);
++}
++
+ static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+@@ -1645,7 +1653,7 @@ static __init int vpif_probe(struct platform_device *pdev)
+ err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
+ if (err) {
+ v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
+- goto cleanup;
++ goto vpif_free;
+ }
+
+ while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
+@@ -1692,7 +1700,9 @@ static __init int vpif_probe(struct platform_device *pdev)
+ "registered sub device %s\n",
+ subdevdata->name);
+ }
+- vpif_probe_complete();
++ err = vpif_probe_complete();
++ if (err)
++ goto probe_subdev_out;
+ } else {
+ vpif_obj.notifier.ops = &vpif_async_ops;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+@@ -1711,6 +1721,8 @@ probe_subdev_out:
+ kfree(vpif_obj.sd);
+ vpif_unregister:
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
++vpif_free:
++ free_vpif_objs();
+ cleanup:
+ v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index 3f079ac1b080..be91b0c7d20b 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -509,6 +509,11 @@ static int __init vpss_init(void)
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
++ if (unlikely(!oper_cfg.vpss_regs_base2)) {
++ release_mem_region(VPSS_CLK_CTRL, 4);
++ return -ENOMEM;
++ }
++
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
+
+diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
+index f1b301810260..0a6411b877e9 100644
+--- a/drivers/media/platform/marvell-ccic/mcam-core.c
++++ b/drivers/media/platform/marvell-ccic/mcam-core.c
+@@ -200,7 +200,6 @@ struct mcam_vb_buffer {
+ struct list_head queue;
+ struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
+ dma_addr_t dma_desc_pa; /* Descriptor physical address */
+- int dma_desc_nent; /* Number of mapped descriptors */
+ };
+
+ static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
+@@ -608,9 +607,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
+ static void mcam_sg_next_buffer(struct mcam_camera *cam)
+ {
+ struct mcam_vb_buffer *buf;
++ struct sg_table *sg_table;
+
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
+ list_del_init(&buf->queue);
++ sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
+ /*
+ * Very Bad Not Good Things happen if you don't clear
+ * C1_DESC_ENA before making any descriptor changes.
+@@ -618,7 +619,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
+ mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
+ mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
+ mcam_reg_write(cam, REG_DESC_LEN_Y,
+- buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
++ sg_table->nents * sizeof(struct mcam_dma_desc));
+ mcam_reg_write(cam, REG_DESC_LEN_U, 0);
+ mcam_reg_write(cam, REG_DESC_LEN_V, 0);
+ mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
+diff --git a/drivers/media/platform/meson/ao-cec-g12a.c b/drivers/media/platform/meson/ao-cec-g12a.c
+index 3620a1e310f5..ddfd060625da 100644
+--- a/drivers/media/platform/meson/ao-cec-g12a.c
++++ b/drivers/media/platform/meson/ao-cec-g12a.c
+@@ -415,7 +415,6 @@ static const struct regmap_config meson_ao_cec_g12a_cec_regmap_conf = {
+ .reg_read = meson_ao_cec_g12a_read,
+ .reg_write = meson_ao_cec_g12a_write,
+ .max_register = 0xffff,
+- .fast_io = true,
+ };
+
+ static inline void
+diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
+index 1eba23409ff3..d3d1748a7ef6 100644
+--- a/drivers/media/platform/qcom/venus/firmware.c
++++ b/drivers/media/platform/qcom/venus/firmware.c
+@@ -78,11 +78,11 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+- return ret;
++ goto err_put_node;
+
+ ret = request_firmware(&mdt, fwname, dev);
+ if (ret < 0)
+- return ret;
++ goto err_put_node;
+
+ fw_size = qcom_mdt_get_size(mdt);
+ if (fw_size < 0) {
+@@ -116,6 +116,8 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
+ memunmap(mem_va);
+ err_release_fw:
+ release_firmware(mdt);
++err_put_node:
++ of_node_put(node);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
+index 6a90bc4c476e..b8615a288e2b 100644
+--- a/drivers/media/platform/rcar_fdp1.c
++++ b/drivers/media/platform/rcar_fdp1.c
+@@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
+ #define FD1_IP_H3_ES1 0x02010101
+ #define FD1_IP_M3W 0x02010202
+ #define FD1_IP_H3 0x02010203
++#define FD1_IP_M3N 0x02010204
++#define FD1_IP_E3 0x02010205
+
+ /* LUTs */
+ #define FD1_LUT_DIF_ADJ 0x1000
+@@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev)
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
++ case FD1_IP_M3N:
++ dprintk(fdp1, "FDP1 Version R-Car M3N\n");
++ break;
++ case FD1_IP_E3:
++ dprintk(fdp1, "FDP1 Version R-Car E3\n");
++ break;
+ default:
+ dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
+ hw_version);
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 4e936b95018a..481088a83212 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -523,7 +523,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
+ dev);
+ ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
+ dev);
+- ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
++ if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
++ ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
+ get_min_scratch_buf_size, dev);
+ if (ctx->img_width == 0 || ctx->img_height == 0)
+ ctx->state = MFCINST_ERROR;
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+index 2e62f8721fa5..7d52431c2c83 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+@@ -34,6 +34,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
+ for (i = 0; i < pm->num_clocks; i++) {
+ pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
+ if (IS_ERR(pm->clocks[i])) {
++ /* additional clocks are optional */
++ if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
++ pm->clocks[i] = NULL;
++ continue;
++ }
+ mfc_err("Failed to get clock: %s\n",
+ pm->clk_names[i]);
+ return PTR_ERR(pm->clocks[i]);
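
In the s5p-mfc hunk the first clock stays mandatory, while later entries that come back as -ENOENT are treated as absent-but-optional and replaced with NULL, which the clk API accepts as a no-op clock. A sketch of the pattern with a fake getter (fake_clk_get is a stand-in for devm_clk_get and only provides the first clock):

    #include <errno.h>
    #include <stdio.h>

    #define NCLOCKS 3

    static int fake_clk_get(int i, void **clk)
    {
            if (i == 0) {
                    *clk = (void *)0x1;     /* some real handle */
                    return 0;
            }
            return -ENOENT;
    }

    int main(void)
    {
            void *clocks[NCLOCKS];
            int i, err;

            for (i = 0; i < NCLOCKS; i++) {
                    err = fake_clk_get(i, &clocks[i]);
                    if (err) {
                            /* additional clocks are optional */
                            if (i && err == -ENOENT) {
                                    clocks[i] = NULL; /* NULL clk == no-op */
                                    continue;
                            }
                            fprintf(stderr, "clock %d: %d\n", i, err);
                            return 1;
                    }
            }
            puts("probe continues with optional clocks absent");
            return 0;
    }
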
+diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
+index 243c82b5d537..acd3bd48c7e2 100644
+--- a/drivers/media/platform/vim2m.c
++++ b/drivers/media/platform/vim2m.c
+@@ -1359,7 +1359,7 @@ static int vim2m_probe(struct platform_device *pdev)
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n");
+- goto error_m2m;
++ goto error_dev;
+ }
+
+ ret = media_device_register(&dev->mdev);
+@@ -1373,11 +1373,11 @@ static int vim2m_probe(struct platform_device *pdev)
+ #ifdef CONFIG_MEDIA_CONTROLLER
+ error_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev);
+-error_m2m:
+- v4l2_m2m_release(dev->m2m_dev);
+ #endif
+ error_dev:
+ video_unregister_device(&dev->vfd);
++	/* vim2m_device_release is called by video_unregister_device to release various objects */
++ return ret;
+ error_v4l2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+ error_free:
+diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
+index 946dc0908566..664855708fdf 100644
+--- a/drivers/media/platform/vimc/vimc-capture.c
++++ b/drivers/media/platform/vimc/vimc-capture.c
+@@ -142,12 +142,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+ {
+ struct vimc_cap_device *vcap = video_drvdata(file);
++ int ret;
+
+ /* Do not change the format while stream is on */
+ if (vb2_is_busy(&vcap->queue))
+ return -EBUSY;
+
+- vimc_cap_try_fmt_vid_cap(file, priv, f);
++ ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
++ if (ret)
++ return ret;
+
+ dev_dbg(vcap->dev, "%s: format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
+index c80a6df47f5e..469366dae1d5 100644
+--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
++++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
+@@ -541,6 +541,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
+
+ /* Register with V4L2 subsystem as RADIO device */
+ if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
++ v4l2_device_unregister(&fmdev->v4l2_dev);
+ fmerr("Could not register video device\n");
+ return -ENOMEM;
+ }
+@@ -554,6 +555,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
+ if (ret < 0) {
+ fmerr("(fmdev): Can't init ctrl handler\n");
+ v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
++ video_unregister_device(fmdev->radio_dev);
++ v4l2_device_unregister(&fmdev->v4l2_dev);
+ return -EBUSY;
+ }
+
+diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
+index 66334e8d63ba..c58f2d38a458 100644
+--- a/drivers/media/rc/ir-spi.c
++++ b/drivers/media/rc/ir-spi.c
+@@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = {
+ { .compatible = "ir-spi-led" },
+ {},
+ };
++MODULE_DEVICE_TABLE(of, ir_spi_of_match);
+
+ static struct spi_driver ir_spi_driver = {
+ .probe = ir_spi_probe,
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+index e97f6edc98de..65f2b1a20ca1 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
+@@ -284,12 +284,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
+ void dvb_usb_device_exit(struct usb_interface *intf)
+ {
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+- const char *name = "generic DVB-USB module";
++ const char *default_name = "generic DVB-USB module";
++ char name[40];
+
+ usb_set_intfdata(intf, NULL);
+ if (d != NULL && d->desc != NULL) {
+- name = d->desc->name;
++ strscpy(name, d->desc->name, sizeof(name));
+ dvb_usb_exit(d);
++ } else {
++ strscpy(name, default_name, sizeof(name));
+ }
+ info("%s successfully deinitialized and disconnected.", name);
+
+diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
+index 7580fc5f2f12..6a6405b80797 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-video.c
++++ b/drivers/media/usb/hdpvr/hdpvr-video.c
+@@ -435,7 +435,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
+ /* wait for the first buffer */
+ if (!(file->f_flags & O_NONBLOCK)) {
+ if (wait_event_interruptible(dev->wait_data,
+- hdpvr_get_next_buffer(dev)))
++ !list_empty_careful(&dev->rec_buff_list)))
+ return -ERESTARTSYS;
+ }
+
+@@ -461,10 +461,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
+ goto err;
+ }
+ if (!err) {
+- v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
+- "timeout: restart streaming\n");
++ v4l2_info(&dev->v4l2_dev,
++ "timeout: restart streaming\n");
++ mutex_lock(&dev->io_mutex);
+ hdpvr_stop_streaming(dev);
+- msecs_to_jiffies(4000);
++ mutex_unlock(&dev->io_mutex);
++ /*
++			 * The FW needs about 4 seconds after streaming has
++			 * stopped before it is ready to restart streaming.
++ */
++ msleep(4000);
+ err = hdpvr_start_streaming(dev);
+ if (err) {
+ ret = err;
+@@ -1127,9 +1134,7 @@ static void hdpvr_device_release(struct video_device *vdev)
+ struct hdpvr_device *dev = video_get_drvdata(vdev);
+
+ hdpvr_delete(dev);
+- mutex_lock(&dev->io_mutex);
+ flush_work(&dev->worker);
+- mutex_unlock(&dev->io_mutex);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+ v4l2_ctrl_handler_free(&dev->hdl);
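
Worth spelling out the bug the hdpvr hunk fixes: the old code had msecs_to_jiffies(4000); as a bare statement, which converts 4000 ms to jiffies and discards the result, so nothing ever slept; the replacement msleep(4000) actually waits for the firmware. A trivial demonstration that a value-returning call used as a statement has no effect (ms_to_ticks is a made-up stand-in for the conversion helper):

    #include <stdio.h>

    /* Pure conversion, no side effect - like msecs_to_jiffies(). */
    static long ms_to_ticks(long ms)
    {
            return ms / 10;         /* pretend HZ=100 */
    }

    int main(void)
    {
            ms_to_ticks(4000);      /* result discarded: nothing waits here */
            printf("%ld ticks\n", ms_to_ticks(4000)); /* value must be used */
            return 0;
    }
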
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 26163a5bde7d..e399b9fad757 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2345,7 +2345,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
+ struct uvc_entity *entity;
+ unsigned int i;
+
+- cancel_work_sync(&dev->async_ctrl.work);
++ /* Can be uninitialized if we are aborting on probe error. */
++ if (dev->async_ctrl.work.func)
++ cancel_work_sync(&dev->async_ctrl.work);
+
+ /* Free controls and control mappings for all entities. */
+ list_for_each_entry(entity, &dev->entities, list) {
+diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
+index 37a7992585df..48803eb773ed 100644
+--- a/drivers/media/usb/zr364xx/zr364xx.c
++++ b/drivers/media/usb/zr364xx/zr364xx.c
+@@ -694,7 +694,8 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
+ struct zr364xx_camera *cam = video_drvdata(file);
+
+ strscpy(cap->driver, DRIVER_DESC, sizeof(cap->driver));
+- strscpy(cap->card, cam->udev->product, sizeof(cap->card));
++ if (cam->udev->product)
++ strscpy(cap->card, cam->udev->product, sizeof(cap->card));
+ strscpy(cap->bus_info, dev_name(&cam->udev->dev),
+ sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index 7d3a33258748..4d385489be6d 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -2149,15 +2149,6 @@ static int handler_new_ref(struct v4l2_ctrl_handler *hdl,
+ if (size_extra_req)
+ new_ref->p_req.p = &new_ref[1];
+
+- if (ctrl->handler == hdl) {
+- /* By default each control starts in a cluster of its own.
+- new_ref->ctrl is basically a cluster array with one
+- element, so that's perfect to use as the cluster pointer.
+- But only do this for the handler that owns the control. */
+- ctrl->cluster = &new_ref->ctrl;
+- ctrl->ncontrols = 1;
+- }
+-
+ INIT_LIST_HEAD(&new_ref->node);
+
+ mutex_lock(hdl->lock);
+@@ -2190,6 +2181,15 @@ insert_in_hash:
+ hdl->buckets[bucket] = new_ref;
+ if (ctrl_ref)
+ *ctrl_ref = new_ref;
++ if (ctrl->handler == hdl) {
++ /* By default each control starts in a cluster of its own.
++ * new_ref->ctrl is basically a cluster array with one
++ * element, so that's perfect to use as the cluster pointer.
++ * But only do this for the handler that owns the control.
++ */
++ ctrl->cluster = &new_ref->ctrl;
++ ctrl->ncontrols = 1;
++ }
+
+ unlock:
+ mutex_unlock(hdl->lock);
+@@ -2369,16 +2369,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
+- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
++ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
++ type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
++ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
+- qmenu_int == NULL) {
++ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
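
The v4l2-ctrls hunk moves the cluster assignment so ctrl->cluster only points at new_ref once the reference is actually linked into the handler's list and hash; doing it earlier let a control point at a reference that might never be inserted. A generic sketch of the publish-only-after-insert ordering, using a simple list in place of the handler's hash:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct ref {
            struct ref *next;
            const char *name;
    };

    static struct ref *head;

    /* Insert a new ref, refusing duplicates. The caller-visible pointer
     * (*out) is only set after the node is really linked in, mirroring
     * the reordering above. */
    static int add_ref(const char *name, struct ref **out)
    {
            struct ref *r;

            for (r = head; r; r = r->next)
                    if (!strcmp(r->name, name))
                            return -1;      /* duplicate: nothing published */

            r = malloc(sizeof(*r));
            if (!r)
                    return -1;
            r->name = name;
            r->next = head;
            head = r;
            *out = r;               /* publish only on success */
            return 0;
    }

    int main(void)
    {
            struct ref *r = NULL;

            add_ref("brightness", &r);
            printf("published: %s\n", r ? r->name : "(none)");
            return 0;
    }
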
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index 5fc76a1993d0..9cf14b359c14 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -575,11 +575,14 @@ static int msm_init_cm_dll(struct sdhci_host *host)
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ int wait_cnt = 50;
+- unsigned long flags;
++ unsigned long flags, xo_clk = 0;
+ u32 config;
+ const struct sdhci_msm_offset *msm_offset =
+ msm_host->offset;
+
++ if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
++ xo_clk = clk_get_rate(msm_host->xo_clk);
++
+ spin_lock_irqsave(&host->lock, flags);
+
+ /*
+@@ -627,10 +630,10 @@ static int msm_init_cm_dll(struct sdhci_host *host)
+ config &= CORE_FLL_CYCLE_CNT;
+ if (config)
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
+- clk_get_rate(msm_host->xo_clk));
++ xo_clk);
+ else
+ mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
+- clk_get_rate(msm_host->xo_clk));
++ xo_clk);
+
+ config = readl_relaxed(host->ioaddr +
+ msm_offset->core_dll_config_2);
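
The sdhci-msm hunk reads the XO clock rate with clk_get_rate() before taking host->lock, since clk_get_rate() may sleep and sleeping is not allowed while a spinlock is held. The general shape: gather anything that can sleep up front, then enter the critical section with plain values. A userspace sketch with a mutex standing in for the spinlock and a deliberately slow getter:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for clk_get_rate(): pretend it can block. */
    static unsigned long slow_get_rate(void)
    {
            usleep(1000);
            return 19200000;
    }

    int main(void)
    {
            /* 1. Do the potentially sleeping work *before* locking. */
            unsigned long xo_rate = slow_get_rate();

            /* 2. The critical section then works on plain values only. */
            pthread_mutex_lock(&lock);
            printf("configure divider from rate %lu\n", xo_rate);
            pthread_mutex_unlock(&lock);
            return 0;
    }
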
+diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
+index dceff28c9a31..23fe19397315 100644
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -500,7 +500,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
+ {
+ struct mtk_nfc *nfc = nand_get_controller_data(chip);
+ const struct nand_sdr_timings *timings;
+- u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
++ u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
++ u32 thold;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+@@ -536,11 +537,28 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
+ twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
+ twh &= 0xf;
+
+- twst = timings->tWP_min / 1000;
++ /* Calculate real WE#/RE# hold time in nanosecond */
++ thold = (twh + 1) * 1000000 / rate;
++ /* nanosecond to picosecond */
++ thold *= 1000;
++
++ /*
++	 * WE# low level time should be expanded to meet WE# pulse time
++ * and WE# cycle time at the same time.
++ */
++ if (thold < timings->tWC_min)
++ twst = timings->tWC_min - thold;
++ twst = max(timings->tWP_min, twst) / 1000;
+ twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
+ twst &= 0xf;
+
+- trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
++ /*
++	 * RE# low level time should be expanded to meet RE# pulse time,
++ * RE# access time and RE# cycle time at the same time.
++ */
++ if (thold < timings->tRC_min)
++ trlt = timings->tRC_min - thold;
++ trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000;
+ trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
+ trlt &= 0xf;
+
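
The mtk_nand change widens the WE#/RE# low time so the full cycle constraints (tWC_min/tRC_min) hold, not just the pulse minima: the hold time already granted by twh is converted back to picoseconds, and whatever the cycle still lacks is added to the low phase before rounding up to clock ticks. A worked version of the WE# arithmetic in plain C, with example timings and the clock rate in kHz as the driver uses it:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long rate = 156000;    /* NFI clock in kHz (example) */
            unsigned long tWP_min = 10000;  /* WE# pulse min, ps (example) */
            unsigned long tWC_min = 25000;  /* WE# cycle min, ps (example) */
            unsigned long twh = 0;          /* hold field already computed */
            unsigned long thold, twst = 0;

            /* real WE# hold time granted by twh: cycles -> ns -> ps */
            thold = (twh + 1) * 1000000 / rate;
            thold *= 1000;

            /* expand the low time until pulse AND cycle minima both hold */
            if (thold < tWC_min)
                    twst = tWC_min - thold;
            if (twst < tWP_min)
                    twst = tWP_min;         /* max(tWP_min, twst) */
            twst = DIV_ROUND_UP((twst / 1000) * rate, 1000000) - 1;
            twst &= 0xf;

            /* 156 MHz, 6 ns hold -> 19 ns low -> field value 2 */
            printf("thold=%lu ps, twst field=%lu\n", thold, twst);
            return 0;
    }
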
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index 4c15bb58c623..20560c0b1f5d 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -511,12 +511,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ if (ret == -EBADMSG) {
+ ecc_failed = true;
+ mtd->ecc_stats.failed++;
+- ret = 0;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+
++ ret = 0;
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
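
Moving ret = 0 out of the -EBADMSG branch keeps a positive bitflip count from the final iteration from leaking out of the loop as the function's return value; ECC outcomes are accumulated in mtd->ecc_stats and max_bitflips instead. A sketch of that convention (the return semantics here are simplified, not the full MTD contract):

    #include <errno.h>
    #include <stdio.h>

    struct ecc_stats { unsigned failed, corrected; };

    /* Stand-in for a per-page read: bitflip count, or -EBADMSG. */
    static int read_page(int page)
    {
            return page == 1 ? -EBADMSG : page; /* page 1 uncorrectable */
    }

    /* Multi-page read: ECC results feed the stats, the call itself
     * still "succeeds" so callers can consume the data returned. */
    static int mtd_read(struct ecc_stats *st, int npages, unsigned *max)
    {
            int p, ret;

            for (p = 0; p < npages; p++) {
                    ret = read_page(p);
                    if (ret == -EBADMSG) {
                            st->failed++;
                    } else if (ret > 0) {
                            st->corrected += ret;
                            if ((unsigned)ret > *max)
                                    *max = ret;
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct ecc_stats st = { 0, 0 };
            unsigned max_flips = 0;

            mtd_read(&st, 3, &max_flips);
            printf("failed=%u corrected=%u max=%u\n",
                   st.failed, st.corrected, max_flips);
            return 0;
    }
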
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 799fc38c5c34..b0aab3a0a1bf 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3866,8 +3866,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
+ struct net_device *bond_dev)
+ {
+ struct bonding *bond = netdev_priv(bond_dev);
+- struct iphdr *iph = ip_hdr(skb);
+ struct slave *slave;
++ int slave_cnt;
+ u32 slave_id;
+
+ /* Start with the curr_active_slave that joined the bond as the
+@@ -3876,23 +3876,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
+ * send the join/membership reports. The curr_active_slave found
+ * will send all of this type of traffic.
+ */
+- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
+- slave = rcu_dereference(bond->curr_active_slave);
+- if (slave)
+- bond_dev_queue_xmit(bond, skb, slave->dev);
+- else
+- bond_xmit_slave_id(bond, skb, 0);
+- } else {
+- int slave_cnt = READ_ONCE(bond->slave_cnt);
++ if (skb->protocol == htons(ETH_P_IP)) {
++ int noff = skb_network_offset(skb);
++ struct iphdr *iph;
+
+- if (likely(slave_cnt)) {
+- slave_id = bond_rr_gen_slave_id(bond);
+- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+- } else {
+- bond_tx_drop(bond_dev, skb);
++ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
++ goto non_igmp;
++
++ iph = ip_hdr(skb);
++ if (iph->protocol == IPPROTO_IGMP) {
++ slave = rcu_dereference(bond->curr_active_slave);
++ if (slave)
++ bond_dev_queue_xmit(bond, skb, slave->dev);
++ else
++ bond_xmit_slave_id(bond, skb, 0);
++ return NETDEV_TX_OK;
+ }
+ }
+
++non_igmp:
++ slave_cnt = READ_ONCE(bond->slave_cnt);
++ if (likely(slave_cnt)) {
++ slave_id = bond_rr_gen_slave_id(bond);
++ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
++ } else {
++ bond_tx_drop(bond_dev, skb);
++ }
+ return NETDEV_TX_OK;
+ }
+
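
Before touching iph->protocol, the bonding hunk now confirms with pskb_may_pull() that a complete IPv4 header is really present in the skb, where the old code dereferenced ip_hdr(skb) unconditionally even for short or non-IP frames. A plain-buffer sketch of the same bounds check:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ipv4_hdr {               /* just enough of the header */
            uint8_t  ver_ihl;
            uint8_t  tos;
            uint16_t tot_len;
            uint16_t id;
            uint16_t frag_off;
            uint8_t  ttl;
            uint8_t  protocol;
            uint16_t check;
            uint32_t saddr, daddr;
    };

    /* Return the protocol byte, or -1 if the frame is too short to
     * hold a complete IPv4 header at the given network offset. */
    static int ip_protocol(const uint8_t *frame, size_t len, size_t noff)
    {
            struct ipv4_hdr iph;

            if (len < noff + sizeof(iph))   /* the pskb_may_pull() analogue */
                    return -1;
            memcpy(&iph, frame + noff, sizeof(iph));
            return iph.protocol;
    }

    int main(void)
    {
            uint8_t runt[10] = { 0 };       /* too short: must not be parsed */

            printf("%d\n", ip_protocol(runt, sizeof(runt), 0));
            return 0;
    }
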
+diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
+index 1c3959efebc4..844e038f3dc6 100644
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -734,15 +734,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ return sja1105_clocking_setup_port(priv, port);
+ }
+
+-static void sja1105_adjust_link(struct dsa_switch *ds, int port,
+- struct phy_device *phydev)
++static void sja1105_mac_config(struct dsa_switch *ds, int port,
++ unsigned int link_an_mode,
++ const struct phylink_link_state *state)
+ {
+ struct sja1105_private *priv = ds->priv;
+
+- if (!phydev->link)
++ if (!state->link)
+ sja1105_adjust_port_config(priv, port, 0, false);
+ else
+- sja1105_adjust_port_config(priv, port, phydev->speed, true);
++ sja1105_adjust_port_config(priv, port, state->speed, true);
+ }
+
+ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
+@@ -1515,9 +1516,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .setup = sja1105_setup,
+- .adjust_link = sja1105_adjust_link,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .phylink_validate = sja1105_phylink_validate,
++ .phylink_mac_config = sja1105_mac_config,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 008ad0ca89ba..c12c1bab0fe4 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3857,9 +3857,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!(bp->flags & TX_TIMESTAMPING_EN)) {
++ bp->eth_stats.ptp_skip_tx_ts++;
+ BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
+ } else if (bp->ptp_tx_skb) {
+- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
++ bp->eth_stats.ptp_skip_tx_ts++;
++ netdev_err_once(bp->dev,
++ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* schedule check for Tx timestamp */
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+index 51fc845de31a..4a0ba6801c9e 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+@@ -182,7 +182,9 @@ static const struct {
+ { STATS_OFFSET32(driver_filtered_tx_pkt),
+ 4, false, "driver_filtered_tx_pkt" },
+ { STATS_OFFSET32(eee_tx_lpi),
+- 4, true, "Tx LPI entry count"}
++ 4, true, "Tx LPI entry count"},
++ { STATS_OFFSET32(ptp_skip_tx_ts),
++ 4, false, "ptp_skipped_tx_tstamp" },
+ };
+
+ #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 03ac10b1cd1e..2cc14db8f0ec 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -15214,11 +15214,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
+ u32 val_seq;
+ u64 timestamp, ns;
+ struct skb_shared_hwtstamps shhwtstamps;
++ bool bail = true;
++ int i;
++
++	/* FW may take a while to complete timestamping; poll for a bit and,
++	 * if it's still not complete, treat that as an error state and bail out.
++ */
++ for (i = 0; i < 10; i++) {
++ /* Read Tx timestamp registers */
++ val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
++ NIG_REG_P0_TLLH_PTP_BUF_SEQID);
++ if (val_seq & 0x10000) {
++ bail = false;
++ break;
++ }
++ msleep(1 << i);
++ }
+
+- /* Read Tx timestamp registers */
+- val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
+- NIG_REG_P0_TLLH_PTP_BUF_SEQID);
+- if (val_seq & 0x10000) {
++ if (!bail) {
+ /* There is a valid timestamp value */
+ timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
+ NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
+@@ -15233,16 +15246,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
+- dev_kfree_skb_any(bp->ptp_tx_skb);
+- bp->ptp_tx_skb = NULL;
+
+ DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+ } else {
+- DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
+- /* Reschedule to keep checking for a valid timestamp value */
+- schedule_work(&bp->ptp_task);
++ DP(BNX2X_MSG_PTP,
++ "Tx timestamp is not recorded (register read=%u)\n",
++ val_seq);
++ bp->eth_stats.ptp_skip_tx_ts++;
+ }
++
++ dev_kfree_skb_any(bp->ptp_tx_skb);
++ bp->ptp_tx_skb = NULL;
+ }
+
+ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
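
Rather than rescheduling itself forever, the PTP worker now polls the sequence-ID register up to ten times with an exponentially growing msleep(1 << i) - 1, 2, 4, ... 512 ms, roughly a second in total - and gives up cleanly, freeing the skb, if no valid timestamp ever appears. A standalone sketch of the bounded exponential-backoff poll (timestamp_valid models the REG_RD of the sequence-ID register and turns valid on the fourth read):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool timestamp_valid(void)
    {
            static int reads;
            return ++reads >= 4;
    }

    static void sleep_ms(long ms)
    {
            struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
            nanosleep(&ts, NULL);
    }

    int main(void)
    {
            bool bail = true;
            int i;

            for (i = 0; i < 10; i++) {      /* ~1023 ms worst case total */
                    if (timestamp_valid()) {
                            bail = false;
                            break;
                    }
                    sleep_ms(1L << i);
            }

            puts(bail ? "gave up, dropping the skb" : "timestamp consumed");
            return 0;
    }
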
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+index b2644ed13d06..d55e63692cf3 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+@@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
+ u32 driver_filtered_tx_pkt;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
++
++ /* PTP */
++ u32 ptp_skip_tx_ts;
+ };
+
+ struct bnx2x_eth_q_stats {
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index f758b2e0591f..9090c79387c1 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5508,7 +5508,16 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
+
+ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
+ {
+- return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
++ int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
++ int cp = bp->cp_nr_rings;
++
++ if (!ulp_stat)
++ return cp;
++
++ if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
++ return bnxt_get_ulp_msix_base(bp) + ulp_stat;
++
++ return cp + ulp_stat;
+ }
+
+ static bool bnxt_need_reserve_rings(struct bnxt *bp)
+@@ -7477,11 +7486,7 @@ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
+
+ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
+ {
+- unsigned int stat;
+-
+- stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
+- stat -= bp->cp_nr_rings;
+- return stat;
++ return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
+ }
+
+ int bnxt_get_avail_msix(struct bnxt *bp, int num)
+@@ -10262,10 +10267,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
+ bnxt_dcb_free(bp);
+ kfree(bp->edev);
+ bp->edev = NULL;
++ bnxt_cleanup_pci(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+- bnxt_cleanup_pci(bp);
+ bnxt_free_port_stats(bp);
+ free_netdev(dev);
+ }
+@@ -10859,6 +10864,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_clear_int_mode(bp);
++ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+index bfa342a98d08..fc77caf0a076 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -157,8 +157,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+
+ if (BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int resv_msix;
+
+- avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
++ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
++ avail_msix = min_t(int, resv_msix, avail_msix);
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ }
+ bnxt_fill_msix_vecs(bp, ent);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 38f10f7dcbc3..831bb709e783 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
+ */
+ if (!is_valid_ether_addr(iap)) {
+ /* Report it and use a random ethernet address instead */
+- netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
++ dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
+ eth_hw_addr_random(ndev);
+- netdev_info(ndev, "Using random MAC address: %pM\n",
+- ndev->dev_addr);
++ dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
++ ndev->dev_addr);
+ return;
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+index fa8b8506b120..738e01393b68 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+@@ -251,6 +251,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
++ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_algo->node);
+@@ -351,6 +352,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
++ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_dev->node);
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index f326805543a4..66b691b7221f 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4,6 +4,9 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/etherdevice.h>
+ #include <linux/interrupt.h>
++#ifdef CONFIG_RFS_ACCEL
++#include <linux/cpu_rmap.h>
++#endif
+ #include <linux/if_vlan.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
+@@ -24,8 +27,7 @@
+ #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+ #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+
+-static void hns3_clear_all_ring(struct hnae3_handle *h);
+-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
++static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
+ static void hns3_remove_hw_addr(struct net_device *netdev);
+
+ static const char hns3_driver_name[] = "hns3";
+@@ -79,23 +81,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
+ return IRQ_HANDLED;
+ }
+
+-/* This callback function is used to set affinity changes to the irq affinity
+- * masks when the irq_set_affinity_notifier function is used.
+- */
+-static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
+- const cpumask_t *mask)
+-{
+- struct hns3_enet_tqp_vector *tqp_vectors =
+- container_of(notify, struct hns3_enet_tqp_vector,
+- affinity_notify);
+-
+- tqp_vectors->affinity_mask = *mask;
+-}
+-
+-static void hns3_nic_irq_affinity_release(struct kref *ref)
+-{
+-}
+-
+ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
+ {
+ struct hns3_enet_tqp_vector *tqp_vectors;
+@@ -107,8 +92,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
+ if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
+ continue;
+
+- /* clear the affinity notifier and affinity mask */
+- irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
++ /* clear the affinity mask */
+ irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
+
+ /* release the irq resource */
+@@ -161,12 +145,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
+ return ret;
+ }
+
+- tqp_vectors->affinity_notify.notify =
+- hns3_nic_irq_affinity_notify;
+- tqp_vectors->affinity_notify.release =
+- hns3_nic_irq_affinity_release;
+- irq_set_affinity_notifier(tqp_vectors->vector_irq,
+- &tqp_vectors->affinity_notify);
+ irq_set_affinity_hint(tqp_vectors->vector_irq,
+ &tqp_vectors->affinity_mask);
+
+@@ -340,6 +318,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+ }
+
++static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
++{
++#ifdef CONFIG_RFS_ACCEL
++ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
++ netdev->rx_cpu_rmap = NULL;
++#endif
++}
++
++static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
++{
++#ifdef CONFIG_RFS_ACCEL
++ struct hns3_nic_priv *priv = netdev_priv(netdev);
++ struct hns3_enet_tqp_vector *tqp_vector;
++ int i, ret;
++
++ if (!netdev->rx_cpu_rmap) {
++ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
++ if (!netdev->rx_cpu_rmap)
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < priv->vector_num; i++) {
++ tqp_vector = &priv->tqp_vector[i];
++ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
++ tqp_vector->vector_irq);
++ if (ret) {
++ hns3_free_rx_cpu_rmap(netdev);
++ return ret;
++ }
++ }
++#endif
++ return 0;
++}
++
+ static int hns3_nic_net_up(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+@@ -351,11 +363,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
+ if (ret)
+ return ret;
+
++ /* the device can work without cpu rmap, only aRFS needs it */
++ ret = hns3_set_rx_cpu_rmap(netdev);
++ if (ret)
++ netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
++
+ /* get irq resource for all vectors */
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
+- return ret;
++ goto free_rmap;
+ }
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+@@ -384,7 +401,8 @@ out_start_err:
+ hns3_vector_disable(&priv->tqp_vector[j]);
+
+ hns3_nic_uninit_irq(priv);
+-
++free_rmap:
++ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+
+@@ -447,6 +465,20 @@ static int hns3_nic_net_open(struct net_device *netdev)
+ return 0;
+ }
+
++static void hns3_reset_tx_queue(struct hnae3_handle *h)
++{
++ struct net_device *ndev = h->kinfo.netdev;
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct netdev_queue *dev_queue;
++ u32 i;
++
++ for (i = 0; i < h->kinfo.num_tqps; i++) {
++ dev_queue = netdev_get_tx_queue(ndev,
++ priv->ring_data[i].queue_index);
++ netdev_tx_reset_queue(dev_queue);
++ }
++}
++
+ static void hns3_nic_net_down(struct net_device *netdev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+@@ -467,10 +499,19 @@ static void hns3_nic_net_down(struct net_device *netdev)
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
++ hns3_free_rx_cpu_rmap(netdev);
++
+ /* free irq resources */
+ hns3_nic_uninit_irq(priv);
+
+- hns3_clear_all_ring(priv->ae_handle);
++	/* defer ring buffer clearing to hns3_reset_notify_uninit_enet
++	 * during the reset process, because the driver may not be able
++	 * to disable the ring through firmware while the netdev is going down.
++ */
++ if (!hns3_nic_resetting(netdev))
++ hns3_clear_all_ring(priv->ae_handle, false);
++
++ hns3_reset_tx_queue(priv->ae_handle);
+ }
+
+ static int hns3_nic_net_stop(struct net_device *netdev)
+@@ -1493,12 +1534,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
+ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
+ {
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+- struct hnae3_handle *h = hns3_get_handle(netdev);
+- struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
++ struct hnae3_knic_private_info *kinfo;
+ u8 tc = mqprio_qopt->qopt.num_tc;
+ u16 mode = mqprio_qopt->mode;
+ u8 hw = mqprio_qopt->qopt.hw;
++ struct hnae3_handle *h;
+
+ if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
+ mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
+@@ -1510,6 +1551,9 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
+ if (!netdev)
+ return -EINVAL;
+
++ h = hns3_get_handle(netdev);
++ kinfo = &h->kinfo;
++
+ return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+ }
+@@ -1895,9 +1939,9 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+- if (!ae_dev) {
++ if (!ae_dev || !ae_dev->ops) {
+ dev_err(&pdev->dev,
+- "Can't recover - error happened during device init\n");
++ "Can't recover - error happened before device initialized\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+@@ -1916,6 +1960,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+
+ dev_info(dev, "requesting reset due to PCI error\n");
+
++ if (!ae_dev || !ae_dev->ops)
++ return PCI_ERS_RESULT_NONE;
++
+ /* request the reset */
+ if (ae_dev->ops->reset_event) {
+ if (!ae_dev->override_pci_need_reset)
+@@ -3331,8 +3378,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
+ hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+- irq_set_affinity_notifier(tqp_vector->vector_irq,
+- NULL);
+ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+ free_irq(tqp_vector->vector_irq, tqp_vector);
+ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+@@ -3858,7 +3903,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
+
+ hns3_del_all_fd_rules(netdev, true);
+
+- hns3_force_clear_all_rx_ring(handle);
++ hns3_clear_all_ring(handle, true);
+
+ hns3_uninit_phy(netdev);
+
+@@ -4030,40 +4075,26 @@ static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+ }
+ }
+
+-static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
+-{
+- struct net_device *ndev = h->kinfo.netdev;
+- struct hns3_nic_priv *priv = netdev_priv(ndev);
+- struct hns3_enet_ring *ring;
+- u32 i;
+-
+- for (i = 0; i < h->kinfo.num_tqps; i++) {
+- ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+- hns3_force_clear_rx_ring(ring);
+- }
+-}
+-
+-static void hns3_clear_all_ring(struct hnae3_handle *h)
++static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
+ {
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+- struct netdev_queue *dev_queue;
+ struct hns3_enet_ring *ring;
+
+ ring = priv->ring_data[i].ring;
+ hns3_clear_tx_ring(ring);
+- dev_queue = netdev_get_tx_queue(ndev,
+- priv->ring_data[i].queue_index);
+- netdev_tx_reset_queue(dev_queue);
+
+ ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+ /* Continue to clear other rings even if clearing some
+ * rings failed.
+ */
+- hns3_clear_rx_ring(ring);
++ if (force)
++ hns3_force_clear_rx_ring(ring);
++ else
++ hns3_clear_rx_ring(ring);
+ }
+ }
+
+@@ -4272,7 +4303,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+ return 0;
+ }
+
+- hns3_force_clear_all_rx_ring(handle);
++ hns3_clear_all_ring(handle, true);
++ hns3_reset_tx_queue(priv->ae_handle);
+
+ hns3_nic_uninit_vector_data(priv);
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index d1588ea6132c..24fce343e7fc 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -243,11 +243,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+
+ skb_get(skb);
+ tx_ret = hns3_nic_net_xmit(skb, ndev);
+- if (tx_ret == NETDEV_TX_OK)
++ if (tx_ret == NETDEV_TX_OK) {
+ good_cnt++;
+- else
++ } else {
++ kfree_skb(skb);
+ netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
+ tx_ret);
++ }
+ }
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index d3b1f8cb1155..f2bffc05e902 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -552,8 +552,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
+ return buff;
+
+ for (i = 0; i < size; i++) {
+- snprintf(buff, ETH_GSTRING_LEN,
+- strs[i].desc);
++ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+
+@@ -1058,6 +1057,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
++ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+ }
+
+ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+@@ -2389,6 +2389,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
+ return ret;
+ }
+
++ if (hdev->hw.mac.support_autoneg) {
++ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
++ if (ret) {
++ dev_err(&hdev->pdev->dev,
++ "Config mac autoneg fail ret=%d\n", ret);
++ return ret;
++ }
++ }
++
+ mac->link = 0;
+
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+@@ -2508,6 +2517,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
+
+ static void hclge_update_port_capability(struct hclge_mac *mac)
+ {
++ /* update fec ability by speed */
++ hclge_convert_setting_fec(mac);
++
+ /* firmware can not identify back plane type, the media type
+ * read from configuration can help deal it
+ */
+@@ -2580,6 +2592,11 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
+ mac->autoneg = resp->autoneg;
+ mac->support_autoneg = resp->autoneg_ability;
++ mac->speed_type = QUERY_ACTIVE_SPEED;
++ if (!resp->active_fec)
++ mac->fec_mode = 0;
++ else
++ mac->fec_mode = BIT(resp->active_fec);
+ } else {
+ mac->speed_type = QUERY_SFP_SPEED;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+index 1e8134892d77..32d6a59b731a 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -224,6 +224,13 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
+
++	/* The supported flags are Pause and Asym Pause, but the default
++	 * advertising should be rx on, tx on, so Asym Pause needs to be
++	 * cleared from the advertising flags.
++ */
++ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
++ phydev->advertising);
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index a7bbb6d3091a..0d53062f7bb5 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -54,7 +54,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
+ u32 tick;
+
+ /* Calc tick */
+- if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
++ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
++ ir > HCLGE_ETHER_MAX_RATE)
+ return -EINVAL;
+
+ tick = tick_array[shaper_level];
+@@ -1124,6 +1125,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
+ int ret;
+ u8 i;
+
++ if (vport->vport_id >= HNAE3_MAX_TC)
++ return -EINVAL;
++
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 5d53467ee2d2..3b02745605d4 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2512,6 +2512,12 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+ return ret;
+ }
+
++ if (pdev->revision >= 0x21) {
++ ret = hclgevf_set_promisc_mode(hdev, true);
++ if (ret)
++ return ret;
++ }
++
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+@@ -2591,9 +2597,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+ * firmware makes sure broadcast packets can be accepted.
+ * For revision 0x21, default to enable broadcast promisc mode.
+ */
+- ret = hclgevf_set_promisc_mode(hdev, true);
+- if (ret)
+- goto err_config;
++ if (pdev->revision >= 0x21) {
++ ret = hclgevf_set_promisc_mode(hdev, true);
++ if (ret)
++ goto err_config;
++ }
+
+ /* Initialize RSS for this VF */
+ ret = hclgevf_rss_init_hw(hdev);
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+index 06d1509d57f7..26422bc9ca8c 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+@@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
+ unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
+ #endif
+
++ if (!size)
++ return;
++
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+
+@@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
+ {
+ struct iavf_rx_buffer *rx_buffer;
+
++ if (!size)
++ return NULL;
++
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+@@ -1290,7 +1296,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
+ unsigned int size)
+ {
+- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
++ void *va;
+ #if (PAGE_SIZE < 8192)
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
+ #else
+@@ -1299,7 +1305,10 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
+ unsigned int headlen;
+ struct sk_buff *skb;
+
++ if (!rx_buffer)
++ return NULL;
+ /* prefetch first cache line of first page */
++ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ prefetch(va);
+ #if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+@@ -1354,7 +1363,7 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer,
+ unsigned int size)
+ {
+- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
++ void *va;
+ #if (PAGE_SIZE < 8192)
+ unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
+ #else
+@@ -1363,7 +1372,10 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ #endif
+ struct sk_buff *skb;
+
++ if (!rx_buffer)
++ return NULL;
+ /* prefetch first cache line of first page */
++ va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ prefetch(va);
+ #if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+@@ -1398,6 +1410,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
+ static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
+ struct iavf_rx_buffer *rx_buffer)
+ {
++ if (!rx_buffer)
++ return;
++
+ if (iavf_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ iavf_reuse_rx_page(rx_ring, rx_buffer);
+@@ -1496,11 +1511,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
+ * verified the descriptor has been written back.
+ */
+ dma_rmb();
++#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
++ if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
++ break;
+
+ size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
+ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
+- if (!size)
+- break;
+
+ iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
+ rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+@@ -1516,7 +1532,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+- rx_buffer->pagecnt_bias++;
++ if (rx_buffer)
++ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
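
The iavf hunks above are one logical fix: the RX clean loop used to treat a
zero length as "descriptor not yet written back", but a descriptor can
complete with a zero-length payload, so the loop now keys off the DONE bit
and the buffer helpers tolerate size == 0 and the NULL buffer that results.
A minimal sketch of the ordering the fix enforces, with hypothetical my_*
names standing in for the driver's real descriptor layout:

/* Trust the DONE bit, not the length, to decide whether a descriptor
 * has been written back; a zero length is a valid completion.
 */
struct my_rx_desc {
	__le64 qword;			/* status + length, written by HW */
};

#define MY_DESC_DONE	BIT_ULL(0)
#define MY_LEN_SHIFT	38
#define MY_LEN_MASK	(0x3fffULL << MY_LEN_SHIFT)

static bool my_desc_done(const struct my_rx_desc *desc, unsigned int *size)
{
	u64 qword = le64_to_cpu(READ_ONCE(desc->qword));

	if (!(qword & MY_DESC_DONE))
		return false;		/* not written back yet */

	dma_rmb();			/* order the DONE check before later reads */
	qword = le64_to_cpu(READ_ONCE(desc->qword));
	*size = (qword & MY_LEN_MASK) >> MY_LEN_SHIFT;
	return true;			/* *size may legitimately be zero */
}
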
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 792e6e42030e..754c7080c3fc 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -451,7 +451,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
+ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+-void ice_napi_del(struct ice_vsi *vsi);
+ #ifdef CONFIG_DCB
+ int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
+ void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index fbf1eba0cc2a..f14fa51cc704 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2754,19 +2754,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
+
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
+- /* do not unregister and free netdevs while driver is in the reset
+- * recovery pending state. Since reset/rebuild happens through PF
+- * service task workqueue, its not a good idea to unregister netdev
+- * that is associated to the PF that is running the work queue items
+- * currently. This is done to avoid check_flush_dependency() warning
+- * on this wq
++ /* do not unregister while driver is in the reset recovery pending
++ * state. Since reset/rebuild happens through PF service task workqueue,
++ * it's not a good idea to unregister netdev that is associated to the
++ * PF that is running the work queue items currently. This is done to
++ * avoid check_flush_dependency() warning on this wq
+ */
+- if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+- ice_napi_del(vsi);
++ if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
+ unregister_netdev(vsi->netdev);
+- free_netdev(vsi->netdev);
+- vsi->netdev = NULL;
+- }
+
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+ ice_rss_clean(vsi);
+@@ -2799,6 +2794,13 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ ice_vsi_delete(vsi);
+ ice_vsi_free_q_vectors(vsi);
++
++ /* make sure unregister_netdev() was called by checking __ICE_DOWN */
++ if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
++ free_netdev(vsi->netdev);
++ vsi->netdev = NULL;
++ }
++
+ ice_vsi_clear_rings(vsi);
+
+ ice_vsi_put_qs(vsi);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 7843abf4d44d..1c803106e301 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -1161,16 +1161,16 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ }
+ }
+
+- /* see if one of the VFs needs to be reset */
+- for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
++ /* check to see if one of the VFs caused the MDD */
++ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ struct ice_vf *vf = &pf->vf[i];
+
+- mdd_detected = false;
++ bool vf_mdd_detected = false;
+
+ reg = rd32(hw, VP_MDET_TX_PQM(i));
+ if (reg & VP_MDET_TX_PQM_VALID_M) {
+ wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+- mdd_detected = true;
++ vf_mdd_detected = true;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+@@ -1178,7 +1178,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ if (reg & VP_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+- mdd_detected = true;
++ vf_mdd_detected = true;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+@@ -1186,7 +1186,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ if (reg & VP_MDET_TX_TDPU_VALID_M) {
+ wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+- mdd_detected = true;
++ vf_mdd_detected = true;
+ dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
+ i);
+ }
+@@ -1194,19 +1194,18 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
+ reg = rd32(hw, VP_MDET_RX(i));
+ if (reg & VP_MDET_RX_VALID_M) {
+ wr32(hw, VP_MDET_RX(i), 0xFFFF);
+- mdd_detected = true;
++ vf_mdd_detected = true;
+ dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
+ i);
+ }
+
+- if (mdd_detected) {
++ if (vf_mdd_detected) {
+ vf->num_mdd_events++;
+- dev_info(&pf->pdev->dev,
+- "Use PF Control I/F to re-enable the VF\n");
+- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
++ if (vf->num_mdd_events > 1)
++ dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
++ i, vf->num_mdd_events);
+ }
+ }
+-
+ }
+
+ /**
+@@ -1667,7 +1666,7 @@ skip_req_irq:
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
+ */
+-void ice_napi_del(struct ice_vsi *vsi)
++static void ice_napi_del(struct ice_vsi *vsi)
+ {
+ int v_idx;
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index a805cbdd69be..81ea77978355 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -1134,7 +1134,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+ GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+- goto err_unroll_sriov;
++ goto err_pci_disable_sriov;
+ }
+ pf->vf = vfs;
+
+@@ -1154,12 +1154,19 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+ /* VF resources get allocated during reset */
+- if (!ice_reset_all_vfs(pf, true))
++ if (!ice_reset_all_vfs(pf, true)) {
++ ret = -EIO;
+ goto err_unroll_sriov;
++ }
+
+ goto err_unroll_intr;
+
+ err_unroll_sriov:
++ pf->vf = NULL;
++ devm_kfree(&pf->pdev->dev, vfs);
++ vfs = NULL;
++ pf->num_alloc_vfs = 0;
++err_pci_disable_sriov:
+ pci_disable_sriov(pf->pdev);
+ err_unroll_intr:
+ /* rearm interrupts here */
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 39f33afc479c..005c1693efc8 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -5687,6 +5687,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
+ */
+ if (tx_ring->launchtime_enable) {
+ ts = ns_to_timespec64(first->skb->tstamp);
++ first->skb->tstamp = 0;
+ context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
+ } else {
+ context_desc->seqnum_seed = 0;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index acba067cc15a..7c52ae8ac005 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -3226,7 +3226,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
+ page_swap = true;
+ }
+
+- if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
++ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
++ !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index ff85ce5791a3..31629fc7e820 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -842,6 +842,9 @@ void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
+ struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ int i;
+
++ if (!ipsec)
++ return;
++
+ /* search rx sa table */
+ for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
+ if (!ipsec->rx_tbl[i].used)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+index 214b01085718..6544c4539c0d 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+@@ -45,6 +45,7 @@
+ #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+ #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
+ #define IXGBE_SFF_ADDRESSING_MODE 0x4
++#define IXGBE_SFF_DDM_IMPLEMENTED 0x40
+ #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+ #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+ #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
+index c5dac6bd2be4..ee7857298361 100644
+--- a/drivers/net/ethernet/marvell/mvmdio.c
++++ b/drivers/net/ethernet/marvell/mvmdio.c
+@@ -64,7 +64,7 @@
+
+ struct orion_mdio_dev {
+ void __iomem *regs;
+- struct clk *clk[3];
++ struct clk *clk[4];
+ /*
+ * If we have access to the error interrupt pin (which is
+ * somewhat misnamed as it not only reflects internal errors
+@@ -321,6 +321,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
+
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
++ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
++ ret = -EPROBE_DEFER;
++ goto out_clk;
++ }
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+@@ -362,6 +366,7 @@ out_mdio:
+ if (dev->err_interrupt > 0)
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
+
++out_clk:
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ if (IS_ERR(dev->clk[i]))
+ break;
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+index a57d17ab91f0..fb06c0aa620a 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+@@ -1242,6 +1242,12 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+
+ input.fs = &info->fs;
+
++ /* We need to manually set the rss_ctx, since this info isn't present
++ * in info->fs
++ */
++ if (info->fs.flow_type & FLOW_RSS)
++ input.rss_ctx = info->rss_context;
++
+ ethtool_rule = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(ethtool_rule)) {
+ ret = PTR_ERR(ethtool_rule);
+diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+index ae2240074d8e..5692c6087bbb 100644
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+@@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+ }
+
+ /* Set value */
+- pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
++ pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
++ shift & MVPP2_PRS_SRAM_SHIFT_MASK;
+
+ /* Reset and set operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index a8e8350b38aa..8db9fdbc03ea 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -4192,8 +4192,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+ /* no need for full reset when exchanging programs */
+ reset = (!priv->channels.params.xdp_prog || !prog);
+
+- if (was_opened && reset)
+- mlx5e_close_locked(netdev);
+ if (was_opened && !reset) {
+ /* num_channels is invariant here, so we can take the
+ * batched reference right upfront.
+@@ -4205,20 +4203,31 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+ }
+ }
+
+- /* exchange programs, extra prog reference we got from caller
+- * as long as we don't fail from this point onwards.
+- */
+- old_prog = xchg(&priv->channels.params.xdp_prog, prog);
++ if (was_opened && reset) {
++ struct mlx5e_channels new_channels = {};
++
++ new_channels.params = priv->channels.params;
++ new_channels.params.xdp_prog = prog;
++ mlx5e_set_rq_type(priv->mdev, &new_channels.params);
++ old_prog = priv->channels.params.xdp_prog;
++
++ err = mlx5e_safe_switch_channels(priv, &new_channels, NULL);
++ if (err)
++ goto unlock;
++ } else {
++ /* exchange programs, extra prog reference we got from caller
++ * as long as we don't fail from this point onwards.
++ */
++ old_prog = xchg(&priv->channels.params.xdp_prog, prog);
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (reset) /* change RQ type according to priv->xdp_prog */
++ if (!was_opened && reset) /* change RQ type according to priv->xdp_prog */
+ mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
+
+- if (was_opened && reset)
+- err = mlx5e_open_locked(netdev);
+-
+- if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
++ if (!was_opened || reset)
+ goto unlock;
+
+ /* exchanging programs w/o reset, we update ref counts on behalf
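
The mlx5e hunk above replaces the close/reopen cycle with
mlx5e_safe_switch_channels(), so a failure to build the new channel set
leaves the old, working channels in place instead of a closed device. A
generic sketch of that make-before-break shape (my_* names are illustrative,
not the mlx5e API):

static int my_reconfigure(struct my_dev *dev, const struct my_params *params)
{
	struct my_state *fresh, *stale = dev->state;

	fresh = my_state_create(params);	/* build the new state fully */
	if (!fresh)
		return -ENOMEM;

	if (my_state_activate(dev, fresh)) {	/* try to switch over */
		my_state_destroy(fresh);	/* old state keeps running */
		return -EIO;
	}

	dev->state = fresh;
	my_state_destroy(stale);		/* tear down the old state last */
	return 0;
}
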
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 6a921e24cd5e..acab26b88261 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -939,7 +939,7 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+- vport->vport);
++ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
+ return -EOPNOTSUPP;
+@@ -1057,7 +1057,7 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+- vport->vport);
++ mlx5_eswitch_vport_num_to_index(esw, vport->vport));
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
+ return -EOPNOTSUPP;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+index fccdb06fc5c5..8c40739e0d1b 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
+@@ -3443,6 +3443,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
+ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
++ u64 db_phys_addr,
+ enum qed_pci_personality personality)
+ {
+ struct qed_dev *cdev = p_hwfn->cdev;
+@@ -3451,6 +3452,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
++ p_hwfn->db_phys_addr = db_phys_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+@@ -3546,7 +3548,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
+- cdev->doorbells, personality);
++ cdev->doorbells,
++ cdev->db_phys_addr,
++ personality);
+ if (rc)
+ return rc;
+
+@@ -3555,22 +3559,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
+ /* Initialize the rest of the hwfns */
+ if (cdev->num_hwfns > 1) {
+ void __iomem *p_regview, *p_doorbell;
+- u8 __iomem *addr;
++ u64 db_phys_addr;
++ u32 offset;
+
+ /* adjust bar offset for second engine */
+- addr = cdev->regview +
+- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+- BAR_ID_0) / 2;
+- p_regview = addr;
++ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
++ BAR_ID_0) / 2;
++ p_regview = cdev->regview + offset;
+
+- addr = cdev->doorbells +
+- qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+- BAR_ID_1) / 2;
+- p_doorbell = addr;
++ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
++ BAR_ID_1) / 2;
++
++ p_doorbell = cdev->doorbells + offset;
++
++ db_phys_addr = cdev->db_phys_addr + offset;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
+- p_doorbell, personality);
++ p_doorbell, db_phys_addr,
++ personality);
+
+ /* in case of error, need to free the previously
+ * initialized hwfn 0.
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index ded556b7bab5..eeea8683d99b 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -2708,6 +2708,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ data.input.rx_num_desc = n_ooo_bufs * 2;
+ data.input.tx_num_desc = data.input.rx_num_desc;
+ data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
++ data.input.tx_tc = PKT_LB_TC;
++ data.input.tx_dest = QED_LL2_TX_DEST_LB;
+ data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
+ data.input.secondary_queue = true;
+ data.cbs = &cbs;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+index 7873d6dfd91f..13802b825d65 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+@@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size));
+
+- out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
++ out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index cba5881b2746..a10ef700f16d 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -1029,7 +1029,6 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
+ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+ {
+ struct netsec_desc_ring *dring = &priv->desc_ring[id];
+- int i;
+
+ dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+ &dring->desc_dma, GFP_KERNEL);
+@@ -1040,19 +1039,6 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
+ if (!dring->desc)
+ goto err;
+
+- if (id == NETSEC_RING_TX) {
+- for (i = 0; i < DESC_NUM; i++) {
+- struct netsec_de *de;
+-
+- de = dring->vaddr + (DESC_SZ * i);
+- /* de->attr is not going to be accessed by the NIC
+- * until netsec_set_tx_de() is called.
+- * No need for a dma_wmb() here
+- */
+- de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+- }
+- }
+-
+ return 0;
+ err:
+ netsec_free_dring(priv, id);
+@@ -1060,6 +1046,23 @@ err:
+ return -ENOMEM;
+ }
+
++static void netsec_setup_tx_dring(struct netsec_priv *priv)
++{
++ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
++ int i;
++
++ for (i = 0; i < DESC_NUM; i++) {
++ struct netsec_de *de;
++
++ de = dring->vaddr + (DESC_SZ * i);
++ /* de->attr is not going to be accessed by the NIC
++ * until netsec_set_tx_de() is called.
++ * No need for a dma_wmb() here
++ */
++ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
++ }
++}
++
+ static int netsec_setup_rx_dring(struct netsec_priv *priv)
+ {
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+@@ -1361,6 +1364,7 @@ static int netsec_netdev_open(struct net_device *ndev)
+
+ pm_runtime_get_sync(priv->dev);
+
++ netsec_setup_tx_dring(priv);
+ ret = netsec_setup_rx_dring(priv);
+ if (ret) {
+ netif_err(priv, probe, priv->ndev,
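
The netsec change above splits the one-time DMA allocation from the per-open
descriptor initialization, so the TX ownership bits are rewritten on every
open rather than only when the ring is first allocated. The resulting call
shape, sketched with illustrative my_* names:

static int my_init_rings(struct my_priv *priv)
{
	return my_alloc_dring(priv);	/* coherent DMA memory: allocate once */
}

static int my_open(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	my_setup_tx_dring(priv);	/* ownership bits: reset on every open */
	return my_setup_rx_dring(priv);	/* RX buffers: refilled on every open */
}
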
+diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
+index ceb0d23f5041..c265cc5770e8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -251,7 +251,7 @@ struct stmmac_safety_stats {
+ #define STMMAC_COAL_TX_TIMER 1000
+ #define STMMAC_MAX_COAL_TX_TICK 100000
+ #define STMMAC_TX_MAX_FRAMES 256
+-#define STMMAC_TX_FRAMES 25
++#define STMMAC_TX_FRAMES 1
+
+ /* Packets types */
+ enum packets_types {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+index a69c34f605b1..98a15ba8be9f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+@@ -884,6 +884,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+ * address. No need to mask it again.
+ */
+ reg |= 1 << H3_EPHY_ADDR_SHIFT;
++ } else {
++ /* For SoCs without internal PHY the PHY selection bit should be
++ * set to 0 (external PHY).
++ */
++ reg &= ~H3_EPHY_SELECT;
+ }
+
+ if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 9fff81170163..54f4ffb36d60 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -206,6 +206,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
+ GMAC_ADDR_LOW(reg));
+ reg++;
+ }
++
++ while (reg <= perfect_addr_number) {
++ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
++ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
++ reg++;
++ }
+ }
+
+ #ifdef FRAME_FILTER_DEBUG
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 99d772517242..e3850938cf2f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -443,14 +443,20 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ * are required
+ */
+ value |= GMAC_PACKET_FILTER_PR;
+- } else if (!netdev_uc_empty(dev)) {
+- int reg = 1;
++ } else {
+ struct netdev_hw_addr *ha;
++ int reg = 1;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
++
++ while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
++ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
++ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
++ reg++;
++ }
+ }
+
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+@@ -468,8 +474,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_RX_FLOW_CTRL_RFE;
+- writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+ }
++ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
++
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+
+@@ -477,7 +484,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+- flow |= GMAC_TX_FLOW_CTRL_TFE;
++ flow = GMAC_TX_FLOW_CTRL_TFE;
+
+ if (duplex)
+ flow |=
+@@ -485,6 +492,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
+ }
++ } else {
++ for (queue = 0; queue < tx_cnt; queue++)
++ writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
+ }
+ }
+
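
The dwmac1000 and dwmac4 hunks above fix the same stale-filter bug: after
programming the current address list, every remaining perfect-filter slot is
explicitly zeroed, so an address removed from the list stops matching. The
shape of the fix, sketched with illustrative my_* names and register layout:

static void my_set_uc_filter(struct my_hw *hw, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int reg = 1;			/* slot 0 holds the device address */

	netdev_for_each_uc_addr(ha, dev)
		my_write_mac_slot(hw, ha->addr, reg++);

	/* scrub every slot the new list no longer uses; otherwise a
	 * previously programmed address keeps matching forever
	 */
	while (reg <= MY_MAX_PERFECT_ADDRESSES)
		my_clear_mac_slot(hw, reg++);
}
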
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 06358fe5b245..dbee9b0113e3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2048,6 +2048,9 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+ &priv->xstats, chan);
+ struct stmmac_channel *ch = &priv->channel[chan];
+
++ if (status)
++ status |= handle_rx | handle_tx;
++
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ napi_schedule_irqoff(&ch->rx_napi);
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index 634fc484a0b3..4e3026f9abed 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -2179,6 +2179,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ return ret;
+ }
+
++ slave_data->slave_node = slave_node;
+ slave_data->phy_node = of_parse_phandle(slave_node,
+ "phy-handle", 0);
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+@@ -2330,6 +2331,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, cpsw->dev);
++ ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
+ ret = register_netdev(ndev);
+ if (ret)
+ dev_err(cpsw->dev, "cpsw: error registering net device\n");
+@@ -2507,6 +2509,7 @@ static int cpsw_probe(struct platform_device *pdev)
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, dev);
++ ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "error registering net device\n");
+diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
+index 04795b97ee71..e32f11da2dce 100644
+--- a/drivers/net/ethernet/ti/cpsw_priv.h
++++ b/drivers/net/ethernet/ti/cpsw_priv.h
+@@ -272,6 +272,7 @@ struct cpsw_host_regs {
+ };
+
+ struct cpsw_slave_data {
++ struct device_node *slave_node;
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 831967f6eff8..65c16772e589 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -615,6 +615,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)
+
+ ndev->stats.tx_packets += packets;
+ ndev->stats.tx_bytes += size;
++
++ /* Matches barrier in axienet_start_xmit */
++ smp_mb();
++
+ netif_wake_queue(ndev);
+ }
+
+@@ -670,9 +674,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+
+ if (axienet_check_tx_bd_space(lp, num_frag)) {
+- if (!netif_queue_stopped(ndev))
+- netif_stop_queue(ndev);
+- return NETDEV_TX_BUSY;
++ if (netif_queue_stopped(ndev))
++ return NETDEV_TX_BUSY;
++
++ netif_stop_queue(ndev);
++
++ /* Matches barrier in axienet_start_xmit_done */
++ smp_mb();
++
++ /* Space might have just been freed - check again */
++ if (axienet_check_tx_bd_space(lp, num_frag))
++ return NETDEV_TX_BUSY;
++
++ netif_wake_queue(ndev);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
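
The axienet hunk above adopts the canonical lock-free stop/wake handshake
for TX queues: stop the queue, issue a barrier that pairs with the one added
in the completion path, then re-check for space that the completion path may
have freed in the meantime. A condensed sketch of the pairing, with
hypothetical my_* helpers standing in for the driver's ring accounting:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	if (my_ring_full(ndev)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);
		smp_mb();		/* pairs with barrier in my_tx_done() */

		if (my_ring_full(ndev))	/* completion may have just run */
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);	/* space appeared: keep going */
	}

	my_queue_skb(ndev, skb);
	return NETDEV_TX_OK;
}

static void my_tx_done(struct net_device *ndev)
{
	my_reclaim_descriptors(ndev);
	smp_mb();			/* pairs with barrier in my_xmit() */
	netif_wake_queue(ndev);
}

Either the xmit path sees the space freed by my_reclaim_descriptors(), or
the completion path sees the stopped queue and wakes it; the old code could
stop the queue just after the last completion ran and stall forever.
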
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index fc45b749db46..607f38712b4e 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -285,16 +285,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
+ return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ }
+
+-static void gtp_encap_destroy(struct sock *sk)
++static void __gtp_encap_destroy(struct sock *sk)
+ {
+ struct gtp_dev *gtp;
+
+- gtp = rcu_dereference_sk_user_data(sk);
++ lock_sock(sk);
++ gtp = sk->sk_user_data;
+ if (gtp) {
++ if (gtp->sk0 == sk)
++ gtp->sk0 = NULL;
++ else
++ gtp->sk1u = NULL;
+ udp_sk(sk)->encap_type = 0;
+ rcu_assign_sk_user_data(sk, NULL);
+ sock_put(sk);
+ }
++ release_sock(sk);
++}
++
++static void gtp_encap_destroy(struct sock *sk)
++{
++ rtnl_lock();
++ __gtp_encap_destroy(sk);
++ rtnl_unlock();
+ }
+
+ static void gtp_encap_disable_sock(struct sock *sk)
+@@ -302,7 +315,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
+ if (!sk)
+ return;
+
+- gtp_encap_destroy(sk);
++ __gtp_encap_destroy(sk);
+ }
+
+ static void gtp_encap_disable(struct gtp_dev *gtp)
+@@ -796,7 +809,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ goto out_sock;
+ }
+
+- if (rcu_dereference_sk_user_data(sock->sk)) {
++ lock_sock(sock->sk);
++ if (sock->sk->sk_user_data) {
+ sk = ERR_PTR(-EBUSY);
+ goto out_sock;
+ }
+@@ -812,6 +826,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
+
+ out_sock:
++ release_sock(sock->sk);
+ sockfd_put(sock);
+ return sk;
+ }
+@@ -843,8 +858,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
+
+ if (data[IFLA_GTP_ROLE]) {
+ role = nla_get_u32(data[IFLA_GTP_ROLE]);
+- if (role > GTP_ROLE_SGSN)
++ if (role > GTP_ROLE_SGSN) {
++ if (sk0)
++ gtp_encap_disable_sock(sk0);
++ if (sk1u)
++ gtp_encap_disable_sock(sk1u);
+ return -EINVAL;
++ }
+ }
+
+ gtp->sk0 = sk0;
+@@ -945,7 +965,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+
+ }
+
+- pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
++ pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
+ if (pctx == NULL)
+ return -ENOMEM;
+
+@@ -1034,6 +1054,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+ return -EINVAL;
+ }
+
++ rtnl_lock();
+ rcu_read_lock();
+
+ gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
+@@ -1058,6 +1079,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+
+ out_unlock:
+ rcu_read_unlock();
++ rtnl_unlock();
+ return err;
+ }
+
+@@ -1360,9 +1382,9 @@ late_initcall(gtp_init);
+
+ static void __exit gtp_fini(void)
+ {
+- unregister_pernet_subsys(&gtp_net_ops);
+ genl_unregister_family(&gtp_genl_family);
+ rtnl_link_unregister(&gtp_link_ops);
++ unregister_pernet_subsys(&gtp_net_ops);
+
+ pr_info("GTP module unloaded\n");
+ }
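
The gtp hunks above close a set of races on sk_user_data: encap setup and
teardown now both run under lock_sock() (and the netlink paths take the
RTNL), teardown also clears the gtp device's back-pointer to the socket, and
the pdp context is allocated with GFP_ATOMIC because the patched path holds
rcu_read_lock(). The guarded-teardown shape, sketched with an illustrative
my_ctx standing in for the driver context:

static void my_encap_destroy(struct sock *sk)
{
	struct my_ctx *ctx;

	lock_sock(sk);
	ctx = sk->sk_user_data;		/* stable: we own the socket lock */
	if (ctx) {
		udp_sk(sk)->encap_type = 0;
		rcu_assign_sk_user_data(sk, NULL);
		sock_put(sk);		/* drop the ref taken at setup */
	}
	release_sock(sk);
}
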
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index dcc93a873174..a3f8740c6163 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -948,6 +948,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
+ {
+ int rc;
+
++ if (!dev)
++ return -EINVAL;
++
+ rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
+ if (rc)
+ return rc;
+@@ -1290,6 +1293,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ struct device *d;
+ int rc;
+
++ if (!dev)
++ return ERR_PTR(-EINVAL);
++
+ /* Search the list of PHY devices on the mdio bus for the
+ * PHY with the requested name
+ */
+diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
+index 71812be0ac64..b6efd2d41dce 100644
+--- a/drivers/net/phy/sfp.c
++++ b/drivers/net/phy/sfp.c
+@@ -186,10 +186,11 @@ struct sfp {
+ struct gpio_desc *gpio[GPIO_MAX];
+
+ bool attached;
++ struct mutex st_mutex; /* Protects state */
+ unsigned int state;
+ struct delayed_work poll;
+ struct delayed_work timeout;
+- struct mutex sm_mutex;
++ struct mutex sm_mutex; /* Protects state machine */
+ unsigned char sm_mod_state;
+ unsigned char sm_dev_state;
+ unsigned short sm_state;
+@@ -1719,6 +1720,7 @@ static void sfp_check_state(struct sfp *sfp)
+ {
+ unsigned int state, i, changed;
+
++ mutex_lock(&sfp->st_mutex);
+ state = sfp_get_state(sfp);
+ changed = state ^ sfp->state;
+ changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
+@@ -1744,6 +1746,7 @@ static void sfp_check_state(struct sfp *sfp)
+ sfp_sm_event(sfp, state & SFP_F_LOS ?
+ SFP_E_LOS_HIGH : SFP_E_LOS_LOW);
+ rtnl_unlock();
++ mutex_unlock(&sfp->st_mutex);
+ }
+
+ static irqreturn_t sfp_irq(int irq, void *data)
+@@ -1774,6 +1777,7 @@ static struct sfp *sfp_alloc(struct device *dev)
+ sfp->dev = dev;
+
+ mutex_init(&sfp->sm_mutex);
++ mutex_init(&sfp->st_mutex);
+ INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
+ INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index c9bc96310ed4..ef548beba684 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -226,7 +226,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
+ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ int ret = 0;
+- u8 buf[ETH_ALEN];
++ u8 buf[ETH_ALEN] = {0};
+ int i;
+ unsigned long gpio_bits = dev->driver_info->data;
+
+@@ -677,7 +677,7 @@ static int asix_resume(struct usb_interface *intf)
+ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ int ret, i;
+- u8 buf[ETH_ALEN], chipcode = 0;
++ u8 buf[ETH_ALEN] = {0}, chipcode = 0;
+ u32 phyid;
+ struct asix_common_private *priv;
+
+@@ -1061,7 +1061,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
+ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
+ {
+ int ret;
+- u8 buf[ETH_ALEN];
++ u8 buf[ETH_ALEN] = {0};
+
+ usbnet_get_endpoints(dev,intf);
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 083f3f0bf37f..b4283f52a09d 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -804,6 +804,14 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+ return f;
+ }
+
++static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
++ __be32 src_vni, struct vxlan_fdb *f)
++{
++ ++vxlan->addrcnt;
++ hlist_add_head_rcu(&f->hlist,
++ vxlan_fdb_head(vxlan, mac, src_vni));
++}
++
+ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+@@ -829,18 +837,13 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
+ return rc;
+ }
+
+- ++vxlan->addrcnt;
+- hlist_add_head_rcu(&f->hlist,
+- vxlan_fdb_head(vxlan, mac, src_vni));
+-
+ *fdb = f;
+
+ return 0;
+ }
+
+-static void vxlan_fdb_free(struct rcu_head *head)
++static void __vxlan_fdb_free(struct vxlan_fdb *f)
+ {
+- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+ struct vxlan_rdst *rd, *nd;
+
+ list_for_each_entry_safe(rd, nd, &f->remotes, list) {
+@@ -850,6 +853,13 @@ static void vxlan_fdb_free(struct rcu_head *head)
+ kfree(f);
+ }
+
++static void vxlan_fdb_free(struct rcu_head *head)
++{
++ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
++
++ __vxlan_fdb_free(f);
++}
++
+ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+ bool do_notify, bool swdev_notify)
+ {
+@@ -977,6 +987,7 @@ static int vxlan_fdb_update_create(struct vxlan_dev *vxlan,
+ if (rc < 0)
+ return rc;
+
++ vxlan_fdb_insert(vxlan, mac, src_vni, f);
+ rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+ swdev_notify, extack);
+ if (rc)
+@@ -3571,12 +3582,17 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ if (err)
+ goto errout;
+
+- /* notify default fdb entry */
+ if (f) {
++ vxlan_fdb_insert(vxlan, all_zeros_mac,
++ vxlan->default_dst.remote_vni, f);
++
++ /* notify default fdb entry */
+ err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
+ RTM_NEWNEIGH, true, extack);
+- if (err)
+- goto errout;
++ if (err) {
++ vxlan_fdb_destroy(vxlan, f, false, false);
++ goto unregister;
++ }
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+@@ -3588,7 +3604,8 @@ errout:
+ * destroy the entry by hand here.
+ */
+ if (f)
+- vxlan_fdb_destroy(vxlan, f, false, false);
++ __vxlan_fdb_free(f);
++unregister:
+ if (unregister)
+ unregister_netdevice(dev);
+ return err;
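
The vxlan hunks above split hash-table insertion (vxlan_fdb_insert) out of
vxlan_fdb_create(), so an entry is published only once it is fully set up:
error paths that run before publication free the entry directly via
__vxlan_fdb_free(), while a notify failure after publication unlinks it
through the full vxlan_fdb_destroy() path. The ordering, sketched with
illustrative my_* names:

static int my_fdb_add(struct my_table *tbl, const u8 *mac)
{
	struct my_entry *e;
	int err;

	e = my_entry_alloc(mac);	/* private: no reader can see it */
	if (!e)
		return -ENOMEM;

	err = my_entry_prepare(e);
	if (err) {
		my_entry_free(e);	/* never published: plain free */
		return err;
	}

	my_hash_insert(tbl, e);		/* published: readers may hold it */

	err = my_notify_userspace(e);
	if (err) {
		my_entry_destroy(tbl, e);	/* unlink + RCU-deferred free */
		return err;
	}
	return 0;
}
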
+diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+index c704ae371c4d..42931a669b02 100644
+--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -663,6 +663,13 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
+
+ mutex_lock(&ar->conf_mutex);
+
++ if (!arsta->tx_stats) {
++ ath10k_warn(ar, "failed to get tx stats");
++ mutex_unlock(&ar->conf_mutex);
++ kfree(buf);
++ return 0;
++ }
++
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 1acc622d2183..f22840bbc389 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -2277,7 +2277,9 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+- if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
++ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
++ ath10k_txrx_tx_unref(htt, &tx_done);
++ } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
+index ad082b7d7643..b242085c3c16 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.c
++++ b/drivers/net/wireless/ath/ath10k/hw.c
+@@ -158,7 +158,7 @@ const struct ath10k_hw_values qca6174_values = {
+ };
+
+ const struct ath10k_hw_values qca99x0_values = {
+- .rtc_state_val_on = 5,
++ .rtc_state_val_on = 7,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 9c703d287333..b500fd427595 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -1630,6 +1630,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
++ /* For mesh, probe response and beacon share the same template */
++ if (ieee80211_vif_is_mesh(vif))
++ return 0;
++
+ prb = ieee80211_proberesp_get(hw, vif);
+ if (!prb) {
+ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+@@ -5588,8 +5592,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def def;
+ u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+- u8 rate, basic_rate_idx;
+- int rateidx, ret = 0, hw_rate_code;
++ u8 rate, basic_rate_idx, rateidx;
++ int ret = 0, hw_rate_code, mcast_rate;
+ enum nl80211_band band;
+ const struct ieee80211_supported_band *sband;
+
+@@ -5776,7 +5780,11 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath10k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+- rateidx = vif->bss_conf.mcast_rate[band] - 1;
++ mcast_rate = vif->bss_conf.mcast_rate[band];
++ if (mcast_rate > 0)
++ rateidx = mcast_rate - 1;
++ else
++ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 2c27f407a851..6e5f7ae00253 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -2059,6 +2059,11 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
++ ath10k_pci_irq_disable(ar);
++ ath10k_pci_irq_sync(ar);
++ napi_synchronize(&ar->napi);
++ napi_disable(&ar->napi);
++
+ /* Most likely the device has HTT Rx ring configured. The only way to
+ * prevent the device from accessing (and possibly corrupting) host
+ * memory is to reset the chip now.
+@@ -2072,10 +2077,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
+ */
+ ath10k_pci_safe_chip_reset(ar);
+
+- ath10k_pci_irq_disable(ar);
+- ath10k_pci_irq_sync(ar);
+- napi_synchronize(&ar->napi);
+- napi_disable(&ar->napi);
+ ath10k_pci_flush(ar);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
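
The ath10k hif_stop hunk above is purely an ordering fix: interrupts and
NAPI are quiesced before the chip reset, so no RX processing can race with
the reset that stops the device writing to host memory. The teardown order,
sketched with hypothetical my_* helpers around the real NAPI calls:

static void my_hif_stop(struct my_ar *ar)
{
	my_irq_disable(ar);		/* no new interrupts */
	my_irq_sync(ar);		/* wait out in-flight handlers */
	napi_synchronize(&ar->napi);	/* wait out a running poll */
	napi_disable(&ar->napi);	/* forbid re-scheduling */

	my_chip_reset(ar);		/* device can no longer DMA to host */
	my_flush_pending(ar);		/* reclaim whatever is still queued */
}
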
+diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
+index a7bc2c70d076..8f8f717a23ee 100644
+--- a/drivers/net/wireless/ath/ath10k/qmi.c
++++ b/drivers/net/wireless/ath/ath10k/qmi.c
+@@ -1002,6 +1002,7 @@ int ath10k_qmi_deinit(struct ath10k *ar)
+ qmi_handle_release(&qmi->qmi_hdl);
+ cancel_work_sync(&qmi->event_work);
+ destroy_workqueue(qmi->event_wq);
++ kfree(qmi);
+ ar_snoc->qmi = NULL;
+
+ return 0;
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index fae56c67766f..28bdf0212538 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -602,6 +602,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ full_len,
+ last_in_bundle,
+ last_in_bundle);
++ if (ret) {
++ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
++ goto err;
++ }
+ }
+
+ ar_sdio->n_rx_pkts = i;
+@@ -2077,6 +2081,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
+ cancel_work_sync(&ar_sdio->wr_async_work);
+ ath10k_core_unregister(ar);
+ ath10k_core_destroy(ar);
++
++ flush_workqueue(ar_sdio->workqueue);
++ destroy_workqueue(ar_sdio->workqueue);
+ }
+
+ static const struct sdio_device_id ath10k_sdio_devices[] = {
+diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
+index c5818d28f55a..4102df016931 100644
+--- a/drivers/net/wireless/ath/ath10k/txrx.c
++++ b/drivers/net/wireless/ath/ath10k/txrx.c
+@@ -150,6 +150,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+ {
+ struct ath10k_peer *peer;
+
++ if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
++ return NULL;
++
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list)
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index 582fb11f648a..02709fc99034 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -2840,8 +2840,10 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+- ieee80211_has_protected(hdr->frame_control))
++ ieee80211_has_protected(hdr->frame_control)) {
++ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ buf_len += IEEE80211_CCMP_MIC_LEN;
++ }
+
+ buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+ buf_len = round_up(buf_len, 4);
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
+index e1c40bb69932..12f57f9adbba 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -4535,9 +4535,10 @@ enum wmi_10_4_stats_id {
+ };
+
+ enum wmi_tlv_stats_id {
+- WMI_TLV_STAT_PDEV = BIT(0),
+- WMI_TLV_STAT_VDEV = BIT(1),
+- WMI_TLV_STAT_PEER = BIT(2),
++ WMI_TLV_STAT_PEER = BIT(0),
++ WMI_TLV_STAT_AP = BIT(1),
++ WMI_TLV_STAT_PDEV = BIT(2),
++ WMI_TLV_STAT_VDEV = BIT(3),
+ WMI_TLV_STAT_PEER_EXTD = BIT(10),
+ };
+
+diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
+index 68854c45d0a4..9ab6aa9ded5c 100644
+--- a/drivers/net/wireless/ath/ath6kl/wmi.c
++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
+@@ -1176,6 +1176,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
+ return -EINVAL;
+
+ ev = (struct wmi_pstream_timeout_event *) datap;
++ if (ev->traffic_class >= WMM_NUM_AC) {
++ ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
++ return -EINVAL;
++ }
+
+ /*
+ * When the pstream (fat pipe == AC) times out, it means there were
+@@ -1517,6 +1521,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+ return -EINVAL;
+
+ reply = (struct wmi_cac_event *) datap;
++ if (reply->ac >= WMM_NUM_AC) {
++ ath6kl_err("invalid AC: %d\n", reply->ac);
++ return -EINVAL;
++ }
+
+ if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
+ (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
+@@ -2633,7 +2641,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u16 active_tsids = 0;
+ int ret;
+
+- if (traffic_class > 3) {
++ if (traffic_class >= WMM_NUM_AC) {
+ ath6kl_err("invalid traffic class: %d\n", traffic_class);
+ return -EINVAL;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 8581d917635a..b6773d613f0c 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
+ /* Chip Revisions */
+ /******************/
+
+-static void ath9k_hw_read_revisions(struct ath_hw *ah)
++static bool ath9k_hw_read_revisions(struct ath_hw *ah)
+ {
++ u32 srev;
+ u32 val;
+
+ if (ah->get_mac_revision)
+@@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
+ val = REG_READ(ah, AR_SREV);
+ ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
+ }
+- return;
++ return true;
+ case AR9300_DEVID_AR9340:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9340;
+- return;
++ return true;
+ case AR9300_DEVID_QCA955X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9550;
+- return;
++ return true;
+ case AR9300_DEVID_AR953X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9531;
+- return;
++ return true;
+ case AR9300_DEVID_QCA956X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+- return;
++ return true;
+ }
+
+- val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
++ srev = REG_READ(ah, AR_SREV);
++
++ if (srev == -EIO) {
++ ath_err(ath9k_hw_common(ah),
++ "Failed to read SREV register");
++ return false;
++ }
++
++ val = srev & AR_SREV_ID;
+
+ if (val == 0xFF) {
+- val = REG_READ(ah, AR_SREV);
++ val = srev;
+ ah->hw_version.macVersion =
+ (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
+ ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
+@@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
+ if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
+ ah->is_pciexpress = true;
+ }
++
++ return true;
+ }
+
+ /************************************/
+@@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
+ struct ath_common *common = ath9k_hw_common(ah);
+ int r = 0;
+
+- ath9k_hw_read_revisions(ah);
++ if (!ath9k_hw_read_revisions(ah)) {
++ ath_err(common, "Could not read hardware revisions");
++ return -EOPNOTSUPP;
++ }
+
+ switch (ah->hw_version.macVersion) {
+ case AR_SREV_VERSION_5416_PCI:
+diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
+index 4e97f7f3b2a3..06e660858766 100644
+--- a/drivers/net/wireless/ath/ath9k/recv.c
++++ b/drivers/net/wireless/ath/ath9k/recv.c
+@@ -815,6 +815,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr;
+ bool discard_current = sc->rx.discard_next;
++ bool is_phyerr;
+
+ /*
+ * Discard corrupt descriptors which are marked in
+@@ -827,8 +828,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+
+ /*
+ * Discard zero-length packets and packets smaller than an ACK
++ * which are not PHY_ERROR (short radar pulses have a length of 3)
+ */
+- if (rx_stats->rs_datalen < 10) {
++ is_phyerr = rx_stats->rs_status & ATH9K_RXERR_PHY;
++ if (!rx_stats->rs_datalen ||
++ (rx_stats->rs_datalen < 10 && !is_phyerr)) {
+ RX_STAT_INC(sc, rx_len_err);
+ goto corrupt;
+ }
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index b17e1ca40995..3be0aeedb9b5 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -668,7 +668,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+ static void ath_tx_count_airtime(struct ath_softc *sc,
+ struct ieee80211_sta *sta,
+ struct ath_buf *bf,
+- struct ath_tx_status *ts)
++ struct ath_tx_status *ts,
++ u8 tid)
+ {
+ u32 airtime = 0;
+ int i;
+@@ -679,7 +680,7 @@ static void ath_tx_count_airtime(struct ath_softc *sc,
+ airtime += rate_dur * bf->rates[i].count;
+ }
+
+- ieee80211_sta_register_airtime(sta, ts->tid, airtime, 0);
++ ieee80211_sta_register_airtime(sta, tid, airtime, 0);
+ }
+
+ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+@@ -709,7 +710,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
+ if (sta) {
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+- ath_tx_count_airtime(sc, sta, bf, ts);
++ ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
+ if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
+ tid->clear_ps_filter = true;
+ }
+diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
+index d52b31b45df7..a274eb0d1968 100644
+--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
++++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
+@@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
+ JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+ JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+ JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+- JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
++ JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
+ JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+ JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+ JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
+index 3f5bd177d55f..b00a13d6d530 100644
+--- a/drivers/net/wireless/ath/wil6210/interrupt.c
++++ b/drivers/net/wireless/ath/wil6210/interrupt.c
+@@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
+ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+ {
+ struct wil6210_priv *wil = cookie;
+- u32 isr = wil_ioread32_and_clear(wil->csr +
+- HOSTADDR(RGF_DMA_EP_RX_ICR) +
+- offsetof(struct RGF_ICR, ICR));
++ u32 isr;
+ bool need_unmask = true;
+
++ wil6210_mask_irq_rx(wil);
++
++ isr = wil_ioread32_and_clear(wil->csr +
++ HOSTADDR(RGF_DMA_EP_RX_ICR) +
++ offsetof(struct RGF_ICR, ICR));
++
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err_ratelimited(wil, "spurious IRQ: RX\n");
++ wil6210_unmask_irq_rx(wil);
+ return IRQ_NONE;
+ }
+
+- wil6210_mask_irq_rx(wil);
+-
+ /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
+ * moderation is not used. Interrupt moderation may cause RX
+ * buffer overflow while RX_DONE is delayed. The required
+@@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
+ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+ {
+ struct wil6210_priv *wil = cookie;
+- u32 isr = wil_ioread32_and_clear(wil->csr +
+- HOSTADDR(RGF_INT_GEN_RX_ICR) +
+- offsetof(struct RGF_ICR, ICR));
++ u32 isr;
+ bool need_unmask = true;
+
++ wil6210_mask_irq_rx_edma(wil);
++
++ isr = wil_ioread32_and_clear(wil->csr +
++ HOSTADDR(RGF_INT_GEN_RX_ICR) +
++ offsetof(struct RGF_ICR, ICR));
++
+ trace_wil6210_irq_rx(isr);
+ wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: RX\n");
++ wil6210_unmask_irq_rx_edma(wil);
+ return IRQ_NONE;
+ }
+
+- wil6210_mask_irq_rx_edma(wil);
+-
+ if (likely(isr & BIT_RX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "RX status ring\n");
+ isr &= ~BIT_RX_STATUS_IRQ;
+@@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
+ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+ {
+ struct wil6210_priv *wil = cookie;
+- u32 isr = wil_ioread32_and_clear(wil->csr +
+- HOSTADDR(RGF_INT_GEN_TX_ICR) +
+- offsetof(struct RGF_ICR, ICR));
++ u32 isr;
+ bool need_unmask = true;
+
++ wil6210_mask_irq_tx_edma(wil);
++
++ isr = wil_ioread32_and_clear(wil->csr +
++ HOSTADDR(RGF_INT_GEN_TX_ICR) +
++ offsetof(struct RGF_ICR, ICR));
++
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err(wil, "spurious IRQ: TX\n");
++ wil6210_unmask_irq_tx_edma(wil);
+ return IRQ_NONE;
+ }
+
+- wil6210_mask_irq_tx_edma(wil);
+-
+ if (likely(isr & BIT_TX_STATUS_IRQ)) {
+ wil_dbg_irq(wil, "TX status ring\n");
+ isr &= ~BIT_TX_STATUS_IRQ;
+@@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
+ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
+ {
+ struct wil6210_priv *wil = cookie;
+- u32 isr = wil_ioread32_and_clear(wil->csr +
+- HOSTADDR(RGF_DMA_EP_TX_ICR) +
+- offsetof(struct RGF_ICR, ICR));
++ u32 isr;
+ bool need_unmask = true;
+
++ wil6210_mask_irq_tx(wil);
++
++ isr = wil_ioread32_and_clear(wil->csr +
++ HOSTADDR(RGF_DMA_EP_TX_ICR) +
++ offsetof(struct RGF_ICR, ICR));
++
+ trace_wil6210_irq_tx(isr);
+ wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
+
+ if (unlikely(!isr)) {
+ wil_err_ratelimited(wil, "spurious IRQ: TX\n");
++ wil6210_unmask_irq_tx(wil);
+ return IRQ_NONE;
+ }
+
+- wil6210_mask_irq_tx(wil);
+-
+ if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
+ wil_dbg_irq(wil, "TX done\n");
+ isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
+@@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
+ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+ {
+ struct wil6210_priv *wil = cookie;
+- u32 isr = wil_ioread32_and_clear(wil->csr +
+- HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+- offsetof(struct RGF_ICR, ICR));
++ u32 isr;
++
++ wil6210_mask_irq_misc(wil, false);
++
++ isr = wil_ioread32_and_clear(wil->csr +
++ HOSTADDR(RGF_DMA_EP_MISC_ICR) +
++ offsetof(struct RGF_ICR, ICR));
+
+ trace_wil6210_irq_misc(isr);
+ wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
+
+ if (!isr) {
+ wil_err(wil, "spurious IRQ: MISC\n");
++ wil6210_unmask_irq_misc(wil, false);
+ return IRQ_NONE;
+ }
+
+- wil6210_mask_irq_misc(wil, false);
+-
+ if (isr & ISR_MISC_FW_ERROR) {
+ u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
+ u32 ucode_assert_code =
+@@ -580,7 +595,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
+ /* no need to handle HALP ICRs until next vote */
+ wil->halp.handle_icr = false;
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
+- wil6210_mask_halp(wil);
++ wil6210_mask_irq_misc(wil, true);
+ complete(&wil->halp.comp);
+ }
+ }
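
Each wil6210 handler above is reordered the same way: the interrupt source
is masked before the cause register is read-and-cleared, and the spurious
(isr == 0) path unmasks again so the line is not left dead. The common
shape, sketched with illustrative my_* helpers:

static irqreturn_t my_irq(int irq, void *cookie)
{
	struct my_dev *dev = cookie;
	u32 cause;

	my_mask_irq(dev);			/* quiesce the source first */
	cause = my_read_and_clear_cause(dev);	/* now race-free */

	if (!cause) {
		my_unmask_irq(dev);		/* don't leave it masked */
		return IRQ_NONE;
	}

	my_handle(dev, cause);
	my_unmask_irq(dev);
	return IRQ_HANDLED;
}
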
+diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
+index 4ccfd1404458..d74837cce67f 100644
+--- a/drivers/net/wireless/ath/wil6210/txrx.c
++++ b/drivers/net/wireless/ath/wil6210/txrx.c
+@@ -750,6 +750,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+ [GRO_HELD] = "GRO_HELD",
+ [GRO_NORMAL] = "GRO_NORMAL",
+ [GRO_DROP] = "GRO_DROP",
++ [GRO_CONSUMED] = "GRO_CONSUMED",
+ };
+
+ wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
+diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
+index d89cd41e78ac..89a75ff29410 100644
+--- a/drivers/net/wireless/ath/wil6210/wmi.c
++++ b/drivers/net/wireless/ath/wil6210/wmi.c
+@@ -3220,7 +3220,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
+ /* check if someone waits for this event */
+ if (wil->reply_id && wil->reply_id == id &&
+ wil->reply_mid == mid) {
+- WARN_ON(wil->reply_buf);
++ if (wil->reply_buf) {
++ /* an event received while wmi_call is waiting
++ * with a buffer should be handled in the
++ * wmi_recv_cmd function. Getting it here means
++ * a previous wmi_call timed out, so drop the
++ * event and do not handle it.
++ */
++ wil_err(wil,
++ "Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
++ id, eventid2name(id));
++ return;
++ }
+
+ wmi_evt_call_handler(vif, id, evt_data,
+ len - sizeof(*wmi));
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+index 33d7bc5500db..c875e173771c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+@@ -2303,8 +2303,6 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
+ /* start recording again if the firmware is not crashed */
+ if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+ fwrt->fw->dbg.dest_tlv) {
+- /* wait before we collect the data till the DBGC stop */
+- udelay(500);
+ iwl_fw_dbg_restart_recording(fwrt, ¶ms);
+ }
+ }
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+index fd0ad220e961..c5c015a66106 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+@@ -294,7 +294,10 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
+ }
+
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
+- udelay(100);
++ /* wait for the DBGC to finish writing the internal buffer to DRAM to
++ * avoid halting the HW while writing
++ */
++ usleep_range(700, 1000);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
+ #ifdef CONFIG_IWLWIFI_DEBUGFS
+ trans->dbg_rec_on = false;
+@@ -324,7 +327,6 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
+ iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
+ } else {
+ iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
+- udelay(100);
+ iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
+ }
+ }
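
On the timing change above: udelay() busy-waits and is meant for atomic context, while usleep_range() sleeps and gives the timer subsystem a [min, max] window to coalesce wakeups, which is the usual choice for sub-millisecond waits in sleepable context. A minimal illustration of the two calls (sketch only, not part of the patch):

    #include <linux/delay.h>

    static void delay_styles_example(void)
    {
            /* busy-wait: burns CPU for 100 us; OK in atomic context */
            udelay(100);

            /* sleeping wait: only where sleeping is allowed; the range
             * lets the timer subsystem batch the wakeup anywhere
             * between 700 and 1000 us
             */
            usleep_range(700, 1000);
    }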
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+index ff85d69c2a8c..557ee47bffd8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
++++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
+@@ -8,7 +8,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 Intel Corporation
++ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+@@ -31,7 +31,7 @@
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+- * Copyright(c) 2018 Intel Corporation
++ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+@@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
+ .len = { 0, },
+ };
+ struct iwl_rx_packet *pkt;
++ int ret;
+
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+@@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
+ else
+ cmd.id = SHARED_MEM_CFG;
+
+- if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
++ ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
++
++ if (ret) {
++ WARN(ret != -ERFKILL,
++ "Could not send the SMEM command: %d\n", ret);
+ return;
++ }
+
+ pkt = cmd.resp_pkt;
+ if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+index 553554846009..93da96a7247c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+@@ -336,6 +336,7 @@ enum {
+ /* RF_ID value */
+ #define CSR_HW_RF_ID_TYPE_JF (0x00105100)
+ #define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
++#define CSR_HW_RF_ID_TYPE_HR1 (0x0010c100)
+ #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
+ #define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
+ #define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 153717587aeb..559f6df1a74d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -419,6 +419,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+
+ lockdep_assert_held(&mvm->mutex);
+
++ mvm->rfkill_safe_init_done = false;
++
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &init_wait,
+ init_complete,
+@@ -537,8 +539,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+
+ lockdep_assert_held(&mvm->mutex);
+
+- if (WARN_ON_ONCE(mvm->rfkill_safe_init_done))
+- return 0;
++ mvm->rfkill_safe_init_done = false;
+
+ iwl_init_notification_wait(&mvm->notif_wait,
+ &calib_wait,
+@@ -1108,10 +1109,13 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
+
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
+
++ mvm->rfkill_safe_init_done = false;
+ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+ if (ret)
+ return ret;
+
++ mvm->rfkill_safe_init_done = true;
++
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
+
+ return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+index fdbabca0280e..964c7baabede 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+@@ -207,6 +207,12 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
+ },
+ };
+
++static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
++ enum set_key_cmd cmd,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ struct ieee80211_key_conf *key);
++
+ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+ {
+ if (!iwl_mvm_is_d0i3_supported(mvm))
+@@ -2636,7 +2642,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ {
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+- int ret;
++ int ret, i;
+
+ /*
+ * iwl_mvm_mac_ctxt_add() might read directly from the device
+@@ -2710,6 +2716,20 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+ /* must be set before quota calculations */
+ mvmvif->ap_ibss_active = true;
+
++ /* send all the early keys to the device now */
++ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
++ struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i];
++
++ if (!key)
++ continue;
++
++ mvmvif->ap_early_keys[i] = NULL;
++
++ ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
++ if (ret)
++ goto out_quota_failed;
++ }
++
+ if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+ iwl_mvm_vif_set_low_latency(mvmvif, true,
+ LOW_LATENCY_VIF_TYPE);
+@@ -3479,11 +3499,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+ {
++ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_key_pn *ptk_pn;
+ int keyidx = key->keyidx;
+- int ret;
++ int ret, i;
+ u8 key_offset;
+
+ if (iwlwifi_mod_params.swcrypto) {
+@@ -3556,6 +3577,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+ break;
+ }
++
++ if (!mvmvif->ap_ibss_active) {
++ for (i = 0;
++ i < ARRAY_SIZE(mvmvif->ap_early_keys);
++ i++) {
++ if (!mvmvif->ap_early_keys[i]) {
++ mvmvif->ap_early_keys[i] = key;
++ break;
++ }
++ }
++
++ if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
++ ret = -ENOSPC;
++
++ break;
++ }
+ }
+
+ /* During FW restart, in order to restore the state as it was,
+@@ -3624,6 +3661,18 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+
+ break;
+ case DISABLE_KEY:
++ ret = -ENOENT;
++ for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
++ if (mvmvif->ap_early_keys[i] == key) {
++ mvmvif->ap_early_keys[i] = NULL;
++ ret = 0;
++ }
++ }
++
++ /* found in pending list - don't do anything else */
++ if (ret == 0)
++ break;
++
+ if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
+ ret = 0;
+ break;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+index 02efcf2189c4..88af1f0ba3f0 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+@@ -501,6 +501,9 @@ struct iwl_mvm_vif {
+ netdev_features_t features;
+
+ struct iwl_probe_resp_data __rcu *probe_resp_data;
++
++ /* we can only have 2 GTK + 2 IGTK active at a time */
++ struct ieee80211_key_conf *ap_early_keys[4];
+ };
+
+ static inline struct iwl_mvm_vif *
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 0c2aabc842f9..96f8d38ea321 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -726,6 +726,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+
+ memcpy(&info, skb->cb, sizeof(info));
+
++ if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
++ return -1;
++
+ if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
+ return -1;
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+index f496d1bcb643..1719a5ff77a9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+@@ -169,7 +169,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+
+ memcpy(iml_img, trans->iml, trans->iml_len);
+
+- iwl_enable_interrupts(trans);
++ iwl_enable_fw_load_int_ctx_info(trans);
+
+ /* kick FW self load */
+ iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+index 8969b47bacf2..d38cefbb779e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+@@ -222,7 +222,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+
+ trans_pcie->ctxt_info = ctxt_info;
+
+- iwl_enable_interrupts(trans);
++ iwl_enable_fw_load_int_ctx_info(trans);
+
+ /* Configure debug, if exists */
+ if (iwl_pcie_dbg_on(trans))
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+index 85973dd57234..dcb3a3768cbd 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+@@ -874,6 +874,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
+ }
+ }
+
++static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
++{
++ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++ IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
++
++ if (!trans_pcie->msix_enabled) {
++ /*
++ * When we receive the ALIVE interrupt, the ISR will call
++ * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
++ * interrupt (which is not really needed anymore) but also the
++ * RX interrupt which will allow us to receive the ALIVE
++ * notification (which is Rx) and continue the flow.
++ */
++ trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
++ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
++ } else {
++ iwl_enable_hw_int_msk_msix(trans,
++ MSIX_HW_INT_CAUSES_REG_ALIVE);
++ /*
++ * Leave all the FH causes enabled to get the ALIVE
++ * notification.
++ */
++ iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
++ }
++}
++
+ static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
+ {
+ return index & (q->n_window - 1);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+index 31b3591f71d1..e5220905dff1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+@@ -1827,26 +1827,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+ goto out;
+ }
+
+- if (iwl_have_debug_level(IWL_DL_ISR)) {
+- /* NIC fires this, but we don't use it, redundant with WAKEUP */
+- if (inta & CSR_INT_BIT_SCD) {
+- IWL_DEBUG_ISR(trans,
+- "Scheduler finished to transmit the frame/frames.\n");
+- isr_stats->sch++;
+- }
++ /* NIC fires this, but we don't use it, redundant with WAKEUP */
++ if (inta & CSR_INT_BIT_SCD) {
++ IWL_DEBUG_ISR(trans,
++ "Scheduler finished to transmit the frame/frames.\n");
++ isr_stats->sch++;
++ }
+
+- /* Alive notification via Rx interrupt will do the real work */
+- if (inta & CSR_INT_BIT_ALIVE) {
+- IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+- isr_stats->alive++;
+- if (trans->cfg->gen2) {
+- /*
+- * We can restock, since firmware configured
+- * the RFH
+- */
+- iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+- }
++ /* Alive notification via Rx interrupt will do the real work */
++ if (inta & CSR_INT_BIT_ALIVE) {
++ IWL_DEBUG_ISR(trans, "Alive interrupt\n");
++ isr_stats->alive++;
++ if (trans->cfg->gen2) {
++ /*
++ * We can restock, since firmware configured
++ * the RFH
++ */
++ iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+ }
++
++ handled |= CSR_INT_BIT_ALIVE;
+ }
+
+ /* Safely ignore these bits for debug checks below */
+@@ -1965,6 +1965,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ iwl_enable_rfkill_int(trans);
++ /* Re-enable the ALIVE / Rx interrupt if it occurred */
++ else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
++ iwl_enable_fw_load_int_ctx_info(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+
+ out:
+@@ -2108,10 +2111,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
+- if (iwl_have_debug_level(IWL_DL_ISR))
+- IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
+- inta_fh,
++ if (iwl_have_debug_level(IWL_DL_ISR)) {
++ IWL_DEBUG_ISR(trans,
++ "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
++ inta_fh, trans_pcie->fh_mask,
+ iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
++ if (inta_fh & ~trans_pcie->fh_mask)
++ IWL_DEBUG_ISR(trans,
++ "We got a masked interrupt (0x%08x)\n",
++ inta_fh & ~trans_pcie->fh_mask);
++ }
++
++ inta_fh &= trans_pcie->fh_mask;
+
+ if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+ inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+@@ -2151,11 +2162,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+ }
+
+ /* After checking FH register check HW register */
+- if (iwl_have_debug_level(IWL_DL_ISR))
++ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+- "ISR inta_hw 0x%08x, enabled 0x%08x\n",
+- inta_hw,
++ "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
++ inta_hw, trans_pcie->hw_mask,
+ iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
++ if (inta_hw & ~trans_pcie->hw_mask)
++ IWL_DEBUG_ISR(trans,
++ "We got a masked interrupt 0x%08x\n",
++ inta_hw & ~trans_pcie->hw_mask);
++ }
++
++ inta_hw &= trans_pcie->hw_mask;
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+index 8507a7bdcfdd..ea1d2bed502d 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+@@ -273,6 +273,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+ * paging memory cannot be freed included since FW will still use it
+ */
+ iwl_pcie_ctxt_info_free(trans);
++
++ /*
++ * Re-enable all the interrupts, including the RF-Kill one, now that
++ * the firmware is alive.
++ */
++ iwl_enable_interrupts(trans);
++ mutex_lock(&trans_pcie->mutex);
++ iwl_pcie_check_hw_rf_kill(trans);
++ mutex_unlock(&trans_pcie->mutex);
+ }
+
+ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index dfa1bed124aa..199eddea82a9 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -3575,9 +3575,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
+ }
+ } else if (cfg == &iwl_ax101_cfg_qu_hr) {
+- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
++ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
++ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
++ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
++ (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
++ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+index b8f48d10f27a..a27bc6791aa7 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+@@ -96,6 +96,9 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+ bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int i, idx;
+
++ if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
++ return -EINVAL;
++
+ memset(status, 0, sizeof(*status));
+
+ unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
+diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
+index 66d60283e456..f6a0454abe04 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
++++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
+@@ -185,10 +185,23 @@ static void mt7601u_complete_rx(struct urb *urb)
+ struct mt7601u_rx_queue *q = &dev->rx_q;
+ unsigned long flags;
+
+- spin_lock_irqsave(&dev->rx_lock, flags);
++ /* do not schedule the rx tasklet if the urb has been
++ * unlinked or the device has been removed
++ */
++ switch (urb->status) {
++ case -ECONNRESET:
++ case -ESHUTDOWN:
++ case -ENOENT:
++ return;
++ default:
++ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
++ urb->status);
++ /* fall through */
++ case 0:
++ break;
++ }
+
+- if (mt7601u_urb_has_error(urb))
+- dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
++ spin_lock_irqsave(&dev->rx_lock, flags);
+ if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+ goto out;
+
+@@ -220,14 +233,25 @@ static void mt7601u_complete_tx(struct urb *urb)
+ struct sk_buff *skb;
+ unsigned long flags;
+
+- spin_lock_irqsave(&dev->tx_lock, flags);
++ switch (urb->status) {
++ case -ECONNRESET:
++ case -ESHUTDOWN:
++ case -ENOENT:
++ return;
++ default:
++ dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
++ urb->status);
++ /* fall through */
++ case 0:
++ break;
++ }
+
+- if (mt7601u_urb_has_error(urb))
+- dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
++ spin_lock_irqsave(&dev->tx_lock, flags);
+ if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+ goto out;
+
+ skb = q->e[q->start].skb;
++ q->e[q->start].skb = NULL;
+ trace_mt_tx_dma_done(dev, skb);
+
+ __skb_queue_tail(&dev->tx_skb_done, skb);
+@@ -355,19 +379,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+ static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+ {
+ int i;
+- unsigned long flags;
+-
+- spin_lock_irqsave(&dev->rx_lock, flags);
+-
+- for (i = 0; i < dev->rx_q.entries; i++) {
+- int next = dev->rx_q.end;
+
+- spin_unlock_irqrestore(&dev->rx_lock, flags);
+- usb_poison_urb(dev->rx_q.e[next].urb);
+- spin_lock_irqsave(&dev->rx_lock, flags);
+- }
+-
+- spin_unlock_irqrestore(&dev->rx_lock, flags);
++ for (i = 0; i < dev->rx_q.entries; i++)
++ usb_poison_urb(dev->rx_q.e[i].urb);
+ }
+
+ static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+@@ -437,10 +451,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+ {
+ int i;
+
+- WARN_ON(q->used);
+-
+ for (i = 0; i < q->entries; i++) {
+ usb_poison_urb(q->e[i].urb);
++ if (q->e[i].skb)
++ mt7601u_tx_status(q->dev, q->e[i].skb);
+ usb_free_urb(q->e[i].urb);
+ }
+ }
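
The two completion handlers above adopt the same status triage before taking any locks: -ECONNRESET, -ESHUTDOWN and -ENOENT mean the urb was unlinked or the device is going away, so the handler must return without rescheduling anything; other errors are logged rate-limited and fall through to the success path. A condensed sketch of that switch (hypothetical handler, not the driver code):

    #include <linux/usb.h>

    static void urb_complete_sketch(struct urb *urb)
    {
            switch (urb->status) {
            case -ECONNRESET:       /* urb was unlinked */
            case -ESHUTDOWN:        /* device or bus is shutting down */
            case -ENOENT:           /* urb was killed */
                    return;         /* do not touch queues or resubmit */
            default:
                    dev_err_ratelimited(&urb->dev->dev,
                                        "urb failed: %d\n", urb->status);
                    /* fall through */
            case 0:
                    break;          /* success: process the transfer */
            }

            /* ... normal completion work under the queue lock ... */
    }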
+diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
+index 906e19c5f628..f3dff8319a4c 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
++++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
+@@ -109,9 +109,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+- spin_lock(&dev->mac_lock);
++ spin_lock_bh(&dev->mac_lock);
+ ieee80211_tx_status(dev->hw, skb);
+- spin_unlock(&dev->mac_lock);
++ spin_unlock_bh(&dev->mac_lock);
+ }
+
+ static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+index 67b81c7221c4..7e3a621b9c0d 100644
+--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
++++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+@@ -372,14 +372,9 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
+ struct queue_entry *entry = (struct queue_entry *)urb->context;
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+
+- if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
++ if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
+ return;
+
+- /*
+- * Report the frame as DMA done
+- */
+- rt2x00lib_dmadone(entry);
+-
+ /*
+ * Check if the received data is simply too small
+ * to be actually valid, or if the urb is signaling
+@@ -388,6 +383,11 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
+ if (urb->actual_length < entry->queue->desc_size || urb->status)
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+
++ /*
++ * Report the frame as DMA done
++ */
++ rt2x00lib_dmadone(entry);
++
+ /*
+ * Schedule the delayed work for reading the RX status
+ * from the device.
+diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
+index e24fda5e9087..34d68dbf4b4c 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
++++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
+@@ -1064,13 +1064,13 @@ int rtl_usb_probe(struct usb_interface *intf,
+ rtlpriv->cfg->ops->read_eeprom_info(hw);
+ err = _rtl_usb_init(hw);
+ if (err)
+- goto error_out;
++ goto error_out2;
+ rtl_usb_init_sw(hw);
+ /* Init mac80211 sw */
+ err = rtl_init_core(hw);
+ if (err) {
+ pr_err("Can't allocate sw for mac80211\n");
+- goto error_out;
++ goto error_out2;
+ }
+ if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
+ pr_err("Can't init_sw_vars\n");
+@@ -1091,6 +1091,7 @@ int rtl_usb_probe(struct usb_interface *intf,
+
+ error_out:
+ rtl_deinit_core(hw);
++error_out2:
+ _rtl_usb_io_handler_release(hw);
+ usb_put_dev(udev);
+ complete(&rtlpriv->firmware_loading_complete);
+diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
+index 49fc18ee0565..6d22b0f83b3b 100644
+--- a/drivers/nvdimm/dax_devs.c
++++ b/drivers/nvdimm/dax_devs.c
+@@ -118,7 +118,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
+ nvdimm_bus_unlock(&ndns->dev);
+ if (!dax_dev)
+ return -ENOMEM;
+- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
++ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn, DAX_SIG);
+ dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
+diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
+index f58b849e455b..dfb2bcda8f5a 100644
+--- a/drivers/nvdimm/pfn.h
++++ b/drivers/nvdimm/pfn.h
+@@ -28,6 +28,7 @@ struct nd_pfn_sb {
+ __le32 end_trunc;
+ /* minor-version-2 record the base alignment of the mapping */
+ __le32 align;
++ /* minor-version-3 guarantees the padding and flags are zero */
+ u8 padding[4000];
+ __le64 checksum;
+ };
+diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
+index 0f81fc56bbfd..4977424693b0 100644
+--- a/drivers/nvdimm/pfn_devs.c
++++ b/drivers/nvdimm/pfn_devs.c
+@@ -412,6 +412,15 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
+ return 0;
+ }
+
++/**
++ * nd_pfn_validate - read and validate info-block
++ * @nd_pfn: fsdax namespace runtime state / properties
++ * @sig: 'devdax' or 'fsdax' signature
++ *
++ * Upon return, the info-block buffer contents (->pfn_sb) are
++ * indeterminate when validation fails, and a coherent info-block
++ * otherwise.
++ */
+ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
+ {
+ u64 checksum, offset;
+@@ -557,7 +566,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
+ nvdimm_bus_unlock(&ndns->dev);
+ if (!pfn_dev)
+ return -ENOMEM;
+- pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
++ pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn = to_nd_pfn(pfn_dev);
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn, PFN_SIG);
+@@ -694,7 +703,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ u64 checksum;
+ int rc;
+
+- pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
++ pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ if (!pfn_sb)
+ return -ENOMEM;
+
+@@ -703,11 +712,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ sig = DAX_SIG;
+ else
+ sig = PFN_SIG;
++
+ rc = nd_pfn_validate(nd_pfn, sig);
+ if (rc != -ENODEV)
+ return rc;
+
+ /* no info block, do init */;
++ memset(pfn_sb, 0, sizeof(*pfn_sb));
++
+ nd_region = to_nd_region(nd_pfn->dev.parent);
+ if (nd_region->ro) {
+ dev_info(&nd_pfn->dev,
+@@ -760,7 +772,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
+ memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+ memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+ pfn_sb->version_major = cpu_to_le16(1);
+- pfn_sb->version_minor = cpu_to_le16(2);
++ pfn_sb->version_minor = cpu_to_le16(3);
+ pfn_sb->start_pad = cpu_to_le32(start_pad);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->align = cpu_to_le32(nd_pfn->align);
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 120fb593d1da..22c68e3b71d5 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3344,6 +3344,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ return;
+
+ nvme_fault_inject_fini(ns);
++
++ mutex_lock(&ns->ctrl->subsys->lock);
++ list_del_rcu(&ns->siblings);
++ mutex_unlock(&ns->ctrl->subsys->lock);
++ synchronize_rcu(); /* guarantee not available in head->list */
++ nvme_mpath_clear_current_path(ns);
++ synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
++
+ if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
+ del_gendisk(ns->disk);
+ blk_cleanup_queue(ns->queue);
+@@ -3351,16 +3359,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
+ blk_integrity_unregister(ns->disk);
+ }
+
+- mutex_lock(&ns->ctrl->subsys->lock);
+- list_del_rcu(&ns->siblings);
+- nvme_mpath_clear_current_path(ns);
+- mutex_unlock(&ns->ctrl->subsys->lock);
+-
+ down_write(&ns->ctrl->namespaces_rwsem);
+ list_del_init(&ns->list);
+ up_write(&ns->ctrl->namespaces_rwsem);
+
+- synchronize_srcu(&ns->head->srcu);
+ nvme_mpath_check_last_path(ns);
+ nvme_put_ns(ns);
+ }
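
The reordering in nvme_ns_remove() above is the standard RCU removal discipline: unlink the entry while holding the writer lock, wait out a grace period so no reader can still hold a reference obtained through the list, and only then tear the object down. A generic sketch of that sequence (hypothetical names, not the nvme code):

    #include <linux/mutex.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct node {
            struct list_head siblings;
            /* ... payload that readers dereference under rcu_read_lock() ... */
    };

    static void remove_node(struct node *n, struct mutex *lock)
    {
            mutex_lock(lock);
            list_del_rcu(&n->siblings);     /* 1. unlink from the list */
            mutex_unlock(lock);

            synchronize_rcu();              /* 2. wait for all current readers */

            kfree(n);                       /* 3. now safe to free */
    }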
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 524d6bd6d095..f5bc1c30cef5 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2068,6 +2068,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+ .priv = dev,
+ };
+ unsigned int irq_queues, this_p_queues;
++ unsigned int nr_cpus = num_possible_cpus();
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one IO
+@@ -2078,7 +2079,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+ this_p_queues = nr_io_queues - 1;
+ irq_queues = 1;
+ } else {
+- irq_queues = nr_io_queues - this_p_queues + 1;
++ if (nr_cpus < nr_io_queues - this_p_queues)
++ irq_queues = nr_cpus + 1;
++ else
++ irq_queues = nr_io_queues - this_p_queues + 1;
+ }
+ dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
+
+@@ -2480,11 +2484,13 @@ static void nvme_reset_work(struct work_struct *work)
+ struct nvme_dev *dev =
+ container_of(work, struct nvme_dev, ctrl.reset_work);
+ bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
+- int result = -ENODEV;
++ int result;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
+
+- if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
++ if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
++ result = -ENODEV;
+ goto out;
++ }
+
+ /*
+ * If we're called to reset a live controller first shut it down before
+@@ -2528,6 +2534,7 @@ static void nvme_reset_work(struct work_struct *work)
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller CONNECTING\n");
++ result = -EBUSY;
+ goto out;
+ }
+
+@@ -2588,6 +2595,7 @@ static void nvme_reset_work(struct work_struct *work)
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
++ result = -ENODEV;
+ goto out;
+ }
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 3a9789388bfb..6b2f7cadec3c 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -682,7 +682,7 @@ static int _set_opp_custom(const struct opp_table *opp_table,
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*old_supply) * opp_table->regulator_count;
+- if (IS_ERR(old_supply))
++ if (!old_supply)
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_supply, size);
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 0ed235d560e3..5d1713069d14 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+
+ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+ {
++ /* Ensure that PERST has been asserted for at least 100 ms */
++ msleep(100);
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ }
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 82acd6155adf..40b625458afa 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1875,6 +1875,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
+ static void hv_eject_device_work(struct work_struct *work)
+ {
+ struct pci_eject_response *ejct_pkt;
++ struct hv_pcibus_device *hbus;
+ struct hv_pci_dev *hpdev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+@@ -1885,6 +1886,7 @@ static void hv_eject_device_work(struct work_struct *work)
+ } ctxt;
+
+ hpdev = container_of(work, struct hv_pci_dev, wrk);
++ hbus = hpdev->hbus;
+
+ WARN_ON(hpdev->state != hv_pcichild_ejecting);
+
+@@ -1895,8 +1897,7 @@ static void hv_eject_device_work(struct work_struct *work)
+ * because hbus->pci_bus may not exist yet.
+ */
+ wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
+- pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
+- wslot);
++ pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
+ if (pdev) {
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(pdev);
+@@ -1904,9 +1905,9 @@ static void hv_eject_device_work(struct work_struct *work)
+ pci_unlock_rescan_remove();
+ }
+
+- spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
++ spin_lock_irqsave(&hbus->device_list_lock, flags);
+ list_del(&hpdev->list_entry);
+- spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
++ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+@@ -1915,7 +1916,7 @@ static void hv_eject_device_work(struct work_struct *work)
+ ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
+ ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
+- vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
++ vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
+ sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
+ VM_PKT_DATA_INBAND, 0);
+
+@@ -1924,7 +1925,9 @@ static void hv_eject_device_work(struct work_struct *work)
+ /* For the two refs got in new_pcichild_device() */
+ put_pcichild(hpdev);
+ put_pcichild(hpdev);
+- put_hvpcibus(hpdev->hbus);
++ /* hpdev has been freed. Do not use it any more. */
++
++ put_hvpcibus(hbus);
+ }
+
+ /**
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 8abc843b1615..720da09d4d73 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1004,15 +1004,10 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+ if (state == PCI_D0) {
+ pci_platform_power_transition(dev, PCI_D0);
+ /*
+- * Mandatory power management transition delays, see
+- * PCI Express Base Specification Revision 2.0 Section
+- * 6.6.1: Conventional Reset. Do not delay for
+- * devices powered on/off by corresponding bridge,
+- * because have already delayed for the bridge.
++ * Mandatory power management transition delays are
++ * handled in the PCIe portdrv resume hooks.
+ */
+ if (dev->runtime_d3cold) {
+- if (dev->d3cold_delay && !dev->imm_ready)
+- msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+ * whole hierarchy may be powered on into
+@@ -2065,6 +2060,13 @@ static void pci_pme_list_scan(struct work_struct *work)
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
++ /*
++ * If the device is in D3cold it should not be
++ * polled either.
++ */
++ if (pme_dev->dev->current_state == PCI_D3cold)
++ continue;
++
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ } else {
+ list_del(&pme_dev->list);
+@@ -4568,14 +4570,16 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
+
+ return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ }
++
+ /**
+- * pcie_wait_for_link - Wait until link is active or inactive
++ * pcie_wait_for_link_delay - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
++ * @delay: Delay to wait after link has become active (in ms)
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
++bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
+ {
+ int timeout = 1000;
+ bool ret;
+@@ -4612,13 +4616,25 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+ timeout -= 10;
+ }
+ if (active && ret)
+- msleep(100);
++ msleep(delay);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
+ }
+
++/**
++ * pcie_wait_for_link - Wait until link is active or inactive
++ * @pdev: Bridge device
++ * @active: waiting for active or inactive?
++ *
++ * Use this to wait till link becomes active or inactive.
++ */
++bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
++{
++ return pcie_wait_for_link_delay(pdev, active, 100);
++}
++
+ void pci_reset_secondary_bus(struct pci_dev *dev)
+ {
+ u16 ctrl;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 9cb99380c61e..59802b3def4b 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -493,6 +493,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
+
++bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
+ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index 1b330129089f..308c3e0c4a34 100644
+--- a/drivers/pci/pcie/portdrv_core.c
++++ b/drivers/pci/pcie/portdrv_core.c
+@@ -9,6 +9,7 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -378,6 +379,67 @@ static int pm_iter(struct device *dev, void *data)
+ return 0;
+ }
+
++static int get_downstream_delay(struct pci_bus *bus)
++{
++ struct pci_dev *pdev;
++ int min_delay = 100;
++ int max_delay = 0;
++
++ list_for_each_entry(pdev, &bus->devices, bus_list) {
++ if (!pdev->imm_ready)
++ min_delay = 0;
++ else if (pdev->d3cold_delay < min_delay)
++ min_delay = pdev->d3cold_delay;
++ if (pdev->d3cold_delay > max_delay)
++ max_delay = pdev->d3cold_delay;
++ }
++
++ return max(min_delay, max_delay);
++}
++
++/*
++ * wait_for_downstream_link - Wait for downstream link to establish
++ * @pdev: PCIe port whose downstream link is waited for
++ *
++ * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
++ * access to the downstream component is permitted.
++ *
++ * This blocks PCI core resume of the hierarchy below this port until the
++ * link is trained. Should be called before resuming port services to
++ * prevent pciehp from starting to tear down the hierarchy too soon.
++ */
++static void wait_for_downstream_link(struct pci_dev *pdev)
++{
++ int delay;
++
++ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
++ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
++ return;
++
++ if (pci_dev_is_disconnected(pdev))
++ return;
++
++ if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
++ !pdev->bridge_d3)
++ return;
++
++ delay = get_downstream_delay(pdev->subordinate);
++ if (!delay)
++ return;
++
++ dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
++
++ /*
++ * If the downstream port does not support speeds greater than
++ * 5 GT/s, we need to wait 100 ms. For higher speeds (gen3) we
++ * first need to wait for the data link layer to become active.
++ */
++ if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
++ msleep(delay);
++ else
++ pcie_wait_for_link_delay(pdev, true, delay);
++}
++
+ /**
+ * pcie_port_device_suspend - suspend port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+@@ -391,6 +453,8 @@ int pcie_port_device_suspend(struct device *dev)
+ int pcie_port_device_resume_noirq(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
++
++ wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+
+@@ -421,6 +485,8 @@ int pcie_port_device_runtime_suspend(struct device *dev)
+ int pcie_port_device_runtime_resume(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
++
++ wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+ #endif /* PM */
+diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
+index cd91b4179b10..43abdfd0deed 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
++++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
+@@ -1074,6 +1074,7 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
+
+ .start_ctrl = PCS_START | PLL_READY_GATE_EN,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
++ .mask_pcs_ready = PHYSTATUS,
+ .mask_com_pcs_ready = PCS_READY,
+
+ .has_phy_com_ctrl = true,
+@@ -1253,6 +1254,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
++ .mask_pcs_ready = PHYSTATUS,
+ .mask_com_pcs_ready = PCS_READY,
+ };
+
+@@ -1547,7 +1549,7 @@ static int qcom_qmp_phy_enable(struct phy *phy)
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+- ret = readl_poll_timeout(status, val, !(val & mask), 1,
++ ret = readl_poll_timeout(status, val, val & mask, 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
+index 673f8a128397..f5795adc5a6e 100644
+--- a/drivers/ras/cec.c
++++ b/drivers/ras/cec.c
+@@ -369,7 +369,9 @@ static int pfn_set(void *data, u64 val)
+ {
+ *(u64 *)data = val;
+
+- return cec_add_elem(val);
++ cec_add_elem(val);
++
++ return 0;
+ }
+
+ DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
+diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
+index da37b4ccd834..0309823d2c72 100644
+--- a/drivers/regulator/da9211-regulator.c
++++ b/drivers/regulator/da9211-regulator.c
+@@ -289,6 +289,8 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "da9211-enable");
++ if (IS_ERR(pdata->gpiod_ren[n]))
++ pdata->gpiod_ren[n] = NULL;
+ n++;
+ }
+
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 134c62db36c5..8812c2c3cfc2 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
+ regulator_desc_s2mps11_buck1_4(4),
+ regulator_desc_s2mps11_buck5,
+ regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
+- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
++ regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
++ regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck9,
+ regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ };
+@@ -821,9 +821,12 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s2mps11-regulator");
+- if (IS_ERR(gpio[reg])) {
++ if (PTR_ERR(gpio[reg]) == -ENOENT)
++ gpio[reg] = NULL;
++ else if (IS_ERR(gpio[reg])) {
+ dev_err(&pdev->dev, "Failed to get control GPIO for %d/%s\n",
+ reg, rdata[reg].name);
++ gpio[reg] = NULL;
+ continue;
+ }
+ if (gpio[reg])
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index bb9d1a083299..6ca27e9d5ef7 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -574,7 +574,9 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s5m8767");
+- if (IS_ERR(rdata->ext_control_gpiod))
++ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
++ rdata->ext_control_gpiod = NULL;
++ else if (IS_ERR(rdata->ext_control_gpiod))
+ return PTR_ERR(rdata->ext_control_gpiod);
+
+ rdata->id = i;
+diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
+index ca39b3d55123..10ea4b5a0f55 100644
+--- a/drivers/regulator/tps65090-regulator.c
++++ b/drivers/regulator/tps65090-regulator.c
+@@ -371,11 +371,12 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
+ "dcdc-ext-control-gpios", 0,
+ gflags,
+ "tps65090");
+- if (IS_ERR(rpdata->gpiod))
+- return ERR_CAST(rpdata->gpiod);
+- if (!rpdata->gpiod)
++ if (PTR_ERR(rpdata->gpiod) == -ENOENT) {
+ dev_err(&pdev->dev,
+ "could not find DCDC external control GPIO\n");
++ rpdata->gpiod = NULL;
++ } else if (IS_ERR(rpdata->gpiod))
++ return ERR_CAST(rpdata->gpiod);
+ }
+
+ if (of_property_read_u32(tps65090_matches[idx].of_node,
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 7b7620de2acd..730c4e68094b 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -736,6 +736,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
+
+ switch (state) {
+ case SLSB_P_OUTPUT_EMPTY:
++ case SLSB_P_OUTPUT_PENDING:
+ /* the adapter got it */
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
+ "out empty:%1d %02x", q->nr, count);
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index d94496ee6883..296bbc3c4606 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -11,6 +11,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+ #include <linux/blktrace_api.h>
++#include <linux/types.h>
+ #include <linux/slab.h>
+ #include <scsi/fc/fc_els.h>
+ #include "zfcp_ext.h"
+@@ -741,6 +742,7 @@ static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
+
+ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+ {
++ const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
+ struct zfcp_adapter *adapter = req->adapter;
+ struct zfcp_qdio *qdio = adapter->qdio;
+ int req_id = req->req_id;
+@@ -757,8 +759,20 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
+ return -EIO;
+ }
+
++ /*
++ * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
++ * ONLY TOUCH SYNC req AGAIN ON req->completion.
++ *
++ * The request might complete and be freed concurrently at any point
++ * now. This is not protected by the QDIO-lock (req_q_lock). So any
++ * uncontrolled access after this might result in a use-after-free bug.
++ * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
++ * when it is completed via req->completion, is it safe to use req
++ * again.
++ */
++
+ /* Don't increase for unsolicited status */
+- if (!zfcp_fsf_req_is_status_read_buffer(req))
++ if (!is_srb)
+ adapter->fsf_req_seq_no++;
+ adapter->req_no++;
+
+@@ -805,6 +819,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ goto failed_req_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -914,8 +929,10 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
+ req->qtcb->bottom.support.req_handle = (u64) old_req_id;
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+- if (!zfcp_fsf_req_send(req))
++ if (!zfcp_fsf_req_send(req)) {
++ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
+ goto out;
++ }
+
+ out_error_free:
+ zfcp_fsf_req_free(req);
+@@ -1098,6 +1115,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
+ ret = zfcp_fsf_req_send(req);
+ if (ret)
+ goto failed_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -1198,6 +1216,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
+ ret = zfcp_fsf_req_send(req);
+ if (ret)
+ goto failed_send;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -1243,6 +1262,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1279,8 +1299,10 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ spin_unlock_irq(&qdio->req_q_lock);
+- if (!retval)
++ if (!retval) {
++ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
+ wait_for_completion(&req->completion);
++ }
+
+ zfcp_fsf_req_free(req);
+ return retval;
+@@ -1330,6 +1352,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1372,8 +1395,10 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
+ retval = zfcp_fsf_req_send(req);
+ spin_unlock_irq(&qdio->req_q_lock);
+
+- if (!retval)
++ if (!retval) {
++ /* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
+ wait_for_completion(&req->completion);
++ }
+
+ zfcp_fsf_req_free(req);
+
+@@ -1493,6 +1518,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
+ erp_action->fsf_req_id = 0;
+ put_device(&port->dev);
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1557,6 +1583,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1600,6 +1627,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+ struct zfcp_fsf_req *req;
++ unsigned long req_id = 0;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1622,14 +1650,17 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
+ hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
+ req->data = wka_port;
+
++ req_id = req->req_id;
++
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
+ return retval;
+ }
+
+@@ -1655,6 +1686,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ {
+ struct zfcp_qdio *qdio = wka_port->adapter->qdio;
+ struct zfcp_fsf_req *req;
++ unsigned long req_id = 0;
+ int retval = -EIO;
+
+ spin_lock_irq(&qdio->req_q_lock);
+@@ -1677,14 +1709,17 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
+ req->data = wka_port;
+ req->qtcb->header.port_handle = wka_port->handle;
+
++ req_id = req->req_id;
++
+ zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+ retval = zfcp_fsf_req_send(req);
+ if (retval)
+ zfcp_fsf_req_free(req);
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ if (!retval)
+- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
+ return retval;
+ }
+
+@@ -1776,6 +1811,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1899,6 +1935,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -1987,6 +2024,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
+ zfcp_fsf_req_free(req);
+ erp_action->fsf_req_id = 0;
+ }
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+ out:
+ spin_unlock_irq(&qdio->req_q_lock);
+ return retval;
+@@ -2299,6 +2337,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+ retval = zfcp_fsf_req_send(req);
+ if (unlikely(retval))
+ goto failed_scsi_cmnd;
++ /* NOTE: DO NOT TOUCH req PAST THIS POINT! */
+
+ goto out;
+
+@@ -2373,8 +2412,10 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
+ zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
+
+ zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
+- if (!zfcp_fsf_req_send(req))
++ if (!zfcp_fsf_req_send(req)) {
++ /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
+ goto out;
++ }
+
+ zfcp_fsf_req_free(req);
+ req = NULL;
+diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
+index fe0535affc14..d9fa9cf2fd8b 100644
+--- a/drivers/scsi/NCR5380.c
++++ b/drivers/scsi/NCR5380.c
+@@ -709,6 +709,8 @@ static void NCR5380_main(struct work_struct *work)
+ NCR5380_information_transfer(instance);
+ done = 0;
+ }
++ if (!hostdata->connected)
++ NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ spin_unlock_irq(&hostdata->lock);
+ if (!done)
+ cond_resched();
+@@ -1110,8 +1112,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ spin_lock_irq(&hostdata->lock);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_reselect(instance);
+- if (!hostdata->connected)
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
+ goto out;
+ }
+@@ -1119,7 +1119,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ if (err < 0) {
+ spin_lock_irq(&hostdata->lock);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
+ /* Can't touch cmd if it has been reclaimed by the scsi ML */
+ if (!hostdata->selecting)
+@@ -1157,7 +1156,6 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
+ if (err < 0) {
+ shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ goto out;
+ }
+ if (!hostdata->selecting) {
+@@ -1763,10 +1761,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ scmd_printk(KERN_INFO, cmd,
+ "switching to slow handshake\n");
+ cmd->device->borken = 1;
+- sink = 1;
+- do_abort(instance);
+- cmd->result = DID_ERROR << 16;
+- /* XXX - need to source or sink data here, as appropriate */
++ do_reset(instance);
++ bus_reset_cleanup(instance);
+ }
+ } else {
+ /* Transfer a small chunk so that the
+@@ -1826,9 +1822,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+- /* Enable reselect interrupts */
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+-
+ maybe_release_dma_irq(instance);
+ return;
+ case MESSAGE_REJECT:
+@@ -1860,8 +1853,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ */
+ NCR5380_write(TARGET_COMMAND_REG, 0);
+
+- /* Enable reselect interrupts */
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ #ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+ #endif
+@@ -1964,7 +1955,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
+ cmd->result = DID_ERROR << 16;
+ complete_cmd(instance, cmd);
+ maybe_release_dma_irq(instance);
+- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+ return;
+ }
+ msgout = NOP;
+diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
+index efca509b92b0..5935fd6d1a05 100644
+--- a/drivers/scsi/NCR5380.h
++++ b/drivers/scsi/NCR5380.h
+@@ -235,7 +235,7 @@ struct NCR5380_cmd {
+ #define NCR5380_PIO_CHUNK_SIZE 256
+
+ /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
+-#define NCR5380_REG_POLL_TIME 15
++#define NCR5380_REG_POLL_TIME 10
+
+ static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
+ {
+diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
+index dba9517d9553..c5169d31c966 100644
+--- a/drivers/scsi/mac_scsi.c
++++ b/drivers/scsi/mac_scsi.c
+@@ -4,6 +4,8 @@
+ *
+ * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
+ *
++ * Copyright 2019 Finn Thain
++ *
+ * derived in part from:
+ */
+ /*
+@@ -12,6 +14,7 @@
+ * Copyright 1995, Russell King
+ */
+
++#include <linux/delay.h>
+ #include <linux/types.h>
+ #include <linux/module.h>
+ #include <linux/ioport.h>
+@@ -53,7 +56,7 @@ static int setup_cmd_per_lun = -1;
+ module_param(setup_cmd_per_lun, int, 0);
+ static int setup_sg_tablesize = -1;
+ module_param(setup_sg_tablesize, int, 0);
+-static int setup_use_pdma = -1;
++static int setup_use_pdma = 512;
+ module_param(setup_use_pdma, int, 0);
+ static int setup_hostid = -1;
+ module_param(setup_hostid, int, 0);
+@@ -90,101 +93,217 @@ static int __init mac_scsi_setup(char *str)
+ __setup("mac5380=", mac_scsi_setup);
+ #endif /* !MODULE */
+
+-/* Pseudo DMA asm originally by Ove Edlund */
+-
+-#define CP_IO_TO_MEM(s,d,n) \
+-__asm__ __volatile__ \
+- (" cmp.w #4,%2\n" \
+- " bls 8f\n" \
+- " move.w %1,%%d0\n" \
+- " neg.b %%d0\n" \
+- " and.w #3,%%d0\n" \
+- " sub.w %%d0,%2\n" \
+- " bra 2f\n" \
+- " 1: move.b (%0),(%1)+\n" \
+- " 2: dbf %%d0,1b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #5,%%d0\n" \
+- " bra 4f\n" \
+- " 3: move.l (%0),(%1)+\n" \
+- "31: move.l (%0),(%1)+\n" \
+- "32: move.l (%0),(%1)+\n" \
+- "33: move.l (%0),(%1)+\n" \
+- "34: move.l (%0),(%1)+\n" \
+- "35: move.l (%0),(%1)+\n" \
+- "36: move.l (%0),(%1)+\n" \
+- "37: move.l (%0),(%1)+\n" \
+- " 4: dbf %%d0,3b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #2,%%d0\n" \
+- " and.w #7,%%d0\n" \
+- " bra 6f\n" \
+- " 5: move.l (%0),(%1)+\n" \
+- " 6: dbf %%d0,5b\n" \
+- " and.w #3,%2\n" \
+- " bra 8f\n" \
+- " 7: move.b (%0),(%1)+\n" \
+- " 8: dbf %2,7b\n" \
+- " moveq.l #0, %2\n" \
+- " 9: \n" \
+- ".section .fixup,\"ax\"\n" \
+- " .even\n" \
+- "91: moveq.l #1, %2\n" \
+- " jra 9b\n" \
+- "94: moveq.l #4, %2\n" \
+- " jra 9b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 1b,91b\n" \
+- " .long 3b,94b\n" \
+- " .long 31b,94b\n" \
+- " .long 32b,94b\n" \
+- " .long 33b,94b\n" \
+- " .long 34b,94b\n" \
+- " .long 35b,94b\n" \
+- " .long 36b,94b\n" \
+- " .long 37b,94b\n" \
+- " .long 5b,94b\n" \
+- " .long 7b,91b\n" \
+- ".previous" \
+- : "=a"(s), "=a"(d), "=d"(n) \
+- : "0"(s), "1"(d), "2"(n) \
+- : "d0")
++/*
++ * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
++ * specify the number of bytes between the delays expected from a SCSI target.
++ * This allows the operating system to "prevent bus errors when a target fails
++ * to deliver the next byte within the processor bus error timeout period."
++ * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets,
++ * so bus errors are unavoidable.
++ *
++ * If a MOVE.B instruction faults, we assume that zero bytes were transferred
++ * and simply retry. That assumption probably depends on target behaviour but
++ * seems to hold up okay. The NOP provides synchronization: without it the
++ * fault can sometimes occur after the program counter has moved past the
++ * offending instruction. Post-increment addressing can't be used.
++ */
++
++#define MOVE_BYTE(operands) \
++ asm volatile ( \
++ "1: moveb " operands " \n" \
++ "11: nop \n" \
++ " addq #1,%0 \n" \
++ " subq #1,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #1, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 11b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
++
++/*
++ * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
++ * the residual byte count would be uncertain. In that situation the MOVE_WORD
++ * macro clears n in the fixup section to abort the transfer.
++ */
++
++#define MOVE_WORD(operands) \
++ asm volatile ( \
++ "1: movew " operands " \n" \
++ "11: nop \n" \
++ " subq #2,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #0, %1 \n" \
++ " movel #2, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 11b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
++
++#define MOVE_16_WORDS(operands) \
++ asm volatile ( \
++ "1: movew " operands " \n" \
++ "2: movew " operands " \n" \
++ "3: movew " operands " \n" \
++ "4: movew " operands " \n" \
++ "5: movew " operands " \n" \
++ "6: movew " operands " \n" \
++ "7: movew " operands " \n" \
++ "8: movew " operands " \n" \
++ "9: movew " operands " \n" \
++ "10: movew " operands " \n" \
++ "11: movew " operands " \n" \
++ "12: movew " operands " \n" \
++ "13: movew " operands " \n" \
++ "14: movew " operands " \n" \
++ "15: movew " operands " \n" \
++ "16: movew " operands " \n" \
++ "17: nop \n" \
++ " subl #32,%1 \n" \
++ "40: \n" \
++ " \n" \
++ ".section .fixup,\"ax\" \n" \
++ ".even \n" \
++ "90: movel #0, %1 \n" \
++ " movel #2, %2 \n" \
++ " jra 40b \n" \
++ ".previous \n" \
++ " \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".align 4 \n" \
++ ".long 1b,90b \n" \
++ ".long 2b,90b \n" \
++ ".long 3b,90b \n" \
++ ".long 4b,90b \n" \
++ ".long 5b,90b \n" \
++ ".long 6b,90b \n" \
++ ".long 7b,90b \n" \
++ ".long 8b,90b \n" \
++ ".long 9b,90b \n" \
++ ".long 10b,90b \n" \
++ ".long 11b,90b \n" \
++ ".long 12b,90b \n" \
++ ".long 13b,90b \n" \
++ ".long 14b,90b \n" \
++ ".long 15b,90b \n" \
++ ".long 16b,90b \n" \
++ ".long 17b,90b \n" \
++ ".previous \n" \
++ : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
++
++#define MAC_PDMA_DELAY 32
++
++static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
++{
++ unsigned char *addr = start;
++ int result = 0;
++
++ if (n >= 1) {
++ MOVE_BYTE("%3@,%0@");
++ if (result)
++ goto out;
++ }
++ if (n >= 1 && ((unsigned long)addr & 1)) {
++ MOVE_BYTE("%3@,%0@");
++ if (result)
++ goto out;
++ }
++ while (n >= 32)
++ MOVE_16_WORDS("%3@,%0@+");
++ while (n >= 2)
++ MOVE_WORD("%3@,%0@+");
++ if (result)
++ return start - addr; /* Negated to indicate uncertain length */
++ if (n == 1)
++ MOVE_BYTE("%3@,%0@");
++out:
++ return addr - start;
++}
++
++static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
++{
++ unsigned char *addr = start;
++ int result = 0;
++
++ if (n >= 1) {
++ MOVE_BYTE("%0@,%3@");
++ if (result)
++ goto out;
++ }
++ if (n >= 1 && ((unsigned long)addr & 1)) {
++ MOVE_BYTE("%0@,%3@");
++ if (result)
++ goto out;
++ }
++ while (n >= 32)
++ MOVE_16_WORDS("%0@+,%3@");
++ while (n >= 2)
++ MOVE_WORD("%0@+,%3@");
++ if (result)
++ return start - addr; /* Negated to indicate uncertain length */
++ if (n == 1)
++ MOVE_BYTE("%0@,%3@");
++out:
++ return addr - start;
++}
+
+ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ unsigned char *dst, int len)
+ {
+ u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
+ unsigned char *d = dst;
+- int n = len;
+- int transferred;
++
++ hostdata->pdma_residual = len;
+
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+- CP_IO_TO_MEM(s, d, n);
++ int bytes;
+
+- transferred = d - dst - n;
+- hostdata->pdma_residual = len - transferred;
++ bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
+
+- /* No bus error. */
+- if (n == 0)
++ if (bytes > 0) {
++ d += bytes;
++ hostdata->pdma_residual -= bytes;
++ }
++
++ if (hostdata->pdma_residual == 0)
+ return 0;
+
+- /* Target changed phase early? */
+ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+- scmd_printk(KERN_ERR, hostdata->connected,
++ BUS_AND_STATUS_REG, BASR_ACK,
++ BASR_ACK, HZ / 64) < 0)
++ scmd_printk(KERN_DEBUG, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ return 0;
+
++ if (bytes == 0)
++ udelay(MAC_PDMA_DELAY);
++
++ if (bytes >= 0)
++ continue;
++
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, transferred, len);
++ "%s: bus error (%d/%d)\n", __func__, d - dst, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- d = dst + transferred;
+- n = len - transferred;
++ return -1;
+ }
+
+ scmd_printk(KERN_ERR, hostdata->connected,
+@@ -193,93 +312,27 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
+ return -1;
+ }
+
+-
+-#define CP_MEM_TO_IO(s,d,n) \
+-__asm__ __volatile__ \
+- (" cmp.w #4,%2\n" \
+- " bls 8f\n" \
+- " move.w %0,%%d0\n" \
+- " neg.b %%d0\n" \
+- " and.w #3,%%d0\n" \
+- " sub.w %%d0,%2\n" \
+- " bra 2f\n" \
+- " 1: move.b (%0)+,(%1)\n" \
+- " 2: dbf %%d0,1b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #5,%%d0\n" \
+- " bra 4f\n" \
+- " 3: move.l (%0)+,(%1)\n" \
+- "31: move.l (%0)+,(%1)\n" \
+- "32: move.l (%0)+,(%1)\n" \
+- "33: move.l (%0)+,(%1)\n" \
+- "34: move.l (%0)+,(%1)\n" \
+- "35: move.l (%0)+,(%1)\n" \
+- "36: move.l (%0)+,(%1)\n" \
+- "37: move.l (%0)+,(%1)\n" \
+- " 4: dbf %%d0,3b\n" \
+- " move.w %2,%%d0\n" \
+- " lsr.w #2,%%d0\n" \
+- " and.w #7,%%d0\n" \
+- " bra 6f\n" \
+- " 5: move.l (%0)+,(%1)\n" \
+- " 6: dbf %%d0,5b\n" \
+- " and.w #3,%2\n" \
+- " bra 8f\n" \
+- " 7: move.b (%0)+,(%1)\n" \
+- " 8: dbf %2,7b\n" \
+- " moveq.l #0, %2\n" \
+- " 9: \n" \
+- ".section .fixup,\"ax\"\n" \
+- " .even\n" \
+- "91: moveq.l #1, %2\n" \
+- " jra 9b\n" \
+- "94: moveq.l #4, %2\n" \
+- " jra 9b\n" \
+- ".previous\n" \
+- ".section __ex_table,\"a\"\n" \
+- " .align 4\n" \
+- " .long 1b,91b\n" \
+- " .long 3b,94b\n" \
+- " .long 31b,94b\n" \
+- " .long 32b,94b\n" \
+- " .long 33b,94b\n" \
+- " .long 34b,94b\n" \
+- " .long 35b,94b\n" \
+- " .long 36b,94b\n" \
+- " .long 37b,94b\n" \
+- " .long 5b,94b\n" \
+- " .long 7b,91b\n" \
+- ".previous" \
+- : "=a"(s), "=a"(d), "=d"(n) \
+- : "0"(s), "1"(d), "2"(n) \
+- : "d0")
+-
+ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ unsigned char *src, int len)
+ {
+ unsigned char *s = src;
+ u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
+- int n = len;
+- int transferred;
++
++ hostdata->pdma_residual = len;
+
+ while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+- CP_MEM_TO_IO(s, d, n);
++ int bytes;
+
+- transferred = s - src - n;
+- hostdata->pdma_residual = len - transferred;
++ bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
+
+- /* Target changed phase early? */
+- if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
+- BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+- scmd_printk(KERN_ERR, hostdata->connected,
+- "%s: !REQ and !ACK\n", __func__);
+- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+- return 0;
++ if (bytes > 0) {
++ s += bytes;
++ hostdata->pdma_residual -= bytes;
++ }
+
+- /* No bus error. */
+- if (n == 0) {
++ if (hostdata->pdma_residual == 0) {
+ if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
+ TCR_LAST_BYTE_SENT,
+ TCR_LAST_BYTE_SENT, HZ / 64) < 0)
+@@ -288,17 +341,29 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
+ return 0;
+ }
+
++ if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
++ BUS_AND_STATUS_REG, BASR_ACK,
++ BASR_ACK, HZ / 64) < 0)
++ scmd_printk(KERN_DEBUG, hostdata->connected,
++ "%s: !REQ and !ACK\n", __func__);
++ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
++ return 0;
++
++ if (bytes == 0)
++ udelay(MAC_PDMA_DELAY);
++
++ if (bytes >= 0)
++ continue;
++
+ dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
+- "%s: bus error (%d/%d)\n", __func__, transferred, len);
++ "%s: bus error (%d/%d)\n", __func__, s - src, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+- s = src + transferred;
+- n = len - transferred;
++ return -1;
+ }
+
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: phase mismatch or !DRQ\n", __func__);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
+-
+ return -1;
+ }
+
+@@ -306,7 +371,7 @@ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
+ struct scsi_cmnd *cmd)
+ {
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
+- cmd->SCp.this_residual < 16)
++ cmd->SCp.this_residual < setup_use_pdma)
+ return 0;
+
+ return cmd->SCp.this_residual;
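The mac_scsi rework above settles on a simple contract for the new PDMA
primitives: a positive return means the chunk moved and pdma_residual stays
exact, zero means a byte access faulted before anything moved (so the caller
waits MAC_PDMA_DELAY and retries), and a negative return means a word access
faulted and the residual is uncertain, so the transfer must fail. A compilable
userspace model of that caller loop follows; the names and the fault injector
are illustrative, not driver code.

    #include <stdio.h>

    enum { CHUNK = 512 };

    /* Hypothetical fault injection: how did this chunk end? */
    static int pdma_chunk(int n, int fault)
    {
        if (fault == 1)
            return 0;        /* MOVE.B faulted, nothing moved: retry */
        if (fault == 2)
            return -(n / 2); /* MOVE.W faulted: residual uncertain */
        return n;            /* whole chunk transferred */
    }

    int main(void)
    {
        int residual = 1300;

        while (residual > 0) {
            int n = residual < CHUNK ? residual : CHUNK;
            int bytes = pdma_chunk(n, 0);

            if (bytes > 0) {
                residual -= bytes;   /* residual stays exact */
                continue;
            }
            if (bytes == 0)
                continue;            /* udelay(MAC_PDMA_DELAY) in the driver */
            fprintf(stderr, "bus error, failing transfer\n");
            return 1;                /* residual now uncertain */
        }
        printf("done\n");
        return 0;
    }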
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 3dd1df472dc6..7237114a1d53 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6155,7 +6155,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
+ int ret;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+- u16 targetId = (sdev->channel % 2) + sdev->id;
++ u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
++ sdev->id;
+
+ cmd = megasas_get_cmd(instance);
+
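The megaraid_sas change above fixes the target ID arithmetic so IDs from the
two channels of a pair can no longer collide. A quick worked example, assuming
the driver's MEGASAS_MAX_DEV_PER_CHANNEL value of 128:

    #include <stdio.h>

    #define MEGASAS_MAX_DEV_PER_CHANNEL 128

    int main(void)
    {
        unsigned int channel = 1, id = 5;   /* hypothetical device */
        unsigned int old_tid = (channel % 2) + id;
        unsigned int new_tid = (channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + id;

        /* old=6 collides with channel 0, id 6; new=133 is unique */
        printf("old=%u new=%u\n", old_tid, new_tid);
        return 0;
    }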
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 65d0a10c76ad..40f392569664 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -72,11 +72,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
+ struct kmem_cache *cache;
+ int ret = 0;
+
++ mutex_lock(&scsi_sense_cache_mutex);
+ cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
+ if (cache)
+- return 0;
++ goto exit;
+
+- mutex_lock(&scsi_sense_cache_mutex);
+ if (shost->unchecked_isa_dma) {
+ scsi_sense_isadma_cache =
+ kmem_cache_create("scsi_sense_cache(DMA)",
+@@ -92,7 +92,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
+ if (!scsi_sense_cache)
+ ret = -ENOMEM;
+ }
+-
++ exit:
+ mutex_unlock(&scsi_sense_cache_mutex);
+ return ret;
+ }
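The scsi_lib change above closes a check-then-create race: the existence test
used to run before scsi_sense_cache_mutex was taken, so two hosts probing
concurrently could both see no cache and both try to create one. A generic
userspace sketch of the corrected shape, using pthread stand-ins rather than
kernel primitives:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *cache;

    static int cache_init(void)
    {
        int ret = 0;

        pthread_mutex_lock(&cache_lock);
        if (!cache) {               /* existence check now under the lock */
            cache = malloc(96);     /* stand-in for kmem_cache_create() */
            if (!cache)
                ret = -1;
        }
        pthread_mutex_unlock(&cache_lock);
        return ret;
    }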
+diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
+index 7334024b64f1..f9d1df0509c6 100644
+--- a/drivers/scsi/sd_zbc.c
++++ b/drivers/scsi/sd_zbc.c
+@@ -417,7 +417,7 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
+ {
+ struct gendisk *disk = sdkp->disk;
+ unsigned int nr_zones;
+- u32 zone_blocks;
++ u32 zone_blocks = 0;
+ int ret;
+
+ if (!sd_is_zoned(sdkp))
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 9b91188a85f9..2cc6d9951b52 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -417,7 +417,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = rs->dma_addr_tx,
+ .dst_addr_width = rs->n_bytes,
+- .dst_maxburst = rs->fifo_len / 2,
++ .dst_maxburst = rs->fifo_len / 4,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
+@@ -518,7 +518,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
+ else
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+
+- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
++ writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
+ writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 5e4654032bfa..29916e446143 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -2286,11 +2286,6 @@ int spi_register_controller(struct spi_controller *ctlr)
+ if (status)
+ return status;
+
+- /* even if it's just one always-selected device, there must
+- * be at least one chipselect
+- */
+- if (ctlr->num_chipselect == 0)
+- return -EINVAL;
+ if (ctlr->bus_num >= 0) {
+ /* devices with a fixed bus num must check-in with the num */
+ mutex_lock(&board_lock);
+@@ -2361,6 +2356,13 @@ int spi_register_controller(struct spi_controller *ctlr)
+ }
+ }
+
++ /*
++ * Even if it's just one always-selected device, there must
++ * be at least one chipselect.
++ */
++ if (!ctlr->num_chipselect)
++ return -EINVAL;
++
+ status = device_add(&ctlr->dev);
+ if (status < 0) {
+ /* free bus id */
+diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+index 30e2edc0cec5..b88855c7ffe8 100644
+--- a/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
++++ b/drivers/staging/media/davinci_vpfe/dm365_ipipe.c
+@@ -1251,10 +1251,10 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+ struct vpfe_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
+ unsigned int i;
+ int rval = 0;
++ struct ipipe_module_params *params;
+
+ for (i = 0; i < ARRAY_SIZE(ipipe_modules); i++) {
+ const struct ipipe_module_if *module_if;
+- struct ipipe_module_params *params;
+ void *from, *to;
+ size_t size;
+
+@@ -1265,25 +1265,30 @@ static int ipipe_s_config(struct v4l2_subdev *sd, struct vpfe_ipipe_config *cfg)
+ from = *(void **)((void *)cfg + module_if->config_offset);
+
+ params = kmalloc(sizeof(*params), GFP_KERNEL);
++ if (!params)
++ return -ENOMEM;
+ to = (void *)params + module_if->param_offset;
+ size = module_if->param_size;
+
+ if (to && from && size) {
+ if (copy_from_user(to, (void __user *)from, size)) {
+ rval = -EFAULT;
+- break;
++ goto error_free;
+ }
+ rval = module_if->set(ipipe, to);
+ if (rval)
+- goto error;
++ goto error_free;
+ } else if (to && !from && size) {
+ rval = module_if->set(ipipe, NULL);
+ if (rval)
+- goto error;
++ goto error_free;
+ }
+ kfree(params);
+ }
+-error:
++ return rval;
++
++error_free:
++ kfree(params);
+ return rval;
+ }
+
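The dm365_ipipe fix above adds the missing allocation check and funnels every
error through a label that frees the per-iteration buffer, so params can no
longer leak on the failure paths. A compilable sketch of that shape, with
do_set() standing in for module_if->set():

    #include <stdlib.h>

    static int do_set(void *p)
    {
        return p ? 0 : -1;   /* stand-in for module_if->set() */
    }

    static int apply_all(int count)
    {
        void *params = NULL;
        int i, rval = 0;

        for (i = 0; i < count; i++) {
            params = malloc(64);
            if (!params)
                return -1;        /* nothing held yet: plain return */
            rval = do_set(params);
            if (rval)
                goto error_free;  /* free before bailing out */
            free(params);         /* success path frees and loops */
        }
        return rval;

    error_free:
        free(params);
        return rval;
    }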
+diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
+index 510202a3b091..84cca18e3e9d 100644
+--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
++++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
+@@ -419,6 +419,9 @@ static int vpfe_open(struct file *file)
+ /* If decoder is not initialized. initialize it */
+ if (!video->initialized && vpfe_update_pipe_state(video)) {
+ mutex_unlock(&video->lock);
++ v4l2_fh_del(&handle->vfh);
++ v4l2_fh_exit(&handle->vfh);
++ kfree(handle);
+ return -ENODEV;
+ }
+ /* Increment device users counter */
+diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
+index 19455f425416..7d7bdfdd852a 100644
+--- a/drivers/staging/media/imx/imx7-mipi-csis.c
++++ b/drivers/staging/media/imx/imx7-mipi-csis.c
+@@ -456,13 +456,9 @@ static void mipi_csis_set_params(struct csi_state *state)
+ MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
+ }
+
+-static void mipi_csis_clk_enable(struct csi_state *state)
++static int mipi_csis_clk_enable(struct csi_state *state)
+ {
+- int ret;
+-
+- ret = clk_bulk_prepare_enable(state->num_clks, state->clks);
+- if (ret < 0)
+- dev_err(state->dev, "failed to enable clocks\n");
++ return clk_bulk_prepare_enable(state->num_clks, state->clks);
+ }
+
+ static void mipi_csis_clk_disable(struct csi_state *state)
+@@ -973,7 +969,11 @@ static int mipi_csis_probe(struct platform_device *pdev)
+ if (ret < 0)
+ return ret;
+
+- mipi_csis_clk_enable(state);
++ ret = mipi_csis_clk_enable(state);
++ if (ret < 0) {
++ dev_err(state->dev, "failed to enable clocks: %d\n", ret);
++ return ret;
++ }
+
+ ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler,
+ 0, dev_name(dev), state);
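The imx7-mipi-csis change above makes the clock helper report failure instead
of merely logging it, so probe can abort rather than continue with the clocks
off. A minimal model of that error-propagation shape (stand-in functions and
an illustrative errno value):

    #include <stdio.h>

    static int enable_clocks(void)
    {
        return -5;               /* pretend the bulk enable failed (-EIO) */
    }

    static int probe(void)
    {
        int ret = enable_clocks();

        if (ret < 0) {
            fprintf(stderr, "failed to enable clocks: %d\n", ret);
            return ret;          /* abort probe */
        }
        return 0;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }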
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index fa783531ee88..a02448105527 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -63,7 +63,7 @@ struct usb_dev_state {
+ unsigned int discsignr;
+ struct pid *disc_pid;
+ const struct cred *cred;
+- void __user *disccontext;
++ sigval_t disccontext;
+ unsigned long ifclaimed;
+ u32 disabled_bulk_eps;
+ bool privileges_dropped;
+@@ -90,6 +90,7 @@ struct async {
+ unsigned int ifnum;
+ void __user *userbuffer;
+ void __user *userurb;
++ sigval_t userurb_sigval;
+ struct urb *urb;
+ struct usb_memory *usbm;
+ unsigned int mem_usage;
+@@ -582,22 +583,19 @@ static void async_completed(struct urb *urb)
+ {
+ struct async *as = urb->context;
+ struct usb_dev_state *ps = as->ps;
+- struct kernel_siginfo sinfo;
+ struct pid *pid = NULL;
+ const struct cred *cred = NULL;
+ unsigned long flags;
+- int signr;
++ sigval_t addr;
++ int signr, errno;
+
+ spin_lock_irqsave(&ps->lock, flags);
+ list_move_tail(&as->asynclist, &ps->async_completed);
+ as->status = urb->status;
+ signr = as->signr;
+ if (signr) {
+- clear_siginfo(&sinfo);
+- sinfo.si_signo = as->signr;
+- sinfo.si_errno = as->status;
+- sinfo.si_code = SI_ASYNCIO;
+- sinfo.si_addr = as->userurb;
++ errno = as->status;
++ addr = as->userurb_sigval;
+ pid = get_pid(as->pid);
+ cred = get_cred(as->cred);
+ }
+@@ -615,7 +613,7 @@ static void async_completed(struct urb *urb)
+ spin_unlock_irqrestore(&ps->lock, flags);
+
+ if (signr) {
+- kill_pid_info_as_cred(sinfo.si_signo, &sinfo, pid, cred);
++ kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
+ put_pid(pid);
+ put_cred(cred);
+ }
+@@ -1427,7 +1425,7 @@ find_memory_area(struct usb_dev_state *ps, const struct usbdevfs_urb *uurb)
+
+ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb,
+ struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
+- void __user *arg)
++ void __user *arg, sigval_t userurb_sigval)
+ {
+ struct usbdevfs_iso_packet_desc *isopkt = NULL;
+ struct usb_host_endpoint *ep;
+@@ -1727,6 +1725,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ isopkt = NULL;
+ as->ps = ps;
+ as->userurb = arg;
++ as->userurb_sigval = userurb_sigval;
+ if (as->usbm) {
+ unsigned long uurb_start = (unsigned long)uurb->buffer;
+
+@@ -1801,13 +1800,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ static int proc_submiturb(struct usb_dev_state *ps, void __user *arg)
+ {
+ struct usbdevfs_urb uurb;
++ sigval_t userurb_sigval;
+
+ if (copy_from_user(&uurb, arg, sizeof(uurb)))
+ return -EFAULT;
+
++ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
++ userurb_sigval.sival_ptr = arg;
++
+ return proc_do_submiturb(ps, &uurb,
+ (((struct usbdevfs_urb __user *)arg)->iso_frame_desc),
+- arg);
++ arg, userurb_sigval);
+ }
+
+ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
+@@ -1977,7 +1980,7 @@ static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *a
+ if (copy_from_user(&ds, arg, sizeof(ds)))
+ return -EFAULT;
+ ps->discsignr = ds.signr;
+- ps->disccontext = compat_ptr(ds.context);
++ ps->disccontext.sival_int = ds.context;
+ return 0;
+ }
+
+@@ -2005,13 +2008,17 @@ static int get_urb32(struct usbdevfs_urb *kurb,
+ static int proc_submiturb_compat(struct usb_dev_state *ps, void __user *arg)
+ {
+ struct usbdevfs_urb uurb;
++ sigval_t userurb_sigval;
+
+ if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
+ return -EFAULT;
+
++ memset(&userurb_sigval, 0, sizeof(userurb_sigval));
++ userurb_sigval.sival_int = ptr_to_compat(arg);
++
+ return proc_do_submiturb(ps, &uurb,
+ ((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
+- arg);
++ arg, userurb_sigval);
+ }
+
+ static int processcompl_compat(struct async *as, void __user * __user *arg)
+@@ -2092,7 +2099,7 @@ static int proc_disconnectsignal(struct usb_dev_state *ps, void __user *arg)
+ if (copy_from_user(&ds, arg, sizeof(ds)))
+ return -EFAULT;
+ ps->discsignr = ds.signr;
+- ps->disccontext = ds.context;
++ ps->disccontext.sival_ptr = ds.context;
+ return 0;
+ }
+
+@@ -2614,22 +2621,15 @@ const struct file_operations usbdev_file_operations = {
+ static void usbdev_remove(struct usb_device *udev)
+ {
+ struct usb_dev_state *ps;
+- struct kernel_siginfo sinfo;
+
+ while (!list_empty(&udev->filelist)) {
+ ps = list_entry(udev->filelist.next, struct usb_dev_state, list);
+ destroy_all_async(ps);
+ wake_up_all(&ps->wait);
+ list_del_init(&ps->list);
+- if (ps->discsignr) {
+- clear_siginfo(&sinfo);
+- sinfo.si_signo = ps->discsignr;
+- sinfo.si_errno = EPIPE;
+- sinfo.si_code = SI_ASYNCIO;
+- sinfo.si_addr = ps->disccontext;
+- kill_pid_info_as_cred(ps->discsignr, &sinfo,
+- ps->disc_pid, ps->cred);
+- }
++ if (ps->discsignr)
++ kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext,
++ ps->disc_pid, ps->cred);
+ }
+ }
+
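The devio.c conversion above stops hand-building a kernel_siginfo in the
completion and disconnect paths and instead hands a sigval to
kill_pid_usb_asyncio(). The same union carries a user pointer for native tasks
and a 32-bit handle for compat tasks. A small userspace model of that
convention, with a plain cast standing in for the kernel's ptr_to_compat():

    #include <signal.h>
    #include <stdint.h>
    #include <string.h>

    static union sigval make_urb_sigval(void *userurb, int compat_task)
    {
        union sigval v;

        memset(&v, 0, sizeof(v));
        if (compat_task)
            v.sival_int = (int)(uintptr_t)userurb;  /* ptr_to_compat() */
        else
            v.sival_ptr = userurb;
        return v;
    }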
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 2f94568ba385..2c8e60c7dbd8 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -3617,6 +3617,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
+ struct usb_device *hdev;
+ struct usb_device *udev;
+ int connect_change = 0;
++ u16 link_state;
+ int ret;
+
+ hdev = hub->hdev;
+@@ -3626,9 +3627,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
+ return 0;
+ usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
+ } else {
++ link_state = portstatus & USB_PORT_STAT_LINK_STATE;
+ if (!udev || udev->state != USB_STATE_SUSPENDED ||
+- (portstatus & USB_PORT_STAT_LINK_STATE) !=
+- USB_SS_PORT_LS_U0)
++ (link_state != USB_SS_PORT_LS_U0 &&
++ link_state != USB_SS_PORT_LS_U1 &&
++ link_state != USB_SS_PORT_LS_U2))
+ return 0;
+ }
+
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index d57ebdd616d9..247e5585af5d 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -35,7 +35,7 @@
+
+ #include "vhost.h"
+
+-static int experimental_zcopytx = 1;
++static int experimental_zcopytx = 0;
+ module_param(experimental_zcopytx, int, 0444);
+ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
+ " 1 -Enable; 0 - Disable");
+diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
+index d37dd5bb7a8f..559768dc2567 100644
+--- a/drivers/xen/balloon.c
++++ b/drivers/xen/balloon.c
+@@ -538,8 +538,15 @@ static void balloon_process(struct work_struct *work)
+ state = reserve_additional_memory();
+ }
+
+- if (credit < 0)
+- state = decrease_reservation(-credit, GFP_BALLOON);
++ if (credit < 0) {
++ long n_pages;
++
++ n_pages = min(-credit, si_mem_available());
++ state = decrease_reservation(n_pages, GFP_BALLOON);
++ if (state == BP_DONE && n_pages != -credit &&
++ n_pages < totalreserve_pages)
++ state = BP_EAGAIN;
++ }
+
+ state = update_schedule(state);
+
+@@ -578,6 +585,9 @@ static int add_ballooned_pages(int nr_pages)
+ }
+ }
+
++ if (si_mem_available() < nr_pages)
++ return -ENOMEM;
++
+ st = decrease_reservation(nr_pages, GFP_USER);
+ if (st != BP_DONE)
+ return -ENOMEM;
+@@ -710,7 +720,7 @@ static int __init balloon_init(void)
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.max_schedule_delay = 32;
+ balloon_stats.retry_count = 1;
+- balloon_stats.max_retry_count = RETRY_UNLIMITED;
++ balloon_stats.max_retry_count = 4;
+
+ #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+ set_online_page_callback(&xen_online_page);
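The balloon.c change above clamps deflation to what si_mem_available() reports
and turns a trimmed request into BP_EAGAIN so the worker retries later instead
of reporting success. A compilable model with made-up numbers:

    #include <stdio.h>

    enum bp_state { BP_DONE, BP_EAGAIN };

    static enum bp_state deflate(long credit, long mem_available, long reserve)
    {
        long n_pages = (-credit < mem_available) ? -credit : mem_available;

        /* assume decrease_reservation(n_pages) returned BP_DONE */
        if (n_pages != -credit && n_pages < reserve)
            return BP_EAGAIN;
        return BP_DONE;
    }

    int main(void)
    {
        /* asked for 10000 pages, only 4000 available: retry later */
        printf("%d\n", deflate(-10000, 4000, 8000));  /* prints 1 */
        return 0;
    }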
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index ff9b51055b14..2e8570c09789 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -1294,7 +1294,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
+ }
+
+ /* Rebind an evtchn so that it gets delivered to a specific cpu */
+-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
++static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+ {
+ struct evtchn_bind_vcpu bind_vcpu;
+ int masked;
+@@ -1328,7 +1328,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
+
+ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ bool force)
+@@ -1342,6 +1341,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ return ret;
+ }
+
++/* To be called with desc->lock held. */
++int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
++{
++ struct irq_data *d = irq_desc_get_irq_data(desc);
++
++ return set_affinity_irq(d, cpumask_of(tcpu), false);
++}
++EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
++
+ static void enable_dynirq(struct irq_data *data)
+ {
+ int evtchn = evtchn_from_irq(data->irq);
+diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
+index f341b016672f..052b55a14ebc 100644
+--- a/drivers/xen/evtchn.c
++++ b/drivers/xen/evtchn.c
+@@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
+ this_cpu_write(bind_last_selected_cpu, selected_cpu);
+
+ /* unmask expects irqs to be disabled */
+- xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
++ xen_set_affinity_evtchn(desc, selected_cpu);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+
+diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
+index 84dd4a8980c5..75efb843b7b5 100644
+--- a/fs/btrfs/compression.c
++++ b/fs/btrfs/compression.c
+@@ -42,6 +42,22 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
+ return NULL;
+ }
+
++bool btrfs_compress_is_valid_type(const char *str, size_t len)
++{
++ int i;
++
++ for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
++ size_t comp_len = strlen(btrfs_compress_types[i]);
++
++ if (len < comp_len)
++ continue;
++
++ if (!strncmp(btrfs_compress_types[i], str, comp_len))
++ return true;
++ }
++ return false;
++}
++
+ static int btrfs_decompress_bio(struct compressed_bio *cb);
+
+ static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
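btrfs_compress_is_valid_type() above starts scanning at index 1, which skips
the empty "none" entry that would otherwise match any input, and it compares
only the prefix, so a value carrying a level suffix such as "zlib:9" still
validates. A userspace model, assuming the table in this file is
{ "", "zlib", "lzo", "zstd" }:

    #include <stdio.h>
    #include <string.h>

    static const char *const types[] = { "", "zlib", "lzo", "zstd" };

    static int is_valid_type(const char *str, size_t len)
    {
        size_t i;

        for (i = 1; i < sizeof(types) / sizeof(types[0]); i++) {
            size_t comp_len = strlen(types[i]);

            if (len < comp_len)
                continue;
            if (!strncmp(types[i], str, comp_len))
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", is_valid_type("zstd", 4),
               is_valid_type("zlib:9", 6), is_valid_type("gzip", 4));
        /* prints: 1 1 0 */
        return 0;
    }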
+diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
+index 9976fe0f7526..b61879485e60 100644
+--- a/fs/btrfs/compression.h
++++ b/fs/btrfs/compression.h
+@@ -173,6 +173,7 @@ extern const struct btrfs_compress_op btrfs_lzo_compress;
+ extern const struct btrfs_compress_op btrfs_zstd_compress;
+
+ const char* btrfs_compress_type2str(enum btrfs_compression_type type);
++bool btrfs_compress_is_valid_type(const char *str, size_t len);
+
+ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 89f5be2bfb43..1c7533db16b0 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2721,6 +2721,11 @@ out_only_mutex:
+ * for detecting, at fsync time, if the inode isn't yet in the
+ * log tree or it's there but not up to date.
+ */
++ struct timespec64 now = current_time(inode);
++
++ inode_inc_iversion(inode);
++ inode->i_mtime = now;
++ inode->i_ctime = now;
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ err = PTR_ERR(trans);
+diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
+index a9e2e66152ee..af109c0ba720 100644
+--- a/fs/btrfs/props.c
++++ b/fs/btrfs/props.c
+@@ -257,11 +257,7 @@ static int prop_compression_validate(const char *value, size_t len)
+ if (!value)
+ return 0;
+
+- if (!strncmp("lzo", value, 3))
+- return 0;
+- else if (!strncmp("zlib", value, 4))
+- return 0;
+- else if (!strncmp("zstd", value, 4))
++ if (btrfs_compress_is_valid_type(value, len))
+ return 0;
+
+ return -EINVAL;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 3fc8d854d7fb..6c8297bcfeb7 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3322,6 +3322,30 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
+ return 0;
+ }
+
++/*
++ * Check if an inode was logged in the current transaction. We can't always rely
++ * on an inode's logged_trans value, because it's an in-memory only field and
++ * therefore not persisted. This means that its value is lost if the inode gets
++ * evicted and loaded again from disk (in which case it has a value of 0, and
++ * certainly it is smaller than any possible transaction ID). When that happens
++ * the full_sync flag is set in the inode's runtime flags, so in that case we
++ * assume eviction happened, ignore the logged_trans value and assume the
++ * worst case: that the inode was logged before in the current transaction.
++ */
++static bool inode_logged(struct btrfs_trans_handle *trans,
++ struct btrfs_inode *inode)
++{
++ if (inode->logged_trans == trans->transid)
++ return true;
++
++ if (inode->last_trans == trans->transid &&
++ test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
++ !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
++ return true;
++
++ return false;
++}
++
+ /*
+ * If both a file and directory are logged, and unlinks or renames are
+ * mixed in, we have a few interesting corners:
+@@ -3356,7 +3380,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
+ int bytes_del = 0;
+ u64 dir_ino = btrfs_ino(dir);
+
+- if (dir->logged_trans < trans->transid)
++ if (!inode_logged(trans, dir))
+ return 0;
+
+ ret = join_running_log_trans(root);
+@@ -3460,7 +3484,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
+ u64 index;
+ int ret;
+
+- if (inode->logged_trans < trans->transid)
++ if (!inode_logged(trans, inode))
+ return 0;
+
+ ret = join_running_log_trans(root);
+@@ -5420,9 +5444,19 @@ log_extents:
+ }
+ }
+
++ /*
++ * Don't update last_log_commit if we logged that an inode exists after
++ * it was loaded to memory (full_sync bit set).
++ * This is to prevent data loss when we do a write to the inode, then
++ * the inode gets evicted after all delalloc was flushed, then we log
++ * that it exists (due to a rename for example) and then fsync it. This last
++ * fsync would do nothing (not logging the extents previously written).
++ */
+ spin_lock(&inode->lock);
+ inode->logged_trans = trans->transid;
+- inode->last_log_commit = inode->last_sub_trans;
++ if (inode_only != LOG_INODE_EXISTS ||
++ !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
++ inode->last_log_commit = inode->last_sub_trans;
+ spin_unlock(&inode->lock);
+ out_unlock:
+ mutex_unlock(&inode->log_mutex);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 183c37c0a8fc..7a57db8e2fa9 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -1007,7 +1007,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+ * may block.
+ */
+ truncate_inode_pages_range(inode->i_mapping, pos,
+- (pos+len) | (PAGE_SIZE - 1));
++ PAGE_ALIGN(pos + len) - 1);
+
+ req->r_mtime = mtime;
+ }
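The ceph fix above only matters when pos + len is already page aligned: OR-ing
in PAGE_SIZE - 1 then overshoots the truncation range by a full page, while
PAGE_ALIGN(pos + len) - 1 ends on the correct last byte. A worked example with
4 KiB pages:

    #include <stdio.h>

    #define PAGE_SIZE  4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long end = 8192;            /* pos + len, page aligned */

        printf("old=%lu new=%lu\n",
               end | (PAGE_SIZE - 1),        /* 12287: one page too far */
               PAGE_ALIGN(end) - 1);         /* 8191: correct last byte */
        return 0;
    }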
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 761451f36e2d..5b7d4881a4f8 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -523,13 +523,16 @@ void ceph_free_inode(struct inode *inode)
+ kmem_cache_free(ceph_inode_cachep, ci);
+ }
+
+-void ceph_destroy_inode(struct inode *inode)
++void ceph_evict_inode(struct inode *inode)
+ {
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_inode_frag *frag;
+ struct rb_node *n;
+
+- dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
++ dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
++
++ truncate_inode_pages_final(&inode->i_data);
++ clear_inode(inode);
+
+ ceph_fscache_unregister_inode_cookie(ci);
+
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index d57fa60dcd43..74ca5970397f 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -840,10 +840,10 @@ static int ceph_remount(struct super_block *sb, int *flags, char *data)
+
+ static const struct super_operations ceph_super_ops = {
+ .alloc_inode = ceph_alloc_inode,
+- .destroy_inode = ceph_destroy_inode,
+ .free_inode = ceph_free_inode,
+ .write_inode = ceph_write_inode,
+ .drop_inode = ceph_drop_inode,
++ .evict_inode = ceph_evict_inode,
+ .sync_fs = ceph_sync_fs,
+ .put_super = ceph_put_super,
+ .remount_fs = ceph_remount,
+diff --git a/fs/ceph/super.h b/fs/ceph/super.h
+index 5f27e1f7f2d6..048409fba1a8 100644
+--- a/fs/ceph/super.h
++++ b/fs/ceph/super.h
+@@ -876,7 +876,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
+ extern const struct inode_operations ceph_file_iops;
+
+ extern struct inode *ceph_alloc_inode(struct super_block *sb);
+-extern void ceph_destroy_inode(struct inode *inode);
++extern void ceph_evict_inode(struct inode *inode);
+ extern void ceph_free_inode(struct inode *inode);
+ extern int ceph_drop_inode(struct inode *inode);
+
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index ed49222abecb..afa56237a0c3 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -83,5 +83,10 @@ struct cifs_sb_info {
+ * failover properly.
+ */
+ char *origin_fullpath; /* \\HOST\SHARE\[OPTIONAL PATH] */
++ /*
++ * Indicate whether serverino option was turned off later
++ * (cifs_autodisable_serverino) in order to match new mounts.
++ */
++ bool mnt_cifs_serverino_autodisabled;
+ };
+ #endif /* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 8dd6637a3cbb..59380dd546a1 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1223,11 +1223,11 @@ next_pdu:
+ atomic_read(&midCount));
+ cifs_dump_mem("Received Data is: ", bufs[i],
+ HEADER_SIZE(server));
++ smb2_add_credits_from_hdr(bufs[i], server);
+ #ifdef CONFIG_CIFS_DEBUG2
+ if (server->ops->dump_detail)
+ server->ops->dump_detail(bufs[i],
+ server);
+- smb2_add_credits_from_hdr(bufs[i], server);
+ cifs_dump_mids(server);
+ #endif /* CIFS_DEBUG2 */
+ }
+@@ -3460,12 +3460,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+ {
+ struct cifs_sb_info *old = CIFS_SB(sb);
+ struct cifs_sb_info *new = mnt_data->cifs_sb;
++ unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
++ unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
+
+ if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
+ return 0;
+
+- if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
+- (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
++ if (old->mnt_cifs_serverino_autodisabled)
++ newflags &= ~CIFS_MOUNT_SERVER_INUM;
++
++ if (oldflags != newflags)
+ return 0;
+
+ /*
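The connect.c change above keeps superblock sharing working after serverino
has been auto-disabled: the disabled bit is masked out of the new mount's
flags before the comparison. A compilable sketch with illustrative bit values:

    #define MOUNT_SERVER_INUM 0x01u   /* illustrative bits, not the real ones */
    #define MOUNT_MASK        0xffu

    static int options_match(unsigned int old_flags, unsigned int new_flags,
                             int serverino_autodisabled)
    {
        unsigned int oldflags = old_flags & MOUNT_MASK;
        unsigned int newflags = new_flags & MOUNT_MASK;

        if (serverino_autodisabled)
            newflags &= ~MOUNT_SERVER_INUM;
        return oldflags == newflags;
    }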
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index d7cc62252634..efaf7a3631ba 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2408,6 +2408,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ struct inode *inode = d_inode(direntry);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cifsInode = CIFS_I(inode);
++ struct cifsFileInfo *wfile;
++ struct cifs_tcon *tcon;
+ char *full_path = NULL;
+ int rc = -EACCES;
+ __u32 dosattr = 0;
+@@ -2454,6 +2456,20 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ mapping_set_error(inode->i_mapping, rc);
+ rc = 0;
+
++ if (attrs->ia_valid & ATTR_MTIME) {
++ rc = cifs_get_writable_file(cifsInode, false, &wfile);
++ if (!rc) {
++ tcon = tlink_tcon(wfile->tlink);
++ rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
++ cifsFileInfo_put(wfile);
++ if (rc)
++ return rc;
++ } else if (rc != -EBADF)
++ return rc;
++ else
++ rc = 0;
++ }
++
+ if (attrs->ia_valid & ATTR_SIZE) {
+ rc = cifs_set_file_size(inode, attrs, xid, full_path);
+ if (rc != 0)
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index b1a696a73f7c..f383877a6511 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -539,6 +539,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
+ tcon = cifs_sb_master_tcon(cifs_sb);
+
+ cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
++ cifs_sb->mnt_cifs_serverino_autodisabled = true;
+ cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
+ tcon ? tcon->treeName : "new server");
+ cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 278405d26c47..d8d9cdfa30b6 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -120,6 +120,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_query_info_compound_enter(xid, ses->Suid, tcon->tid,
+@@ -147,6 +149,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID, current->tgid,
+ FILE_DISPOSITION_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
+@@ -163,6 +167,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID, current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
+@@ -180,6 +186,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID, current->tgid,
+ FILE_BASIC_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_set_info_compound_enter(xid, ses->Suid, tcon->tid,
+@@ -206,6 +214,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID, current->tgid,
+ FILE_RENAME_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
+@@ -231,6 +241,8 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID, current->tgid,
+ FILE_LINK_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
++ if (rc)
++ goto finished;
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 9fd56b0acd7e..2ec37dc589a7 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -694,8 +694,51 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+
+ smb2_set_related(&rqst[1]);
+
++ /*
++ * We do not hold the lock for the open because, in case
++ * SMB2_open needs to reconnect, it will end up calling
++ * cifs_mark_open_files_invalid(), which takes the lock again
++ * and thus causes a deadlock.
++ */
++
++ mutex_unlock(&tcon->crfid.fid_mutex);
+ rc = compound_send_recv(xid, ses, flags, 2, rqst,
+ resp_buftype, rsp_iov);
++ mutex_lock(&tcon->crfid.fid_mutex);
++
++ /*
++ * Now we need to check again as the cached root might have
++ * been successfully re-opened by a concurrent process.
++ */
++
++ if (tcon->crfid.is_valid) {
++ /* work was already done */
++
++ /* stash fids for close() later */
++ struct cifs_fid fid = {
++ .persistent_fid = pfid->persistent_fid,
++ .volatile_fid = pfid->volatile_fid,
++ };
++
++ /*
++ * caller expects this func to set pfid to a valid
++ * cached root, so we copy the existing one and get a
++ * reference.
++ */
++ memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
++ kref_get(&tcon->crfid.refcount);
++
++ mutex_unlock(&tcon->crfid.fid_mutex);
++
++ if (rc == 0) {
++ /* close extra handle outside of crit sec */
++ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
++ }
++ goto oshr_free;
++ }
++
++ /* Cached root is still invalid, continue normally */
++
+ if (rc)
+ goto oshr_exit;
+
+@@ -729,8 +772,9 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
+ (char *)&tcon->crfid.file_all_info))
+ tcon->crfid.file_all_info_is_valid = 1;
+
+- oshr_exit:
++oshr_exit:
+ mutex_unlock(&tcon->crfid.fid_mutex);
++oshr_free:
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+@@ -2027,6 +2071,10 @@ smb2_set_related(struct smb_rqst *rqst)
+ struct smb2_sync_hdr *shdr;
+
+ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
++ if (shdr == NULL) {
++ cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
++ return;
++ }
+ shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
+ }
+
+@@ -2041,6 +2089,12 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
+ unsigned long len = smb_rqst_len(server, rqst);
+ int i, num_padding;
+
++ shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
++ if (shdr == NULL) {
++ cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
++ return;
++ }
++
+ /* SMB headers in a compound are 8 byte aligned. */
+
+ /* No padding needed */
+@@ -2080,7 +2134,6 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
+ }
+
+ finished:
+- shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
+ shdr->NextCommand = cpu_to_le32(len);
+ }
+
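The open_shroot() fix above is the drop-the-lock-and-recheck pattern: the fid
mutex cannot be held across SMB2_open because a reconnect would re-enter it,
so after the slow call the cached state is examined again in case another
thread completed the open first, and any extra handle is closed outside the
critical section. A userspace sketch of that shape (slow_open() is a stub):

    #include <pthread.h>

    static pthread_mutex_t fid_lock = PTHREAD_MUTEX_INITIALIZER;
    static int cached_valid;

    static int slow_open(void)
    {
        return 0;                /* stand-in for the compound open */
    }

    static int open_cached(void)
    {
        int rc;

        pthread_mutex_lock(&fid_lock);
        if (cached_valid) {
            pthread_mutex_unlock(&fid_lock);
            return 0;
        }
        pthread_mutex_unlock(&fid_lock);  /* drop before the slow call */

        rc = slow_open();

        pthread_mutex_lock(&fid_lock);
        if (cached_valid) {               /* someone else won the race */
            pthread_mutex_unlock(&fid_lock);
            /* close our extra handle here, outside the lock */
            return 0;
        }
        if (rc == 0)
            cached_valid = 1;
        pthread_mutex_unlock(&fid_lock);
        return rc;
    }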
+diff --git a/fs/coda/file.c b/fs/coda/file.c
+index 1cbc1f2298ee..43d371551d2b 100644
+--- a/fs/coda/file.c
++++ b/fs/coda/file.c
+@@ -27,6 +27,13 @@
+ #include "coda_linux.h"
+ #include "coda_int.h"
+
++struct coda_vm_ops {
++ atomic_t refcnt;
++ struct file *coda_file;
++ const struct vm_operations_struct *host_vm_ops;
++ struct vm_operations_struct vm_ops;
++};
++
+ static ssize_t
+ coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+@@ -61,6 +68,34 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
+ return ret;
+ }
+
++static void
++coda_vm_open(struct vm_area_struct *vma)
++{
++ struct coda_vm_ops *cvm_ops =
++ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
++
++ atomic_inc(&cvm_ops->refcnt);
++
++ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
++ cvm_ops->host_vm_ops->open(vma);
++}
++
++static void
++coda_vm_close(struct vm_area_struct *vma)
++{
++ struct coda_vm_ops *cvm_ops =
++ container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
++
++ if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
++ cvm_ops->host_vm_ops->close(vma);
++
++ if (atomic_dec_and_test(&cvm_ops->refcnt)) {
++ vma->vm_ops = cvm_ops->host_vm_ops;
++ fput(cvm_ops->coda_file);
++ kfree(cvm_ops);
++ }
++}
++
+ static int
+ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ {
+@@ -68,6 +103,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ struct coda_inode_info *cii;
+ struct file *host_file;
+ struct inode *coda_inode, *host_inode;
++ struct coda_vm_ops *cvm_ops;
++ int ret;
+
+ cfi = CODA_FTOC(coda_file);
+ BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
+@@ -76,6 +113,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ if (!host_file->f_op->mmap)
+ return -ENODEV;
+
++ if (WARN_ON(coda_file != vma->vm_file))
++ return -EIO;
++
++ cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
++ if (!cvm_ops)
++ return -ENOMEM;
++
+ coda_inode = file_inode(coda_file);
+ host_inode = file_inode(host_file);
+
+@@ -89,6 +133,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ * the container file on us! */
+ else if (coda_inode->i_mapping != host_inode->i_mapping) {
+ spin_unlock(&cii->c_lock);
++ kfree(cvm_ops);
+ return -EBUSY;
+ }
+
+@@ -97,7 +142,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
+ cfi->cfi_mapcount++;
+ spin_unlock(&cii->c_lock);
+
+- return call_mmap(host_file, vma);
++ vma->vm_file = get_file(host_file);
++ ret = call_mmap(vma->vm_file, vma);
++
++ if (ret) {
++ /* if call_mmap fails, our caller will put coda_file so we
++ * should drop the reference to the host_file that we got.
++ */
++ fput(host_file);
++ kfree(cvm_ops);
++ } else {
++ /* here we add redirects for the open/close vm_operations */
++ cvm_ops->host_vm_ops = vma->vm_ops;
++ if (vma->vm_ops)
++ cvm_ops->vm_ops = *vma->vm_ops;
++
++ cvm_ops->vm_ops.open = coda_vm_open;
++ cvm_ops->vm_ops.close = coda_vm_close;
++ cvm_ops->coda_file = coda_file;
++ atomic_set(&cvm_ops->refcnt, 1);
++
++ vma->vm_ops = &cvm_ops->vm_ops;
++ }
++ return ret;
+ }
+
+ int coda_open(struct inode *coda_inode, struct file *coda_file)
+@@ -207,4 +274,3 @@ const struct file_operations coda_file_operations = {
+ .fsync = coda_fsync,
+ .splice_read = generic_file_splice_read,
+ };
+-
+diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
+index 335a362ee446..6f753198eeef 100644
+--- a/fs/crypto/crypto.c
++++ b/fs/crypto/crypto.c
+@@ -154,7 +154,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+
+- BUG_ON(len == 0);
++ if (WARN_ON_ONCE(len <= 0))
++ return -EINVAL;
++ if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
++ return -EINVAL;
+
+ fscrypt_generate_iv(&iv, lblk_num, ci);
+
+@@ -238,8 +241,6 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
+ struct page *ciphertext_page = page;
+ int err;
+
+- BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
+-
+ if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
+ /* with inplace-encryption we just encrypt the page */
+ err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
+@@ -251,7 +252,8 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
+ return ciphertext_page;
+ }
+
+- BUG_ON(!PageLocked(page));
++ if (WARN_ON_ONCE(!PageLocked(page)))
++ return ERR_PTR(-EINVAL);
+
+ ctx = fscrypt_get_ctx(gfp_flags);
+ if (IS_ERR(ctx))
+@@ -299,8 +301,9 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
+ int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
+ unsigned int len, unsigned int offs, u64 lblk_num)
+ {
+- if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
+- BUG_ON(!PageLocked(page));
++ if (WARN_ON_ONCE(!PageLocked(page) &&
++ !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
++ return -EINVAL;
+
+ return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
+ len, offs, GFP_NOFS);
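The fscrypt change above downgrades crash-on-bad-input BUG_ON checks to a
one-time warning plus an error return, so a corrupted length fails the I/O
rather than the machine. A tiny model of the combined length check:

    #include <stdio.h>

    #define CRYPTO_BLOCK_SIZE 16   /* illustrative, mirrors FS_CRYPTO_BLOCK_SIZE */

    static int check_len(int len)
    {
        if (len <= 0 || len % CRYPTO_BLOCK_SIZE != 0) {
            fprintf(stderr, "bad length %d\n", len);  /* WARN_ON_ONCE */
            return -22;                               /* -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_len(64), check_len(10));  /* 0 -22 */
        return 0;
    }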
+diff --git a/fs/dax.c b/fs/dax.c
+index d2c90bf1969a..01ca13c80bb4 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -123,6 +123,15 @@ static int dax_is_empty_entry(void *entry)
+ return xa_to_value(entry) & DAX_EMPTY;
+ }
+
++/*
++ * true if the entry that was found is of a smaller order than the entry
++ * we were looking for
++ */
++static bool dax_is_conflict(void *entry)
++{
++ return entry == XA_RETRY_ENTRY;
++}
++
+ /*
+ * DAX page cache entry locking
+ */
+@@ -195,11 +204,13 @@ static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+ * Look up entry in page cache, wait for it to become unlocked if it
+ * is a DAX entry and return it. The caller must subsequently call
+ * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
+- * if it did.
++ * if it did. The entry returned may have a larger order than @order.
++ * If @order is larger than the order of the entry found in i_pages, this
++ * function returns a dax_is_conflict entry.
+ *
+ * Must be called with the i_pages lock held.
+ */
+-static void *get_unlocked_entry(struct xa_state *xas)
++static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+ {
+ void *entry;
+ struct wait_exceptional_entry_queue ewait;
+@@ -210,6 +221,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
+
+ for (;;) {
+ entry = xas_find_conflict(xas);
+- if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
+- !dax_is_locked(entry))
++ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
++ return entry;
++ if (dax_entry_order(entry) < order)
++ return XA_RETRY_ENTRY;
++ if (!dax_is_locked(entry))
+ return entry;
+@@ -254,7 +267,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+ static void put_unlocked_entry(struct xa_state *xas, void *entry)
+ {
+ /* If we were the only waiter woken, wake the next one */
+- if (entry)
++ if (entry && !dax_is_conflict(entry))
+ dax_wake_entry(xas, entry, false);
+ }
+
+@@ -461,7 +474,7 @@ void dax_unlock_page(struct page *page, dax_entry_t cookie)
+ * overlap with xarray value entries.
+ */
+ static void *grab_mapping_entry(struct xa_state *xas,
+- struct address_space *mapping, unsigned long size_flag)
++ struct address_space *mapping, unsigned int order)
+ {
+ unsigned long index = xas->xa_index;
+ bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
+@@ -469,20 +482,17 @@ static void *grab_mapping_entry(struct xa_state *xas,
+
+ retry:
+ xas_lock_irq(xas);
+- entry = get_unlocked_entry(xas);
++ entry = get_unlocked_entry(xas, order);
+
+ if (entry) {
++ if (dax_is_conflict(entry))
++ goto fallback;
+ if (!xa_is_value(entry)) {
+ xas_set_err(xas, EIO);
+ goto out_unlock;
+ }
+
+- if (size_flag & DAX_PMD) {
+- if (dax_is_pte_entry(entry)) {
+- put_unlocked_entry(xas, entry);
+- goto fallback;
+- }
+- } else { /* trying to grab a PTE entry */
++ if (order == 0) {
+ if (dax_is_pmd_entry(entry) &&
+ (dax_is_zero_entry(entry) ||
+ dax_is_empty_entry(entry))) {
+@@ -523,7 +533,11 @@ retry:
+ if (entry) {
+ dax_lock_entry(xas, entry);
+ } else {
+- entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
++ unsigned long flags = DAX_EMPTY;
++
++ if (order > 0)
++ flags |= DAX_PMD;
++ entry = dax_make_entry(pfn_to_pfn_t(0), flags);
+ dax_lock_entry(xas, entry);
+ if (xas_error(xas))
+ goto out_unlock;
+@@ -594,7 +608,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
+ if (WARN_ON_ONCE(!xa_is_value(entry)))
+ continue;
+ if (unlikely(dax_is_locked(entry)))
+- entry = get_unlocked_entry(&xas);
++ entry = get_unlocked_entry(&xas, 0);
+ if (entry)
+ page = dax_busy_page(entry);
+ put_unlocked_entry(&xas, entry);
+@@ -621,7 +635,7 @@ static int __dax_invalidate_entry(struct address_space *mapping,
+ void *entry;
+
+ xas_lock_irq(&xas);
+- entry = get_unlocked_entry(&xas);
++ entry = get_unlocked_entry(&xas, 0);
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+ goto out;
+ if (!trunc &&
+@@ -848,7 +862,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
+ if (unlikely(dax_is_locked(entry))) {
+ void *old_entry = entry;
+
+- entry = get_unlocked_entry(xas);
++ entry = get_unlocked_entry(xas, 0);
+
+ /* Entry got punched out / reallocated? */
+ if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+@@ -1509,7 +1523,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+ * entry is already in the array, for instance), it will return
+ * VM_FAULT_FALLBACK.
+ */
+- entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
++ entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
+ if (xa_is_internal(entry)) {
+ result = xa_to_internal(entry);
+ goto fallback;
+@@ -1658,11 +1672,10 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
+ vm_fault_t ret;
+
+ xas_lock_irq(&xas);
+- entry = get_unlocked_entry(&xas);
++ entry = get_unlocked_entry(&xas, order);
+ /* Did we race with someone splitting entry or so? */
+- if (!entry ||
+- (order == 0 && !dax_is_pte_entry(entry)) ||
+- (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
++ if (!entry || dax_is_conflict(entry) ||
++ (order == 0 && !dax_is_pte_entry(entry))) {
+ put_unlocked_entry(&xas, entry);
+ xas_unlock_irq(&xas);
+ trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
+diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
+index 91d65f337d87..54bfa71ffdad 100644
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1004,8 +1004,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)
+
+ rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
+ inode);
+- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+- return rc >= 0 ? -EINVAL : rc;
++ if (rc < 0)
++ return rc;
++ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
++ return -EINVAL;
+ rc = ecryptfs_validate_marker(marker);
+ if (!rc)
+ ecryptfs_i_size_init(file_size, inode);
+@@ -1367,8 +1369,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
+ ecryptfs_inode_to_lower(inode),
+ ECRYPTFS_XATTR_NAME, file_size,
+ ECRYPTFS_SIZE_AND_MARKER_BYTES);
+- if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
+- return rc >= 0 ? -EINVAL : rc;
++ if (rc < 0)
++ return rc;
++ else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
++ return -EINVAL;
+ rc = ecryptfs_validate_marker(marker);
+ if (!rc)
+ ecryptfs_i_size_init(file_size, inode);
+diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
+index e41cbe8e81b9..9ebfb1b28430 100644
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -715,6 +715,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
+ void wbc_account_io(struct writeback_control *wbc, struct page *page,
+ size_t bytes)
+ {
++ struct cgroup_subsys_state *css;
+ int id;
+
+ /*
+@@ -726,7 +727,12 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
+ if (!wbc->wb)
+ return;
+
+- id = mem_cgroup_css_from_page(page)->id;
++ css = mem_cgroup_css_from_page(page);
++ /* dead cgroups shouldn't contribute to inode ownership arbitration */
++ if (!(css->flags & CSS_ONLINE))
++ return;
++
++ id = css->id;
+
+ if (id == wbc->wb_id) {
+ wbc->wb_bytes += bytes;
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 57b6a45576ad..9f44ddc34c7b 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -140,19 +140,12 @@ struct nfs_cache_array {
+ struct nfs_cache_array_entry array[0];
+ };
+
+-struct readdirvec {
+- unsigned long nr;
+- unsigned long index;
+- struct page *pages[NFS_MAX_READDIR_RAPAGES];
+-};
+-
+ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
+ typedef struct {
+ struct file *file;
+ struct page *page;
+ struct dir_context *ctx;
+ unsigned long page_index;
+- struct readdirvec pvec;
+ u64 *dir_cookie;
+ u64 last_cookie;
+ loff_t current_index;
+@@ -532,10 +525,6 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ struct nfs_cache_array *array;
+ unsigned int count = 0;
+ int status;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+-
+- desc->pvec.index = desc->page_index;
+- desc->pvec.nr = 0;
+
+ scratch = alloc_page(GFP_KERNEL);
+ if (scratch == NULL)
+@@ -560,40 +549,20 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ if (desc->plus)
+ nfs_prime_dcache(file_dentry(desc->file), entry);
+
+- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+- if (status == -ENOSPC) {
+- desc->pvec.nr++;
+- if (desc->pvec.nr == max_rapages)
+- break;
+- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+- }
++ status = nfs_readdir_add_to_array(entry, page);
+ if (status != 0)
+ break;
+ } while (!entry->eof);
+
+- /*
+- * page and desc->pvec.pages[0] are valid, don't need to check
+- * whether or not to be NULL.
+- */
+- copy_highpage(page, desc->pvec.pages[0]);
+-
+ out_nopages:
+ if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
+- array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]);
++ array = kmap(page);
+ array->eof_index = array->size;
+ status = 0;
+- kunmap_atomic(array);
++ kunmap(page);
+ }
+
+ put_page(scratch);
+-
+- /*
+- * desc->pvec.nr > 0 means at least one page was completely filled,
+- * we should return -ENOSPC. Otherwise function
+- * nfs_readdir_xdr_to_array will enter infinite loop.
+- */
+- if (desc->pvec.nr > 0)
+- return -ENOSPC;
+ return status;
+ }
+
+@@ -627,24 +596,6 @@ out_freepages:
+ return -ENOMEM;
+ }
+
+-/*
+- * nfs_readdir_rapages_init initialize rapages by nfs_cache_array structure.
+- */
+-static
+-void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc)
+-{
+- struct nfs_cache_array *array;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+- int index;
+-
+- for (index = 0; index < max_rapages; index++) {
+- array = kmap_atomic(desc->pvec.pages[index]);
+- memset(array, 0, sizeof(struct nfs_cache_array));
+- array->eof_index = -1;
+- kunmap_atomic(array);
+- }
+-}
+-
+ static
+ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
+ {
+@@ -655,12 +606,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
+ int status = -ENOMEM;
+ unsigned int array_size = ARRAY_SIZE(pages);
+
+- /*
+- * This means we hit readdir rdpages miss, the preallocated rdpages
+- * are useless, the preallocate rdpages should be reinitialized.
+- */
+- nfs_readdir_rapages_init(desc);
+-
+ entry.prev_cookie = 0;
+ entry.cookie = desc->last_cookie;
+ entry.eof = 0;
+@@ -721,24 +666,9 @@ int nfs_readdir_filler(void *data, struct page* page)
+ struct inode *inode = file_inode(desc->file);
+ int ret;
+
+- /*
+- * If desc->page_index in range desc->pvec.index and
+- * desc->pvec.index + desc->pvec.nr, we get readdir cache hit.
+- */
+- if (desc->page_index >= desc->pvec.index &&
+- desc->page_index < (desc->pvec.index + desc->pvec.nr)) {
+- /*
+- * page and desc->pvec.pages[x] are valid, don't need to check
+- * whether or not to be NULL.
+- */
+- copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]);
+- ret = 0;
+- } else {
+- ret = nfs_readdir_xdr_to_array(desc, page, inode);
+- if (ret < 0)
+- goto error;
+- }
+-
++ ret = nfs_readdir_xdr_to_array(desc, page, inode);
++ if (ret < 0)
++ goto error;
+ SetPageUptodate(page);
+
+ if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
+@@ -903,7 +833,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ *desc = &my_desc;
+ struct nfs_open_dir_context *dir_ctx = file->private_data;
+ int res = 0;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+
+ dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+ file, (long long)ctx->pos);
+@@ -923,12 +852,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ desc->decode = NFS_PROTO(inode)->decode_dirent;
+ desc->plus = nfs_use_readdirplus(inode, ctx);
+
+- res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages);
+- if (res < 0)
+- return -ENOMEM;
+-
+- nfs_readdir_rapages_init(desc);
+-
+ if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
+ res = nfs_revalidate_mapping(inode, file->f_mapping);
+ if (res < 0)
+@@ -964,7 +887,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ break;
+ } while (!desc->eof);
+ out:
+- nfs_readdir_free_pages(desc->pvec.pages, max_rapages);
+ if (res > 0)
+ res = 0;
+ dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 19f856f45689..3eda40a320a5 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -257,7 +257,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
+ if (status == 0)
+ return 0;
+
+- if (mirror->mirror_ds == NULL)
++ if (IS_ERR_OR_NULL(mirror->mirror_ds))
+ return -EINVAL;
+
+ dserr = kmalloc(sizeof(*dserr), gfp_flags);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 0b4a1a974411..53777813ca95 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1100,6 +1100,7 @@ int nfs_open(struct inode *inode, struct file *filp)
+ nfs_fscache_open_file(inode, filp);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(nfs_open);
+
+ /*
+ * This function is called whenever some part of NFS notices that
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 498fab72f70b..81e2fdff227e 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -69,8 +69,7 @@ struct nfs_clone_mount {
+ * Maximum number of pages that readdir can use for creating
+ * a vmapped array of pages.
+ */
+-#define NFS_MAX_READDIR_PAGES 64
+-#define NFS_MAX_READDIR_RAPAGES 8
++#define NFS_MAX_READDIR_PAGES 8
+
+ struct nfs_client_initdata {
+ unsigned long init_flags;
+diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
+index cf42a8b939e3..3a507c42c1ca 100644
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
+ return err;
+
+ if ((openflags & O_ACCMODE) == 3)
+- openflags--;
++ return nfs_open(inode, filp);
+
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 83722e936b4a..bfe1f4625f60 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1890,7 +1890,7 @@ lookup_again:
+ spin_unlock(&ino->i_lock);
+ lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+ !atomic_read(&lo->plh_outstanding)));
+- if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
++ if (IS_ERR(lseg))
+ goto out_put_layout_hdr;
+ pnfs_put_layout_hdr(lo);
+ goto lookup_again;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index c74570736b24..36ad1b0d6259 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -499,6 +499,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+
+ if (root->set_ownership)
+ root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
++ else {
++ inode->i_uid = GLOBAL_ROOT_UID;
++ inode->i_gid = GLOBAL_ROOT_GID;
++ }
+
+ return inode;
+ }
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index 89a80b568a17..7fbe8f058220 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -318,22 +318,21 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+ goto fail;
+ inode->i_mode = S_IFREG | 0444;
+ inode->i_fop = &pstore_file_operations;
+- private = kzalloc(sizeof(*private), GFP_KERNEL);
+- if (!private)
+- goto fail_alloc;
+- private->record = record;
+-
+ scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+ pstore_type_to_name(record->type),
+ record->psi->name, record->id,
+ record->compressed ? ".enc.z" : "");
+
++ private = kzalloc(sizeof(*private), GFP_KERNEL);
++ if (!private)
++ goto fail_inode;
++
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_private;
+
++ private->record = record;
+ inode->i_size = private->total_size = size;
+-
+ inode->i_private = private;
+
+ if (record->time.tv_sec)
+@@ -349,7 +348,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
+
+ fail_private:
+ free_pstore_private(private);
+-fail_alloc:
++fail_inode:
+ iput(inode);
+
+ fail:
+diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
+index 0e9bd9c83870..aa6c093d9ce9 100644
+--- a/include/asm-generic/bug.h
++++ b/include/asm-generic/bug.h
+@@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
+ warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+ #else
+ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+-#define __WARN() __WARN_TAINT(TAINT_WARN)
+-#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
++#define __WARN() do { \
++ printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
++} while (0)
++#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
+ #define __WARN_printf_taint(taint, arg...) \
+ do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+ #endif
+diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
+index c0d4df6a606f..9d3b745c3107 100644
+--- a/include/drm/drm_displayid.h
++++ b/include/drm/drm_displayid.h
+@@ -40,6 +40,7 @@
+ #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+ #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+ #define DATA_BLOCK_TILED_DISPLAY 0x12
++#define DATA_BLOCK_CTA 0x81
+
+ #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+@@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
+ struct displayid_block base;
+ struct displayid_detailed_timings_1 timings[0];
+ };
++
++#define for_each_displayid_db(displayid, block, idx, length) \
++ for ((block) = (struct displayid_block *)&(displayid)[idx]; \
++ (idx) + sizeof(struct displayid_block) <= (length) && \
++ (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
++ (block)->num_bytes > 0; \
++ (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
++ (block) = (struct displayid_block *)&(displayid)[idx])
++
+ #endif
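As an illustration (an editor's sketch, not part of the patch), a caller
might walk DisplayID data blocks with the bounds-checked iterator added
above; the function, buffer, and counter names here are hypothetical:

	static int count_cta_blocks(u8 *displayid, int length)
	{
		struct displayid_block *block;
		int idx = 0, cta = 0;

		/* the iterator stops at the first truncated or
		 * zero-length block, so the body needs no extra
		 * bounds checks */
		for_each_displayid_db(displayid, block, idx, length) {
			if (block->tag == DATA_BLOCK_CTA)
				cta++;
		}
		return cta;
	}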
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 592669bcc536..56e18d7fbc5a 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -681,7 +681,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
+ }
+ }
+
+-static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
++static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
+ {
+ return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ }
+@@ -1429,7 +1429,7 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
+ return false;
+ }
+
+-static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
++static inline sector_t bdev_zone_sectors(struct block_device *bdev)
+ {
+ struct request_queue *q = bdev_get_queue(bdev);
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 52ec0d9fa1f7..068793a619ca 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -116,10 +116,10 @@ enum cpuhp_state {
+ CPUHP_AP_PERF_ARM_ACPI_STARTING,
+ CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_ARM_L2X0_STARTING,
++ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+ CPUHP_AP_JCORE_TIMER_STARTING,
+- CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+ CPUHP_AP_ARM_TWD_STARTING,
+ CPUHP_AP_QCOM_TIMER_STARTING,
+ CPUHP_AP_TEGRA_TIMER_STARTING,
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index dd0b5f4e1e45..0a6dae2f2b84 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -633,6 +633,11 @@ static inline bool is_vmalloc_addr(const void *x)
+ return false;
+ #endif
+ }
++
++#ifndef is_ioremap_addr
++#define is_ioremap_addr(x) is_vmalloc_addr(x)
++#endif
++
+ #ifdef CONFIG_MMU
+ extern int is_vmalloc_or_module_addr(const void *x);
+ #else
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index b25d20822e75..3508f4508a11 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -586,7 +586,7 @@ static inline void rcu_preempt_sleep_check(void) { }
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
+ */
+-static inline void rcu_read_lock(void)
++static __always_inline void rcu_read_lock(void)
+ {
+ __rcu_read_lock();
+ __acquire(RCU);
+diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
+index 38a0f0785323..c68ca81db0a1 100644
+--- a/include/linux/sched/signal.h
++++ b/include/linux/sched/signal.h
+@@ -329,7 +329,7 @@ extern void force_sigsegv(int sig, struct task_struct *p);
+ extern int force_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+ extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
+ extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
+-extern int kill_pid_info_as_cred(int, struct kernel_siginfo *, struct pid *,
++extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
+ const struct cred *);
+ extern int kill_pgrp(struct pid *pid, int sig, int priv);
+ extern int kill_pid(struct pid *pid, int sig, int priv);
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 2ac40135b576..b36a1df93e7c 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -808,11 +808,12 @@ struct ipvs_master_sync_state {
+ struct ip_vs_sync_buff *sync_buff;
+ unsigned long sync_queue_len;
+ unsigned int sync_queue_delay;
+- struct task_struct *master_thread;
+ struct delayed_work master_wakeup_work;
+ struct netns_ipvs *ipvs;
+ };
+
++struct ip_vs_sync_thread_data;
++
+ /* How much time to keep dests in trash */
+ #define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
+
+@@ -943,7 +944,8 @@ struct netns_ipvs {
+ spinlock_t sync_lock;
+ struct ipvs_master_sync_state *ms;
+ spinlock_t sync_buff_lock;
+- struct task_struct **backup_threads;
++ struct ip_vs_sync_thread_data *master_tinfo;
++ struct ip_vs_sync_thread_data *backup_tinfo;
+ int threads_mask;
+ volatile int sync_state;
+ struct mutex sync_mutex;
+diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
+index d074b6d60f8a..ac3c047d058c 100644
+--- a/include/net/xdp_sock.h
++++ b/include/net/xdp_sock.h
+@@ -67,6 +67,8 @@ struct xdp_sock {
+ * in the SKB destructor callback.
+ */
+ spinlock_t tx_completion_lock;
++ /* Protects generic receive. */
++ spinlock_t rx_lock;
+ u64 rx_dropped;
+ };
+
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 54873085f2da..0ae41b5df101 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -327,8 +327,8 @@ struct ib_rss_caps {
+ };
+
+ enum ib_tm_cap_flags {
+- /* Support tag matching on RC transport */
+- IB_TM_CAP_RC = 1 << 0,
++ /* Support tag matching with rendezvous offload for RC transport */
++ IB_TM_CAP_RNDV_RC = 1 << 0,
+ };
+
+ struct ib_tm_caps {
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index a7c602576b68..66fb592f2d5e 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -249,6 +249,8 @@ struct hda_codec {
+ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
+ unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
++ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
++
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
+ unsigned long power_off_acct;
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index d85816878a52..cc1d060cbf13 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -1379,7 +1379,7 @@ TRACE_EVENT(rxrpc_rx_eproto,
+ ),
+
+ TP_fast_assign(
+- __entry->call = call->debug_id;
++ __entry->call = call ? call->debug_id : 0;
+ __entry->serial = serial;
+ __entry->why = why;
+ ),
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index a8b823c30b43..29a5bc3d5c66 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
+ char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u32 gpl_compatible:1;
++ __u32 :31; /* alignment pad */
+ __u64 netns_dev;
+ __u64 netns_ino;
+ __u32 nr_jited_ksyms;
+diff --git a/include/xen/events.h b/include/xen/events.h
+index a48897199975..c0e6a0598397 100644
+--- a/include/xen/events.h
++++ b/include/xen/events.h
+@@ -3,6 +3,7 @@
+ #define _XEN_EVENTS_H
+
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+ #ifdef CONFIG_PCI_MSI
+ #include <linux/msi.h>
+ #endif
+@@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
+
+ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
+ void rebind_evtchn_irq(int evtchn, int irq);
+-int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
++int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
+
+ static inline void notify_remote_via_evtchn(int port)
+ {
+diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
+index 4c2fa3ac56f6..29d781061cd5 100644
+--- a/kernel/bpf/Makefile
++++ b/kernel/bpf/Makefile
+@@ -1,5 +1,6 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-y := core.o
++CFLAGS_core.o += $(call cc-disable-warning, override-init)
+
+ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
+ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 080e2bb644cc..f2148db91439 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -1364,10 +1364,10 @@ select_insn:
+ insn++;
+ CONT;
+ ALU_ARSH_X:
+- DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
++ DST = (u64) (u32) (((s32) DST) >> SRC);
+ CONT;
+ ALU_ARSH_K:
+- DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
++ DST = (u64) (u32) (((s32) DST) >> IMM);
+ CONT;
+ ALU64_ARSH_X:
+ (*(s64 *) &DST) >>= SRC;
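For context (an editor's illustration, not from the patch): the old
*(s32 *) &DST form re-reads the register through a pointer, which picks
up the high half of the 64bit word on big-endian hosts, whereas the
value cast (s32) DST takes the low 32 bits everywhere. A standalone C
sketch of the difference:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		union { uint64_t u; int32_t half[2]; } dst = {
			.u = 0xffffffff80000000ULL
		};

		int32_t by_value = (int32_t)dst.u; /* low word, any endianness */
		int32_t by_punning = dst.half[0];  /* low word only on LE */

		printf("%d %d\n", by_value >> 4, by_punning >> 4);
		return 0;
	}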
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a5c369e60343..11528bdaa9dc 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6456,17 +6456,18 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ * the state of the call instruction (with WRITTEN set), and r0 comes
+ * from callee with its full parentage chain, anyway.
+ */
+- for (j = 0; j <= cur->curframe; j++)
+- for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+- cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
+ /* clear write marks in current state: the writes we did are not writes
+ * our child did, so they don't screen off its reads from us.
+ * (There are no read marks in current state, because reads always mark
+ * their parent and current state never has children yet. Only
+ * explored_states can get read marks.)
+ */
+- for (i = 0; i < BPF_REG_FP; i++)
+- cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;
++ for (j = 0; j <= cur->curframe; j++) {
++ for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
++ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
++ for (i = 0; i < BPF_REG_FP; i++)
++ cur->frame[j]->regs[i].live = REG_LIVE_NONE;
++ }
+
+ /* all stack frames are accessible from callee, clear them all */
+ for (j = 0; j <= cur->curframe; j++) {
+diff --git a/kernel/iomem.c b/kernel/iomem.c
+index 93c264444510..62c92e43aa0d 100644
+--- a/kernel/iomem.c
++++ b/kernel/iomem.c
+@@ -121,7 +121,7 @@ EXPORT_SYMBOL(memremap);
+
+ void memunmap(void *addr)
+ {
+- if (is_vmalloc_addr(addr))
++ if (is_ioremap_addr(addr))
+ iounmap((void __iomem *) addr);
+ }
+ EXPORT_SYMBOL(memunmap);
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 3ff4a1260885..b76703b2c0af 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -754,6 +754,8 @@ void handle_fasteoi_nmi(struct irq_desc *desc)
+ unsigned int irq = irq_desc_get_irq(desc);
+ irqreturn_t res;
+
++ __kstat_incr_irqs_this_cpu(desc);
++
+ trace_irq_handler_entry(irq, action);
+ /*
+ * NMIs cannot be shared, there is only one action.
+@@ -968,6 +970,8 @@ void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
+ unsigned int irq = irq_desc_get_irq(desc);
+ irqreturn_t res;
+
++ __kstat_incr_irqs_this_cpu(desc);
++
+ trace_irq_handler_entry(irq, action);
+ res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
+ trace_irq_handler_exit(irq, action, res);
+diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
+index c52b737ab8e3..9484e88dabc2 100644
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -680,6 +680,8 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
+ * @hwirq: The HW irq number to convert to a logical one
+ * @regs: Register file coming from the low-level handling code
+ *
++ * This function must be called from an NMI context.
++ *
+ * Returns: 0 on success, or -EINVAL if conversion has failed
+ */
+ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+@@ -689,7 +691,10 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+ unsigned int irq;
+ int ret = 0;
+
+- nmi_enter();
++ /*
++	 * NMI context needs to be set up earlier in order to deal with tracing.
++ */
++ WARN_ON(!in_nmi());
+
+ irq = irq_find_mapping(domain, hwirq);
+
+@@ -702,7 +707,6 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+ else
+ ret = -EINVAL;
+
+- nmi_exit();
+ set_irq_regs(old_regs);
+ return ret;
+ }
+@@ -946,6 +950,11 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+ *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+ }
+
++static bool irq_is_nmi(struct irq_desc *desc)
++{
++ return desc->istate & IRQS_NMI;
++}
++
+ /**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq: The interrupt number
+@@ -963,7 +972,8 @@ unsigned int kstat_irqs(unsigned int irq)
+ if (!desc || !desc->kstat_irqs)
+ return 0;
+ if (!irq_settings_is_per_cpu_devid(desc) &&
+- !irq_settings_is_per_cpu(desc))
++ !irq_settings_is_per_cpu(desc) &&
++ !irq_is_nmi(desc))
+ return desc->tot_count;
+
+ for_each_possible_cpu(cpu)
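A hedged sketch of what this change shifts to callers (not part of the
patch): architecture entry code is now expected to establish NMI context
itself before dispatching, roughly along these lines (the wrapper name
is invented):

	static void arch_dispatch_nmi(struct irq_domain *domain,
				      unsigned int hwirq,
				      struct pt_regs *regs)
	{
		nmi_enter();	/* previously done inside handle_domain_nmi() */
		handle_domain_nmi(domain, hwirq, regs);
		nmi_exit();
	}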
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index c47788fa85f9..dbc936ccf149 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3703,19 +3703,19 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+- if (hlock->references) {
+- /*
+- * Check: unsigned int references:12, overflow.
+- */
+- if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+- return 0;
++ if (!references)
++ references++;
+
++ if (!hlock->references)
+ hlock->references++;
+- } else {
+- hlock->references = 2;
+- }
+
+- return 1;
++ hlock->references += references;
++
++ /* Overflow */
++ if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
++ return 0;
++
++ return 2;
+ }
+ }
+
+@@ -3921,22 +3921,33 @@ out:
+ }
+
+ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
+- int idx)
++ int idx, unsigned int *merged)
+ {
+ struct held_lock *hlock;
++ int first_idx = idx;
+
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ return 0;
+
+ for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
+- if (!__lock_acquire(hlock->instance,
++ switch (__lock_acquire(hlock->instance,
+ hlock_class(hlock)->subclass,
+ hlock->trylock,
+ hlock->read, hlock->check,
+ hlock->hardirqs_off,
+ hlock->nest_lock, hlock->acquire_ip,
+- hlock->references, hlock->pin_count))
++ hlock->references, hlock->pin_count)) {
++ case 0:
+ return 1;
++ case 1:
++ break;
++ case 2:
++ *merged += (idx == first_idx);
++ break;
++ default:
++ WARN_ON(1);
++ return 0;
++ }
+ }
+ return 0;
+ }
+@@ -3947,9 +3958,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
+ unsigned long ip)
+ {
+ struct task_struct *curr = current;
++ unsigned int depth, merged = 0;
+ struct held_lock *hlock;
+ struct lock_class *class;
+- unsigned int depth;
+ int i;
+
+ if (unlikely(!debug_locks))
+@@ -3974,14 +3985,14 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
+ curr->lockdep_depth = i;
+ curr->curr_chain_key = hlock->prev_chain_key;
+
+- if (reacquire_held_locks(curr, depth, i))
++ if (reacquire_held_locks(curr, depth, i, &merged))
+ return 0;
+
+ /*
+ * I took it apart and put it back together again, except now I have
+ * these 'spare' parts.. where shall I put them.
+ */
+- if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
++ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged))
+ return 0;
+ return 1;
+ }
+@@ -3989,8 +4000,8 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
+ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+ {
+ struct task_struct *curr = current;
++ unsigned int depth, merged = 0;
+ struct held_lock *hlock;
+- unsigned int depth;
+ int i;
+
+ if (unlikely(!debug_locks))
+@@ -4015,7 +4026,11 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+ hlock->read = 1;
+ hlock->acquire_ip = ip;
+
+- if (reacquire_held_locks(curr, depth, i))
++ if (reacquire_held_locks(curr, depth, i, &merged))
++ return 0;
++
++ /* Merging can't happen with unchanged classes.. */
++ if (DEBUG_LOCKS_WARN_ON(merged))
+ return 0;
+
+ /*
+@@ -4024,6 +4039,7 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+ */
+ if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+ return 0;
++
+ return 1;
+ }
+
+@@ -4038,8 +4054,8 @@ static int
+ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+ {
+ struct task_struct *curr = current;
++ unsigned int depth, merged = 1;
+ struct held_lock *hlock;
+- unsigned int depth;
+ int i;
+
+ if (unlikely(!debug_locks))
+@@ -4094,14 +4110,15 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+ if (i == depth-1)
+ return 1;
+
+- if (reacquire_held_locks(curr, depth, i + 1))
++ if (reacquire_held_locks(curr, depth, i + 1, &merged))
+ return 0;
+
+ /*
+ * We had N bottles of beer on the wall, we drank one, but now
+ * there's not N-1 bottles of beer left on the wall...
++ * Pouring two of the bottles together is acceptable.
+ */
+- DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
++ DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged);
+
+ /*
+ * Since reacquire_held_locks() would have called check_chain_key()
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 2d2fddbb7a4c..15a8ad63f4ff 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -267,7 +267,12 @@ static void padata_reorder(struct parallel_data *pd)
+ * The next object that needs serialization might have arrived to
+ * the reorder queues in the meantime, we will be called again
+ * from the timer function if no one else cares for it.
++ *
++ * Ensure reorder_objects is read after pd->lock is dropped so we see
++ * an increment from another task in padata_do_serial. Pairs with
++ * smp_mb__after_atomic in padata_do_serial.
+ */
++ smp_mb();
+ if (atomic_read(&pd->reorder_objects)
+ && !(pinst->flags & PADATA_RESET))
+ mod_timer(&pd->timer, jiffies + HZ);
+@@ -387,6 +392,13 @@ void padata_do_serial(struct padata_priv *padata)
+ list_add_tail(&padata->list, &pqueue->reorder.list);
+ spin_unlock(&pqueue->reorder.lock);
+
++ /*
++ * Ensure the atomic_inc of reorder_objects above is ordered correctly
++ * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
++ * in padata_reorder.
++ */
++ smp_mb__after_atomic();
++
+ put_cpu();
+
+ /* If we're running on the wrong CPU, call padata_reorder() via a
+diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
+index f54bc7cb6c2d..6d726cef241c 100644
+--- a/kernel/pid_namespace.c
++++ b/kernel/pid_namespace.c
+@@ -326,7 +326,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
+ }
+
+ read_lock(&tasklist_lock);
+- force_sig(SIGKILL, pid_ns->child_reaper);
++ send_sig(SIGKILL, pid_ns->child_reaper, 1);
+ read_unlock(&tasklist_lock);
+
+ do_exit(0);
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 158f04ec1d4f..c3cc6d85ec52 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -326,7 +326,7 @@ EXPORT_SYMBOL(release_resource);
+ *
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+- * -1 or -EINVAL for other invalid parameters.
++ * -ENODEV. Returns -EINVAL for invalid parameters.
+ *
+ * This function walks the whole tree and not just first level children
+ * unless @first_lvl is true.
+@@ -365,16 +365,16 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
+ break;
+ }
+
++ if (p) {
++ /* copy data */
++ res->start = max(start, p->start);
++ res->end = min(end, p->end);
++ res->flags = p->flags;
++ res->desc = p->desc;
++ }
++
+ read_unlock(&resource_lock);
+- if (!p)
+- return -1;
+-
+- /* copy data */
+- res->start = max(start, p->start);
+- res->end = min(end, p->end);
+- res->flags = p->flags;
+- res->desc = p->desc;
+- return 0;
++ return p ? 0 : -ENODEV;
+ }
+
+ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 874c427742a9..4d5962232a55 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5123,7 +5123,7 @@ long __sched io_schedule_timeout(long timeout)
+ }
+ EXPORT_SYMBOL(io_schedule_timeout);
+
+-void io_schedule(void)
++void __sched io_schedule(void)
+ {
+ int token;
+
+diff --git a/kernel/sched/sched-pelt.h b/kernel/sched/sched-pelt.h
+index a26473674fb7..c529706bed11 100644
+--- a/kernel/sched/sched-pelt.h
++++ b/kernel/sched/sched-pelt.h
+@@ -1,7 +1,7 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /* Generated by Documentation/scheduler/sched-pelt; do not modify. */
+
+-static const u32 runnable_avg_yN_inv[] = {
++static const u32 runnable_avg_yN_inv[] __maybe_unused = {
+ 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
+ 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
+ 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
+diff --git a/kernel/signal.c b/kernel/signal.c
+index edf8915ddd54..8aad311cbd59 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1057,27 +1057,6 @@ static inline bool legacy_queue(struct sigpending *signals, int sig)
+ return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
+ }
+
+-#ifdef CONFIG_USER_NS
+-static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
+-{
+- if (current_user_ns() == task_cred_xxx(t, user_ns))
+- return;
+-
+- if (SI_FROMKERNEL(info))
+- return;
+-
+- rcu_read_lock();
+- info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
+- make_kuid(current_user_ns(), info->si_uid));
+- rcu_read_unlock();
+-}
+-#else
+-static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
+-{
+- return;
+-}
+-#endif
+-
+ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
+ enum pid_type type, int from_ancestor_ns)
+ {
+@@ -1135,7 +1114,11 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
+ q->info.si_code = SI_USER;
+ q->info.si_pid = task_tgid_nr_ns(current,
+ task_active_pid_ns(t));
+- q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
++ rcu_read_lock();
++ q->info.si_uid =
++ from_kuid_munged(task_cred_xxx(t, user_ns),
++ current_uid());
++ rcu_read_unlock();
+ break;
+ case (unsigned long) SEND_SIG_PRIV:
+ clear_siginfo(&q->info);
+@@ -1147,13 +1130,8 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
+ break;
+ default:
+ copy_siginfo(&q->info, info);
+- if (from_ancestor_ns)
+- q->info.si_pid = 0;
+ break;
+ }
+-
+- userns_fixup_signal_uid(&q->info, t);
+-
+ } else if (!is_si_special(info)) {
+ if (sig >= SIGRTMIN && info->si_code != SI_USER) {
+ /*
+@@ -1197,6 +1175,28 @@ ret:
+ return ret;
+ }
+
++static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
++{
++ bool ret = false;
++ switch (siginfo_layout(info->si_signo, info->si_code)) {
++ case SIL_KILL:
++ case SIL_CHLD:
++ case SIL_RT:
++ ret = true;
++ break;
++ case SIL_TIMER:
++ case SIL_POLL:
++ case SIL_FAULT:
++ case SIL_FAULT_MCEERR:
++ case SIL_FAULT_BNDERR:
++ case SIL_FAULT_PKUERR:
++ case SIL_SYS:
++ ret = false;
++ break;
++ }
++ return ret;
++}
++
+ static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
+ enum pid_type type)
+ {
+@@ -1206,7 +1206,20 @@ static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct
+ from_ancestor_ns = si_fromuser(info) &&
+ !task_pid_nr_ns(current, task_active_pid_ns(t));
+ #endif
++ if (!is_si_special(info) && has_si_pid_and_uid(info)) {
++ struct user_namespace *t_user_ns;
++
++ rcu_read_lock();
++ t_user_ns = task_cred_xxx(t, user_ns);
++ if (current_user_ns() != t_user_ns) {
++ kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
++ info->si_uid = from_kuid_munged(t_user_ns, uid);
++ }
++ rcu_read_unlock();
+
++ if (!task_pid_nr_ns(current, task_active_pid_ns(t)))
++ info->si_pid = 0;
++ }
+ return __send_signal(sig, info, t, type, from_ancestor_ns);
+ }
+
+@@ -1440,13 +1453,44 @@ static inline bool kill_as_cred_perm(const struct cred *cred,
+ uid_eq(cred->uid, pcred->uid);
+ }
+
+-/* like kill_pid_info(), but doesn't use uid/euid of "current" */
+-int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
+- const struct cred *cred)
++/*
++ * The usb asyncio usage of siginfo is wrong. The glibc support
++ * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
++ * AKA after the generic fields:
++ * kernel_pid_t si_pid;
++ * kernel_uid32_t si_uid;
++ * sigval_t si_value;
++ *
++ * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
++ * after the generic fields is:
++ * void __user *si_addr;
++ *
++ * This is a practical problem when there is a 64bit big endian kernel
++ * and a 32bit userspace. The 32bit address will be encoded in the low
++ * 32bits of the pointer, and those low 32bits will be stored at a
++ * higher address than they appear at in a 32bit pointer. So userspace
++ * will not see the address it was expecting for its completions.
++ *
++ * There is nothing in the encoding that can allow
++ * copy_siginfo_to_user32 to detect this confusion of formats, so
++ * handle this by requiring the caller of kill_pid_usb_asyncio to
++ * notice when this situation takes place and to store the 32bit
++ * pointer in sival_int, instead of sival_ptr of the sigval_t addr
++ * parameter.
++ */
++int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
++ struct pid *pid, const struct cred *cred)
+ {
+- int ret = -EINVAL;
++ struct kernel_siginfo info;
+ struct task_struct *p;
+ unsigned long flags;
++ int ret = -EINVAL;
++
++ clear_siginfo(&info);
++ info.si_signo = sig;
++ info.si_errno = errno;
++ info.si_code = SI_ASYNCIO;
++ *((sigval_t *)&info.si_pid) = addr;
+
+ if (!valid_signal(sig))
+ return ret;
+@@ -1457,17 +1501,17 @@ int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
+ ret = -ESRCH;
+ goto out_unlock;
+ }
+- if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
++ if (!kill_as_cred_perm(cred, p)) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+- ret = security_task_kill(p, info, sig, cred);
++ ret = security_task_kill(p, &info, sig, cred);
+ if (ret)
+ goto out_unlock;
+
+ if (sig) {
+ if (lock_task_sighand(p, &flags)) {
+- ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
++ ret = __send_signal(sig, &info, p, PIDTYPE_TGID, 0);
+ unlock_task_sighand(p, &flags);
+ } else
+ ret = -ESRCH;
+@@ -1476,7 +1520,7 @@ out_unlock:
+ rcu_read_unlock();
+ return ret;
+ }
+-EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
++EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
+
+ /*
+ * kill_something_info() interprets pid in interesting ways just like kill(2).
+@@ -4477,6 +4521,28 @@ static inline void siginfo_buildtime_checks(void)
+ CHECK_OFFSET(si_syscall);
+ CHECK_OFFSET(si_arch);
+ #undef CHECK_OFFSET
++
++ /* usb asyncio */
++ BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
++ offsetof(struct siginfo, si_addr));
++ if (sizeof(int) == sizeof(void __user *)) {
++ BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
++ sizeof(void __user *));
++ } else {
++ BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
++ sizeof_field(struct siginfo, si_uid)) !=
++ sizeof(void __user *));
++ BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
++ offsetof(struct siginfo, si_uid));
++ }
++#ifdef CONFIG_COMPAT
++ BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
++ offsetof(struct compat_siginfo, si_addr));
++ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
++ sizeof(compat_uptr_t));
++ BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
++ sizeof_field(struct siginfo, si_pid));
++#endif
+ }
+
+ void __init signals_init(void)
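To make the sival convention described above concrete, here is a
hypothetical completion-path sketch (an editor's illustration, not the
actual USB devio caller; all names are invented):

	static void notify_async_completion(struct pid *pid,
					    const struct cred *cred,
					    void __user *uaddr,
					    bool compat_task, int sig,
					    int errno)
	{
		sigval_t addr = {};

		if (compat_task)
			/* 32bit userspace: the pointer must go in
			 * sival_int, as the comment above requires */
			addr.sival_int = (int)(unsigned long)uaddr;
		else
			addr.sival_ptr = uaddr;

		kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
	}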
+diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
+index 8de4f789dc1b..65eb796610dc 100644
+--- a/kernel/time/ntp.c
++++ b/kernel/time/ntp.c
+@@ -43,6 +43,7 @@ static u64 tick_length_base;
+ #define MAX_TICKADJ 500LL /* usecs */
+ #define MAX_TICKADJ_SCALED \
+ (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
++#define MAX_TAI_OFFSET 100000
+
+ /*
+ * phase-lock loop variables
+@@ -691,7 +692,8 @@ static inline void process_adjtimex_modes(const struct __kernel_timex *txc,
+ time_constant = max(time_constant, 0l);
+ }
+
+- if (txc->modes & ADJ_TAI && txc->constant >= 0)
++ if (txc->modes & ADJ_TAI &&
++ txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
+ *time_tai = txc->constant;
+
+ if (txc->modes & ADJ_OFFSET)
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index 98ba50dcb1b2..acb326f5f50a 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -282,23 +282,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
+ SEQ_printf(m, "\n");
+ }
+
+-static int timer_list_show(struct seq_file *m, void *v)
+-{
+- struct timer_list_iter *iter = v;
+-
+- if (iter->cpu == -1 && !iter->second_pass)
+- timer_list_header(m, iter->now);
+- else if (!iter->second_pass)
+- print_cpu(m, iter->cpu, iter->now);
+-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+- else if (iter->cpu == -1 && iter->second_pass)
+- timer_list_show_tickdevices_header(m);
+- else
+- print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
+-#endif
+- return 0;
+-}
+-
+ void sysrq_timer_list_show(void)
+ {
+ u64 now = ktime_to_ns(ktime_get());
+@@ -317,6 +300,24 @@ void sysrq_timer_list_show(void)
+ return;
+ }
+
++#ifdef CONFIG_PROC_FS
++static int timer_list_show(struct seq_file *m, void *v)
++{
++ struct timer_list_iter *iter = v;
++
++ if (iter->cpu == -1 && !iter->second_pass)
++ timer_list_header(m, iter->now);
++ else if (!iter->second_pass)
++ print_cpu(m, iter->cpu, iter->now);
++#ifdef CONFIG_GENERIC_CLOCKEVENTS
++ else if (iter->cpu == -1 && iter->second_pass)
++ timer_list_show_tickdevices_header(m);
++ else
++ print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
++#endif
++ return 0;
++}
++
+ static void *move_iter(struct timer_list_iter *iter, loff_t offset)
+ {
+ for (; offset; offset--) {
+@@ -376,3 +377,4 @@ static int __init init_timer_list_procfs(void)
+ return 0;
+ }
+ __initcall(init_timer_list_procfs);
++#endif
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index ba751f993c3b..cab4a5398f1d 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -1109,17 +1109,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
+ for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+ unsigned long ip = field->caller[i];
+
+- if (ip == ULONG_MAX || trace_seq_has_overflowed(s))
++ if (!ip || trace_seq_has_overflowed(s))
+ break;
+
+ trace_seq_puts(s, " => ");
+-
+- if (!ip) {
+- trace_seq_puts(s, "??");
+- trace_seq_putc(s, '\n');
+- continue;
+- }
+-
+ seq_print_user_ip(s, mm, ip, flags);
+ trace_seq_putc(s, '\n');
+ }
+diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
+index 1db74eb098d0..121beb2f0930 100644
+--- a/lib/reed_solomon/decode_rs.c
++++ b/lib/reed_solomon/decode_rs.c
+@@ -42,8 +42,18 @@
+ BUG_ON(pad < 0 || pad >= nn);
+
+ /* Does the caller provide the syndrome ? */
+- if (s != NULL)
+- goto decode;
++ if (s != NULL) {
++ for (i = 0; i < nroots; i++) {
++ /* The syndrome is in index form,
++ * so nn represents zero
++ */
++ if (s[i] != nn)
++ goto decode;
++ }
++
++ /* syndrome is zero, no errors to correct */
++ return 0;
++ }
+
+ /* form the syndromes; i.e., evaluate data(x) at roots of
+ * g(x) */
+@@ -99,9 +109,9 @@
+ if (no_eras > 0) {
+ /* Init lambda to be the erasure locator polynomial */
+ lambda[1] = alpha_to[rs_modnn(rs,
+- prim * (nn - 1 - eras_pos[0]))];
++ prim * (nn - 1 - (eras_pos[0] + pad)))];
+ for (i = 1; i < no_eras; i++) {
+- u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
++ u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
+ for (j = i + 1; j > 0; j--) {
+ tmp = index_of[lambda[j - 1]];
+ if (tmp != nn) {
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index 2882d9ba6607..eacb82468437 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -676,17 +676,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
+ {
+ if (!miter->__remaining) {
+ struct scatterlist *sg;
+- unsigned long pgoffset;
+
+ if (!__sg_page_iter_next(&miter->piter))
+ return false;
+
+ sg = miter->piter.sg;
+- pgoffset = miter->piter.sg_pgoffset;
+
+- miter->__offset = pgoffset ? 0 : sg->offset;
++ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
++ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
++ miter->__offset &= PAGE_SIZE - 1;
+ miter->__remaining = sg->offset + sg->length -
+- (pgoffset << PAGE_SHIFT) - miter->__offset;
++ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
++ miter->__offset;
+ miter->__remaining = min_t(unsigned long, miter->__remaining,
+ PAGE_SIZE - miter->__offset);
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index ba9138a4a1de..591eafafbd8c 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3530,12 +3530,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
+ if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
+ continue;
+ seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
+- (u64)memcg_page_state(memcg, i) * PAGE_SIZE);
++ (u64)memcg_page_state(memcg, memcg1_stats[i]) *
++ PAGE_SIZE);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
+ seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
+- (u64)memcg_events(memcg, i));
++ (u64)memcg_events(memcg, memcg1_events[i]));
+
+ for (i = 0; i < NR_LRU_LISTS; i++)
+ seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 910e02c793ff..96aafbf8ce4e 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2125,7 +2125,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
+ * 10TB 320 32GB
+ */
+ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+- struct scan_control *sc, bool actual_reclaim)
++ struct scan_control *sc, bool trace)
+ {
+ enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+@@ -2151,7 +2151,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+ * rid of the stale workingset quickly.
+ */
+ refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
+- if (file && actual_reclaim && lruvec->refaults != refaults) {
++ if (file && lruvec->refaults != refaults) {
+ inactive_ratio = 0;
+ } else {
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+@@ -2161,7 +2161,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
+ inactive_ratio = 1;
+ }
+
+- if (actual_reclaim)
++ if (trace)
+ trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
+ lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
+ lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
+diff --git a/mm/z3fold.c b/mm/z3fold.c
+index 985732c8b025..dfcd69d08c1e 100644
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -924,7 +924,16 @@ retry:
+ set_bit(PAGE_HEADLESS, &page->private);
+ goto headless;
+ }
+- __SetPageMovable(page, pool->inode->i_mapping);
++ if (can_sleep) {
++ lock_page(page);
++ __SetPageMovable(page, pool->inode->i_mapping);
++ unlock_page(page);
++ } else {
++ if (trylock_page(page)) {
++ __SetPageMovable(page, pool->inode->i_mapping);
++ unlock_page(page);
++ }
++ }
+ z3fold_page_lock(zhdr);
+
+ found:
+@@ -1331,6 +1340,7 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
++ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+
+ zhdr = page_address(page);
+ pool = zhdr_to_pool(zhdr);
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 549938af02e7..a3cd90a74012 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -767,10 +767,16 @@ static struct p9_trans_module p9_virtio_trans = {
+ /* The standard init function */
+ static int __init p9_virtio_init(void)
+ {
++ int rc;
++
+ INIT_LIST_HEAD(&virtio_chan_list);
+
+ v9fs_register_trans(&p9_virtio_trans);
+- return register_virtio_driver(&p9_virtio_drv);
++ rc = register_virtio_driver(&p9_virtio_drv);
++ if (rc)
++ v9fs_unregister_trans(&p9_virtio_trans);
++
++ return rc;
+ }
+
+ static void __exit p9_virtio_cleanup(void)
+diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
+index 29420ebb8f07..3963eb11c3fb 100644
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -530,13 +530,19 @@ static struct xenbus_driver xen_9pfs_front_driver = {
+
+ static int p9_trans_xen_init(void)
+ {
++ int rc;
++
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("Initialising Xen transport for 9pfs\n");
+
+ v9fs_register_trans(&p9_xen_trans);
+- return xenbus_register_frontend(&xen_9pfs_front_driver);
++ rc = xenbus_register_frontend(&xen_9pfs_front_driver);
++ if (rc)
++ v9fs_unregister_trans(&p9_xen_trans);
++
++ return rc;
+ }
+ module_init(p9_trans_xen_init);
+
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index bd4138ddf7e0..240ed70912d6 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -2337,7 +2337,7 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
+ return ret;
+ }
+
+-static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
++static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
+ {
+ /* begin scheduling originator messages on that interface */
+ batadv_iv_ogm_schedule(hard_iface);
+@@ -2683,8 +2683,8 @@ unlock:
+ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
+ .name = "BATMAN_IV",
+ .iface = {
+- .activate = batadv_iv_iface_activate,
+ .enable = batadv_iv_ogm_iface_enable,
++ .enabled = batadv_iv_iface_enabled,
+ .disable = batadv_iv_ogm_iface_disable,
+ .update_mac = batadv_iv_ogm_iface_update_mac,
+ .primary_set = batadv_iv_ogm_primary_iface_set,
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 79d1731b8306..3719cfd026f0 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -795,6 +795,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+
+ batadv_hardif_recalc_extra_skbroom(soft_iface);
+
++ if (bat_priv->algo_ops->iface.enabled)
++ bat_priv->algo_ops->iface.enabled(hard_iface);
++
+ out:
+ return 0;
+
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 1ddfd5e011ee..8a482c5ec67b 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -3813,6 +3813,8 @@ static void batadv_tt_purge(struct work_struct *work)
+ */
+ void batadv_tt_free(struct batadv_priv *bat_priv)
+ {
++ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
++
+ batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+ batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 74b644738a36..e0b25104cbfa 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -2129,6 +2129,9 @@ struct batadv_algo_iface_ops {
+ /** @enable: init routing info when hard-interface is enabled */
+ int (*enable)(struct batadv_hard_iface *hard_iface);
+
++ /** @enabled: notification when hard-interface was enabled (optional) */
++ void (*enabled)(struct batadv_hard_iface *hard_iface);
++
+ /** @disable: de-init routing info when hard-interface is disabled */
+ void (*disable)(struct batadv_hard_iface *hard_iface);
+
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 1555b0c6f7ec..9001bf331d56 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -180,10 +180,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
+ }
+
+ if (!rt) {
+- nexthop = &lowpan_cb(skb)->gw;
+-
+- if (ipv6_addr_any(nexthop))
+- return NULL;
++ if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
++ /* There is neither route nor gateway,
++ * probably the destination is a direct peer.
++ */
++ nexthop = daddr;
++ } else {
++ /* There is a known gateway
++ */
++ nexthop = &lowpan_cb(skb)->gw;
++ }
+ } else {
+ nexthop = rt6_nexthop(rt, daddr);
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 9e4fcf406d9c..17c50a98e7f7 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5588,6 +5588,11 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_UNKNOWN_CONN_ID);
+
++ if (min < hcon->le_conn_min_interval ||
++ max > hcon->le_conn_max_interval)
++ return send_conn_param_neg_reply(hdev, handle,
++ HCI_ERROR_INVALID_LL_PARAMS);
++
+ if (hci_check_conn_params(min, max, latency, timeout))
+ return send_conn_param_neg_reply(hdev, handle,
+ HCI_ERROR_INVALID_LL_PARAMS);
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index a442e21f3894..5abd423b55fa 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
+ hid->version = req->version;
+ hid->country = req->country;
+
+- strncpy(hid->name, req->name, sizeof(hid->name));
++ strscpy(hid->name, req->name, sizeof(hid->name));
+
+ snprintf(hid->phys, sizeof(hid->phys), "%pMR",
+ &l2cap_pi(session->ctrl_sock->sk)->chan->src);
+diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
+index 2151913892ce..03be6a4baef3 100644
+--- a/net/bluetooth/hidp/sock.c
++++ b/net/bluetooth/hidp/sock.c
+@@ -192,6 +192,7 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
+ ca.version = ca32.version;
+ ca.flags = ca32.flags;
+ ca.idle_to = ca32.idle_to;
++ ca32.name[sizeof(ca32.name) - 1] = '\0';
+ memcpy(ca.name, ca32.name, 128);
+
+ csock = sockfd_lookup(ca.ctrl_sock, &err);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 5406d7cd46ad..32d2be9d6858 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4394,6 +4394,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+
+ l2cap_chan_lock(chan);
+
++ if (chan->state != BT_DISCONN) {
++ l2cap_chan_unlock(chan);
++ mutex_unlock(&conn->chan_lock);
++ return 0;
++ }
++
+ l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, 0);
+
+@@ -5291,7 +5297,14 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
+
+ memset(&rsp, 0, sizeof(rsp));
+
+- err = hci_check_conn_params(min, max, latency, to_multiplier);
++ if (min < hcon->le_conn_min_interval ||
++ max > hcon->le_conn_max_interval) {
++ BT_DBG("requested connection interval exceeds current bounds.");
++ err = -EINVAL;
++ } else {
++ err = hci_check_conn_params(min, max, latency, to_multiplier);
++ }
++
+ if (err)
+ rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ else
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index e68c715f8d37..6c2b4e6e87ba 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2579,6 +2579,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
+ goto distribute;
+ }
+
++ /* Drop IRK if peer is using identity address during pairing but is
++ * providing different address as identity information.
++ *
++ * Microsoft Surface Precision Mouse is known to have this bug.
++ */
++ if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
++ (bacmp(&info->bdaddr, &hcon->dst) ||
++ info->addr_type != hcon->dst_type)) {
++ bt_dev_err(hcon->hdev,
++ "ignoring IRK with invalid identity address");
++ goto distribute;
++ }
++
+ bacpy(&smp->id_addr, &info->bdaddr);
+ smp->id_addr_type = info->addr_type;
+
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index a50dd6f34b91..fe5fc4bab7ee 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -2438,8 +2438,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
+ goto out;
+ }
+ err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+- if (err < 0)
++ if (err < 0) {
++ kfree_skb(out_skb);
+ goto out;
++ }
+
+ out_hdr = (struct sadb_msg *) out_skb->data;
+ out_hdr->sadb_msg_version = hdr->sadb_msg_version;
+@@ -2690,8 +2692,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
+ return PTR_ERR(out_skb);
+
+ err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
+- if (err < 0)
++ if (err < 0) {
++ kfree_skb(out_skb);
+ return err;
++ }
+
+ out_hdr = (struct sadb_msg *) out_skb->data;
+ out_hdr->sadb_msg_version = pfk->dump.msg_version;
+diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
+index 3cdf171cd468..16afa0df4004 100644
+--- a/net/netfilter/ipset/ip_set_core.c
++++ b/net/netfilter/ipset/ip_set_core.c
+@@ -1541,10 +1541,14 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+ memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+ cmdattr = (void *)&errmsg->msg + min_len;
+
+- nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
+- nlh->nlmsg_len - min_len,
+- ip_set_adt_policy, NULL);
++ ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr,
++ nlh->nlmsg_len - min_len,
++ ip_set_adt_policy, NULL);
+
++ if (ret) {
++ nlmsg_free(skb2);
++ return ret;
++ }
+ errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+ *errline = lineno;
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 10f619625abd..175f8fedcfaf 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -622,7 +622,7 @@ retry:
+ goto cleanup;
+ }
+ m->size = AHASH_INIT_SIZE;
+- extsize = ext_size(AHASH_INIT_SIZE, dsize);
++ extsize += ext_size(AHASH_INIT_SIZE, dsize);
+ RCU_INIT_POINTER(hbucket(t, key), m);
+ } else if (m->pos >= m->size) {
+ struct hbucket *ht;
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 7138556b206b..d5103a9eb302 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -2245,7 +2245,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
+ static int __net_init __ip_vs_init(struct net *net)
+ {
+ struct netns_ipvs *ipvs;
+- int ret;
+
+ ipvs = net_generic(net, ip_vs_net_id);
+ if (ipvs == NULL)
+@@ -2277,17 +2276,11 @@ static int __net_init __ip_vs_init(struct net *net)
+ if (ip_vs_sync_net_init(ipvs) < 0)
+ goto sync_fail;
+
+- ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+- if (ret < 0)
+- goto hook_fail;
+-
+ return 0;
+ /*
+ * Error handling
+ */
+
+-hook_fail:
+- ip_vs_sync_net_cleanup(ipvs);
+ sync_fail:
+ ip_vs_conn_net_cleanup(ipvs);
+ conn_fail:
+@@ -2317,6 +2310,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
+ net->ipvs = NULL;
+ }
+
++static int __net_init __ip_vs_dev_init(struct net *net)
++{
++ int ret;
++
++ ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
++ if (ret < 0)
++ goto hook_fail;
++ return 0;
++
++hook_fail:
++ return ret;
++}
++
+ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
+ {
+ struct netns_ipvs *ipvs = net_ipvs(net);
+@@ -2336,6 +2342,7 @@ static struct pernet_operations ipvs_core_ops = {
+ };
+
+ static struct pernet_operations ipvs_core_dev_ops = {
++ .init = __ip_vs_dev_init,
+ .exit = __ip_vs_dev_cleanup,
+ };
+
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 776c87ed4813..741d91aa4a8d 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -2396,9 +2396,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+ cfg.syncid = dm->syncid;
+ ret = start_sync_thread(ipvs, &cfg, dm->state);
+ } else {
+- mutex_lock(&ipvs->sync_mutex);
+ ret = stop_sync_thread(ipvs, dm->state);
+- mutex_unlock(&ipvs->sync_mutex);
+ }
+ goto out_dec;
+ }
+@@ -3515,10 +3513,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
+ if (!attrs[IPVS_DAEMON_ATTR_STATE])
+ return -EINVAL;
+
+- mutex_lock(&ipvs->sync_mutex);
+ ret = stop_sync_thread(ipvs,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+- mutex_unlock(&ipvs->sync_mutex);
+ return ret;
+ }
+
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 2526be6b3d90..a4a78c4b06de 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -195,6 +195,7 @@ union ip_vs_sync_conn {
+ #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
+
+ struct ip_vs_sync_thread_data {
++ struct task_struct *task;
+ struct netns_ipvs *ipvs;
+ struct socket *sock;
+ char *buf;
+@@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
+ max(IPVS_SYNC_SEND_DELAY, 1));
+ ms->sync_queue_len++;
+ list_add_tail(&sb->list, &ms->sync_queue);
+- if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
+- wake_up_process(ms->master_thread);
++ if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
++ int id = (int)(ms - ipvs->ms);
++
++ wake_up_process(ipvs->master_tinfo[id].task);
++ }
+ } else
+ ip_vs_sync_buff_release(sb);
+ spin_unlock(&ipvs->sync_lock);
+@@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
+ spin_lock_bh(&ipvs->sync_lock);
+ if (ms->sync_queue_len &&
+ ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
++ int id = (int)(ms - ipvs->ms);
++
+ ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
+- wake_up_process(ms->master_thread);
++ wake_up_process(ipvs->master_tinfo[id].task);
+ }
+ spin_unlock_bh(&ipvs->sync_lock);
+ }
+@@ -1703,10 +1709,6 @@ done:
+ if (sb)
+ ip_vs_sync_buff_release(sb);
+
+- /* release the sending multicast socket */
+- sock_release(tinfo->sock);
+- kfree(tinfo);
+-
+ return 0;
+ }
+
+@@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
+ }
+ }
+
+- /* release the sending multicast socket */
+- sock_release(tinfo->sock);
+- kfree(tinfo->buf);
+- kfree(tinfo);
+-
+ return 0;
+ }
+
+@@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
+ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ int state)
+ {
+- struct ip_vs_sync_thread_data *tinfo = NULL;
+- struct task_struct **array = NULL, *task;
++ struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
++ struct task_struct *task;
+ struct net_device *dev;
+ char *name;
+ int (*threadfn)(void *data);
+@@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ threadfn = sync_thread_master;
+ } else if (state == IP_VS_STATE_BACKUP) {
+ result = -EEXIST;
+- if (ipvs->backup_threads)
++ if (ipvs->backup_tinfo)
+ goto out_early;
+
+ ipvs->bcfg = *c;
+@@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ master_wakeup_work_handler);
+ ms->ipvs = ipvs;
+ }
+- } else {
+- array = kcalloc(count, sizeof(struct task_struct *),
+- GFP_KERNEL);
+- result = -ENOMEM;
+- if (!array)
+- goto out;
+ }
++ result = -ENOMEM;
++ ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
++ GFP_KERNEL);
++ if (!ti)
++ goto out;
+
+ for (id = 0; id < count; id++) {
+- result = -ENOMEM;
+- tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
+- if (!tinfo)
+- goto out;
++ tinfo = &ti[id];
+ tinfo->ipvs = ipvs;
+- tinfo->sock = NULL;
+ if (state == IP_VS_STATE_BACKUP) {
++ result = -ENOMEM;
+ tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
+ GFP_KERNEL);
+ if (!tinfo->buf)
+ goto out;
+- } else {
+- tinfo->buf = NULL;
+ }
+ tinfo->id = id;
+ if (state == IP_VS_STATE_MASTER)
+@@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+ result = PTR_ERR(task);
+ goto out;
+ }
+- tinfo = NULL;
+- if (state == IP_VS_STATE_MASTER)
+- ipvs->ms[id].master_thread = task;
+- else
+- array[id] = task;
++ tinfo->task = task;
+ }
+
+ /* mark as active */
+
+- if (state == IP_VS_STATE_BACKUP)
+- ipvs->backup_threads = array;
++ if (state == IP_VS_STATE_MASTER)
++ ipvs->master_tinfo = ti;
++ else
++ ipvs->backup_tinfo = ti;
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ ipvs->sync_state |= state;
+ spin_unlock_bh(&ipvs->sync_buff_lock);
+@@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
+
+ out:
+ /* We do not need RTNL lock anymore, release it here so that
+- * sock_release below and in the kthreads can use rtnl_lock
+- * to leave the mcast group.
++ * sock_release below can use rtnl_lock to leave the mcast group.
+ */
+ rtnl_unlock();
+- count = id;
+- while (count-- > 0) {
+- if (state == IP_VS_STATE_MASTER)
+- kthread_stop(ipvs->ms[count].master_thread);
+- else
+- kthread_stop(array[count]);
++ id = min(id, count - 1);
++ if (ti) {
++ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
++ if (tinfo->task)
++ kthread_stop(tinfo->task);
++ }
+ }
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
+ kfree(ipvs->ms);
+ ipvs->ms = NULL;
+ }
+ mutex_unlock(&ipvs->sync_mutex);
+- if (tinfo) {
+- if (tinfo->sock)
+- sock_release(tinfo->sock);
+- kfree(tinfo->buf);
+- kfree(tinfo);
++
++ /* No more mutexes, release socks */
++ if (ti) {
++ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
++ if (tinfo->sock)
++ sock_release(tinfo->sock);
++ kfree(tinfo->buf);
++ }
++ kfree(ti);
+ }
+- kfree(array);
+ return result;
+
+ out_early:
+@@ -1944,15 +1935,18 @@ out_early:
+
+ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
+ {
+- struct task_struct **array;
++ struct ip_vs_sync_thread_data *ti, *tinfo;
+ int id;
+ int retc = -EINVAL;
+
+ IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
+
++ mutex_lock(&ipvs->sync_mutex);
+ if (state == IP_VS_STATE_MASTER) {
++ retc = -ESRCH;
+ if (!ipvs->ms)
+- return -ESRCH;
++ goto err;
++ ti = ipvs->master_tinfo;
+
+ /*
+ * The lock synchronizes with sb_queue_tail(), so that we don't
+@@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
+ struct ipvs_master_sync_state *ms = &ipvs->ms[id];
+ int ret;
+
++ tinfo = &ti[id];
+ pr_info("stopping master sync thread %d ...\n",
+- task_pid_nr(ms->master_thread));
++ task_pid_nr(tinfo->task));
+ cancel_delayed_work_sync(&ms->master_wakeup_work);
+- ret = kthread_stop(ms->master_thread);
++ ret = kthread_stop(tinfo->task);
+ if (retc >= 0)
+ retc = ret;
+ }
+ kfree(ipvs->ms);
+ ipvs->ms = NULL;
++ ipvs->master_tinfo = NULL;
+ } else if (state == IP_VS_STATE_BACKUP) {
+- if (!ipvs->backup_threads)
+- return -ESRCH;
++ retc = -ESRCH;
++ if (!ipvs->backup_tinfo)
++ goto err;
++ ti = ipvs->backup_tinfo;
+
+ ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+- array = ipvs->backup_threads;
+ retc = 0;
+ for (id = ipvs->threads_mask; id >= 0; id--) {
+ int ret;
+
++ tinfo = &ti[id];
+ pr_info("stopping backup sync thread %d ...\n",
+- task_pid_nr(array[id]));
+- ret = kthread_stop(array[id]);
++ task_pid_nr(tinfo->task));
++ ret = kthread_stop(tinfo->task);
+ if (retc >= 0)
+ retc = ret;
+ }
+- kfree(array);
+- ipvs->backup_threads = NULL;
++ ipvs->backup_tinfo = NULL;
++ } else {
++ goto err;
+ }
++ id = ipvs->threads_mask;
++ mutex_unlock(&ipvs->sync_mutex);
++
++ /* No more mutexes, release socks */
++ for (tinfo = ti + id; tinfo >= ti; tinfo--) {
++ if (tinfo->sock)
++ sock_release(tinfo->sock);
++ kfree(tinfo->buf);
++ }
++ kfree(ti);
+
+ /* decrease the module use count */
+ ip_vs_use_count_dec();
++ return retc;
+
++err:
++ mutex_unlock(&ipvs->sync_mutex);
+ return retc;
+ }
+
+@@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
+ {
+ int retc;
+
+- mutex_lock(&ipvs->sync_mutex);
+ retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
+ if (retc && retc != -ESRCH)
+ pr_err("Failed to stop Master Daemon\n");
+@@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
+ retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
+ if (retc && retc != -ESRCH)
+ pr_err("Failed to stop Backup Daemon\n");
+- mutex_unlock(&ipvs->sync_mutex);
+ }
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 7db79c1b8084..1b77444d5b52 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -1256,7 +1256,6 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
+ struct nf_conntrack_tuple tuple;
+ struct nf_conn *ct;
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+- u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
+ struct nf_conntrack_zone zone;
+ int err;
+
+@@ -1266,11 +1265,13 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl,
+
+ if (cda[CTA_TUPLE_ORIG])
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+- u3, &zone);
++ nfmsg->nfgen_family, &zone);
+ else if (cda[CTA_TUPLE_REPLY])
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+- u3, &zone);
++ nfmsg->nfgen_family, &zone);
+ else {
++ u_int8_t u3 = nfmsg->version ? nfmsg->nfgen_family : AF_UNSPEC;
++
+ return ctnetlink_flush_conntrack(net, cda,
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh), u3);
+diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
+index a824367ed518..dd53e2b20f6b 100644
+--- a/net/netfilter/nf_conntrack_proto_icmp.c
++++ b/net/netfilter/nf_conntrack_proto_icmp.c
+@@ -218,7 +218,7 @@ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
+ /* See ip_conntrack_proto_tcp.c */
+ if (state->net->ct.sysctl_checksum &&
+ state->hook == NF_INET_PRE_ROUTING &&
+- nf_ip_checksum(skb, state->hook, dataoff, 0)) {
++ nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) {
+ icmp_error_log(skb, state, "bad hw icmp checksum");
+ return -NF_ACCEPT;
+ }
+diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
+index 07da07788f6b..83a24cc5753b 100644
+--- a/net/netfilter/nf_nat_proto.c
++++ b/net/netfilter/nf_nat_proto.c
+@@ -564,7 +564,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
+
+ if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ return 0;
+- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
++ if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
+ return 0;
+
+ inside = (void *)skb->data + hdrlen;
+diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
+index 06dc55590441..51b454d8fa9c 100644
+--- a/net/netfilter/utils.c
++++ b/net/netfilter/utils.c
+@@ -17,7 +17,8 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ case CHECKSUM_COMPLETE:
+ if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
+ break;
+- if ((protocol == 0 && !csum_fold(skb->csum)) ||
++ if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP &&
++ !csum_fold(skb->csum)) ||
+ !csum_tcpudp_magic(iph->saddr, iph->daddr,
+ skb->len - dataoff, protocol,
+ skb->csum)) {
+@@ -26,7 +27,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ }
+ /* fall through */
+ case CHECKSUM_NONE:
+- if (protocol == 0)
++ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
+ skb->csum = 0;
+ else
+ skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index b03bfa055c08..9e1743b364ec 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1788,6 +1788,7 @@ rpc_xdr_encode(struct rpc_task *task)
+ req->rq_snd_buf.head[0].iov_len = 0;
+ xdr_init_encode(&xdr, &req->rq_snd_buf,
+ req->rq_snd_buf.head[0].iov_base, req);
++ xdr_free_bvec(&req->rq_snd_buf);
+ if (rpc_encode_header(task, &xdr))
+ return;
+
+@@ -1827,8 +1828,6 @@ call_encode(struct rpc_task *task)
+ rpc_call_rpcerror(task, task->tk_status);
+ }
+ return;
+- } else {
+- xprt_request_prepare(task->tk_rqstp);
+ }
+
+ /* Add task to reply queue before transmission to avoid races */
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index f6c82b1651e7..5ddd34ad64b9 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -1013,6 +1013,8 @@ xprt_request_enqueue_receive(struct rpc_task *task)
+
+ if (!xprt_request_need_enqueue_receive(task, req))
+ return;
++
++ xprt_request_prepare(task->tk_rqstp);
+ spin_lock(&xprt->queue_lock);
+
+ /* Update the softirq receive buffer */
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 36652352a38c..85166c552542 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -909,6 +909,7 @@ static int xs_nospace(struct rpc_rqst *req)
+ static void
+ xs_stream_prepare_request(struct rpc_rqst *req)
+ {
++ xdr_free_bvec(&req->rq_rcv_buf);
+ req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
+ }
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index a14e8864e4fa..5e0637db92ea 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -123,13 +123,17 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+ u64 addr;
+ int err;
+
+- if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+- return -EINVAL;
++ spin_lock_bh(&xs->rx_lock);
++
++ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
++ err = -EINVAL;
++ goto out_unlock;
++ }
+
+ if (!xskq_peek_addr(xs->umem->fq, &addr) ||
+ len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
+- xs->rx_dropped++;
+- return -ENOSPC;
++ err = -ENOSPC;
++ goto out_drop;
+ }
+
+ addr += xs->umem->headroom;
+@@ -138,13 +142,21 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+ memcpy(buffer, xdp->data_meta, len + metalen);
+ addr += metalen;
+ err = xskq_produce_batch_desc(xs->rx, addr, len);
+- if (!err) {
+- xskq_discard_addr(xs->umem->fq);
+- xsk_flush(xs);
+- return 0;
+- }
++ if (err)
++ goto out_drop;
++
++ xskq_discard_addr(xs->umem->fq);
++ xskq_produce_flush_desc(xs->rx);
+
++ spin_unlock_bh(&xs->rx_lock);
++
++ xs->sk.sk_data_ready(&xs->sk);
++ return 0;
++
++out_drop:
+ xs->rx_dropped++;
++out_unlock:
++ spin_unlock_bh(&xs->rx_lock);
+ return err;
+ }
+
+@@ -765,6 +777,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
+
+ xs = xdp_sk(sk);
+ mutex_init(&xs->mutex);
++ spin_lock_init(&xs->rx_lock);
+ spin_lock_init(&xs->tx_completion_lock);
+
+ mutex_lock(&net->xdp.lock);
+diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
+index 88b9ae24658d..cba4a640d5e8 100644
+--- a/net/xdp/xsk_queue.h
++++ b/net/xdp/xsk_queue.h
+@@ -288,7 +288,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
+ /* Order producer and data */
+ smp_wmb(); /* B, matches C */
+
+- q->prod_tail = q->prod_head,
++ q->prod_tail = q->prod_head;
+ WRITE_ONCE(q->ring->producer, q->prod_tail);
+ }
+
+diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
+index c967fc3c38c8..51bb6018f3bf 100644
+--- a/net/xfrm/Kconfig
++++ b/net/xfrm/Kconfig
+@@ -15,6 +15,8 @@ config XFRM_ALGO
+ tristate
+ select XFRM
+ select CRYPTO
++ select CRYPTO_HASH
++ select CRYPTO_BLKCIPHER
+
+ if INET
+ config XFRM_USER
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 173477211e40..b88ba45ff1ac 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -151,6 +151,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+
+ err = -EINVAL;
+ switch (p->family) {
++ case AF_INET:
++ break;
++
++ case AF_INET6:
++#if IS_ENABLED(CONFIG_IPV6)
++ break;
++#else
++ err = -EAFNOSUPPORT;
++ goto out;
++#endif
++
++ default:
++ goto out;
++ }
++
++ switch (p->sel.family) {
++ case AF_UNSPEC:
++ break;
++
+ case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ goto out;
+diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
+index 6006154d36bd..a245255cecb2 100644
+--- a/scripts/kconfig/confdata.c
++++ b/scripts/kconfig/confdata.c
+@@ -914,7 +914,8 @@ int conf_write(const char *name)
+ "# %s\n"
+ "#\n", str);
+ need_newline = false;
+- } else if (!(sym->flags & SYMBOL_CHOICE)) {
++ } else if (!(sym->flags & SYMBOL_CHOICE) &&
++ !(sym->flags & SYMBOL_WRITTEN)) {
+ sym_calc_value(sym);
+ if (!(sym->flags & SYMBOL_WRITE))
+ goto next;
+@@ -922,7 +923,7 @@ int conf_write(const char *name)
+ fprintf(out, "\n");
+ need_newline = false;
+ }
+- sym->flags &= ~SYMBOL_WRITE;
++ sym->flags |= SYMBOL_WRITTEN;
+ conf_write_symbol(out, sym, &kconfig_printer_cb, NULL);
+ }
+
+@@ -1082,8 +1083,6 @@ int conf_write_autoconf(int overwrite)
+ if (!overwrite && is_present(autoconf_name))
+ return 0;
+
+- sym_clear_all_valid();
+-
+ conf_write_dep("include/config/auto.conf.cmd");
+
+ if (conf_touch_deps())
+diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
+index 8dde65bc3165..017843c9a4f4 100644
+--- a/scripts/kconfig/expr.h
++++ b/scripts/kconfig/expr.h
+@@ -141,6 +141,7 @@ struct symbol {
+ #define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */
+ #define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
+ #define SYMBOL_CHANGED 0x0400 /* ? */
++#define SYMBOL_WRITTEN 0x0800 /* track info to avoid double-write to .config */
+ #define SYMBOL_NO_WRITE 0x1000 /* Symbol for internal use only; it will not be written */
+ #define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
+ #define SYMBOL_WARNED 0x8000 /* warning has been issued */
+diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
+index 4582bc26770a..868ade3e8970 100644
+--- a/security/integrity/digsig.c
++++ b/security/integrity/digsig.c
+@@ -69,8 +69,9 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ return -EOPNOTSUPP;
+ }
+
+-static int __integrity_init_keyring(const unsigned int id, key_perm_t perm,
+- struct key_restriction *restriction)
++static int __init __integrity_init_keyring(const unsigned int id,
++ key_perm_t perm,
++ struct key_restriction *restriction)
+ {
+ const struct cred *cred = current_cred();
+ int err = 0;
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 94de51628fdc..3ec7ac70c313 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -6351,11 +6351,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
+ } else if (!strcmp(name, "fscreate")) {
+ tsec->create_sid = sid;
+ } else if (!strcmp(name, "keycreate")) {
+- error = avc_has_perm(&selinux_state,
+- mysid, sid, SECCLASS_KEY, KEY__CREATE,
+- NULL);
+- if (error)
+- goto abort_change;
++ if (sid) {
++ error = avc_has_perm(&selinux_state, mysid, sid,
++ SECCLASS_KEY, KEY__CREATE, NULL);
++ if (error)
++ goto abort_change;
++ }
+ tsec->keycreate_sid = sid;
+ } else if (!strcmp(name, "sockcreate")) {
+ tsec->sockcreate_sid = sid;
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index a60e7a17f0b8..7737b2670064 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1021,7 +1021,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ {
+ struct snd_seq_client *client = file->private_data;
+ int written = 0, len;
+- int err;
++ int err, handled;
+ struct snd_seq_event event;
+
+ if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
+@@ -1034,6 +1034,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ if (!client->accept_output || client->pool == NULL)
+ return -ENXIO;
+
++ repeat:
++ handled = 0;
+ /* allocate the pool now if the pool is not allocated yet */
+ mutex_lock(&client->ioctl_mutex);
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+@@ -1093,12 +1095,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 0, 0, &client->ioctl_mutex);
+ if (err < 0)
+ break;
++ handled++;
+
+ __skip_event:
+ /* Update pointers and counts */
+ count -= len;
+ buf += len;
+ written += len;
++
++ /* let's have a coffee break if too many events are queued */
++ if (++handled >= 200) {
++ mutex_unlock(&client->ioctl_mutex);
++ goto repeat;
++ }
+ }
+
+ out:
+diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
+index a3a113ef5d56..4f9f1d2a2ec5 100644
+--- a/sound/hda/ext/hdac_ext_bus.c
++++ b/sound/hda/ext/hdac_ext_bus.c
+@@ -85,7 +85,6 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
+ const struct hdac_ext_bus_ops *ext_ops)
+ {
+ int ret;
+- static int idx;
+
+ /* check if io ops are provided, if not load the defaults */
+ if (io_ops == NULL)
+@@ -96,7 +95,12 @@ int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
+ return ret;
+
+ bus->ext_ops = ext_ops;
+- bus->idx = idx++;
++ /* FIXME:
++ * Currently only one bus is supported; if there is a device with more
++ * buses, bus->idx should be greater than 0, but there needs to be a
++ * reliable way to always assign the same number.
++ */
++ bus->idx = 0;
+ bus->cmd_dma_state = true;
+
+ return 0;
+diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
+index b02f74528b66..812dc144fb5b 100644
+--- a/sound/hda/hdac_controller.c
++++ b/sound/hda/hdac_controller.c
+@@ -79,6 +79,8 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
+ snd_hdac_chip_writew(bus, RINTCNT, 1);
+ /* enable rirb dma and response irq */
+ snd_hdac_chip_writeb(bus, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
++ /* Accept unsolicited responses */
++ snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+ spin_unlock_irq(&bus->reg_lock);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_bus_init_cmd_io);
+@@ -415,9 +417,6 @@ int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
+ return -EBUSY;
+ }
+
+- /* Accept unsolicited responses */
+- snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, AZX_GCTL_UNSOL);
+-
+ /* detect codecs */
+ if (!bus->codec_mask) {
+ bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 6c51b8363f8b..106328584998 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2941,15 +2941,19 @@ static int hda_codec_runtime_resume(struct device *dev)
+ #ifdef CONFIG_PM_SLEEP
+ static int hda_codec_force_resume(struct device *dev)
+ {
++ struct hda_codec *codec = dev_to_hda_codec(dev);
++ bool forced_resume = !codec->relaxed_resume;
+ int ret;
+
+ /* The get/put pair below enforces the runtime resume even if the
+ * device hasn't been used at suspend time. This trick is needed to
+ * update the jack state change during the sleep.
+ */
+- pm_runtime_get_noresume(dev);
++ if (forced_resume)
++ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_force_resume(dev);
+- pm_runtime_put(dev);
++ if (forced_resume)
++ pm_runtime_put(dev);
+ return ret;
+ }
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index b7bde55b6adf..e49c1c00f5ce 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2291,8 +2291,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx, pcm_idx;
+
+- if (codec_has_acomp(codec))
++ if (codec_has_acomp(codec)) {
+ snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
++ codec->relaxed_resume = 0;
++ }
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+@@ -2415,7 +2417,6 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
+ snd_hda_override_conn_list(codec, nid, spec->num_cvts, spec->cvt_nids);
+ }
+
+-#define INTEL_GET_VENDOR_VERB 0xf81
+ #define INTEL_GET_VENDOR_VERB 0xf81
+ #define INTEL_SET_VENDOR_VERB 0x781
+ #define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
+@@ -2524,18 +2525,32 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
+ return -1;
+ }
+
++static int intel_port2pin(struct hda_codec *codec, int port)
++{
++ struct hdmi_spec *spec = codec->spec;
++
++ if (!spec->port_num) {
++ /* we assume only from port-B to port-D */
++ if (port < 1 || port > 3)
++ return 0;
++ /* intel port is 1-based */
++ return port + intel_base_nid(codec) - 1;
++ }
++
++ if (port < 1 || port > spec->port_num)
++ return 0;
++ return spec->port_map[port - 1];
++}
++
+ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
+ {
+ struct hda_codec *codec = audio_ptr;
+ int pin_nid;
+ int dev_id = pipe;
+
+- /* we assume only from port-B to port-D */
+- if (port < 1 || port > 3)
++ pin_nid = intel_port2pin(codec, port);
++ if (!pin_nid)
+ return;
+-
+- pin_nid = port + intel_base_nid(codec) - 1; /* intel port is 1-based */
+-
+ /* skip notification during system suspend (but not in runtime PM);
+ * the state will be updated at resume
+ */
+@@ -2565,6 +2580,8 @@ static void register_i915_notifier(struct hda_codec *codec)
+ spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+ snd_hdac_acomp_register_notifier(&codec->bus->core,
+ &spec->drm_audio_ops);
++ /* no need for forcible resume for jack check thanks to notifier */
++ codec->relaxed_resume = 1;
+ }
+
+ /* setup_stream ops override for HSW+ */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f24a757f8239..de224cbea7a0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7657,9 +7657,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++ {0x14, 0x90170110},
++ {0x21, 0x04211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014020},
+@@ -8800,6 +8803,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
+ {0x18, 0x01a19030},
+ {0x1a, 0x01813040},
+ {0x21, 0x01014020}),
++ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
++ {0x16, 0x01813030},
++ {0x17, 0x02211010},
++ {0x18, 0x01a19040},
++ {0x21, 0x01014020}),
+ SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x14, 0x01014010},
+ {0x18, 0x01a19020},
+diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
+index 1f57126708e7..c9f9820968bb 100644
+--- a/sound/soc/codecs/hdac_hdmi.c
++++ b/sound/soc/codecs/hdac_hdmi.c
+@@ -1859,6 +1859,12 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
+ {
+ struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = hdmi->hdev;
++ int ret;
++
++ ret = snd_hdac_acomp_register_notifier(hdev->bus, NULL);
++ if (ret < 0)
++ dev_err(&hdev->dev, "notifier unregister failed: err: %d\n",
++ ret);
+
+ pm_runtime_disable(&hdev->dev);
+ }
+diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
+index ec7e673ba475..70ed28d97d49 100644
+--- a/sound/soc/generic/audio-graph-card.c
++++ b/sound/soc/generic/audio-graph-card.c
+@@ -435,9 +435,6 @@ static int graph_for_each_link(struct asoc_simple_priv *priv,
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_get_parent(codec_ep);
+
+- of_node_put(codec_ep);
+- of_node_put(codec_port);
+-
+ /* get convert-xxx property */
+ memset(&adata, 0, sizeof(adata));
+ graph_parse_convert(dev, codec_ep, &adata);
+@@ -457,6 +454,9 @@ static int graph_for_each_link(struct asoc_simple_priv *priv,
+ else
+ ret = func_noml(priv, cpu_ep, codec_ep, li);
+
++ of_node_put(codec_ep);
++ of_node_put(codec_port);
++
+ if (ret < 0)
+ return ret;
+
+diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
+index 5407d217228e..c0aef45d335a 100644
+--- a/sound/soc/intel/boards/Kconfig
++++ b/sound/soc/intel/boards/Kconfig
+@@ -392,7 +392,7 @@ config SND_SOC_INTEL_SOF_RT5682_MACH
+ (SND_SOC_SOF_BAYTRAIL && X86_INTEL_LPSS)
+ select SND_SOC_RT5682
+ select SND_SOC_DMIC
+- select SND_SOC_HDAC_HDMI if SND_SOC_SOF_HDA_COMMON
++ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC machine driver for SOF platforms
+ with rt5682 codec.
+diff --git a/sound/soc/meson/axg-tdm.h b/sound/soc/meson/axg-tdm.h
+index e578b6f40a07..5774ce0916d4 100644
+--- a/sound/soc/meson/axg-tdm.h
++++ b/sound/soc/meson/axg-tdm.h
+@@ -40,7 +40,7 @@ struct axg_tdm_iface {
+
+ static inline bool axg_tdm_lrclk_invert(unsigned int fmt)
+ {
+- return (fmt & SND_SOC_DAIFMT_I2S) ^
++ return ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) ^
+ !!(fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_NB_IF));
+ }
+
+diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
+index 8cb06dab234e..7647b3d4c0ba 100644
+--- a/sound/soc/sh/rcar/ctu.c
++++ b/sound/soc/sh/rcar/ctu.c
+@@ -108,7 +108,7 @@ static int rsnd_ctu_probe_(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+ {
+- return rsnd_cmd_attach(io, rsnd_mod_id(mod) / 4);
++ return rsnd_cmd_attach(io, rsnd_mod_id(mod));
+ }
+
+ static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 41c0cfaf2db5..6aeba0d66ec5 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -158,9 +158,10 @@ static void soc_init_component_debugfs(struct snd_soc_component *component)
+ component->card->debugfs_card_root);
+ }
+
+- if (!component->debugfs_root) {
++ if (IS_ERR(component->debugfs_root)) {
+ dev_warn(component->dev,
+- "ASoC: Failed to create component debugfs directory\n");
++ "ASoC: Failed to create component debugfs directory: %ld\n",
++ PTR_ERR(component->debugfs_root));
+ return;
+ }
+
+@@ -212,18 +213,21 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
+
+ card->debugfs_card_root = debugfs_create_dir(card->name,
+ snd_soc_debugfs_root);
+- if (!card->debugfs_card_root) {
++ if (IS_ERR(card->debugfs_card_root)) {
+ dev_warn(card->dev,
+- "ASoC: Failed to create card debugfs directory\n");
++ "ASoC: Failed to create card debugfs directory: %ld\n",
++ PTR_ERR(card->debugfs_card_root));
++ card->debugfs_card_root = NULL;
+ return;
+ }
+
+ card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
+ card->debugfs_card_root,
+ &card->pop_time);
+- if (!card->debugfs_pop_time)
++ if (IS_ERR(card->debugfs_pop_time))
+ dev_warn(card->dev,
+- "ASoC: Failed to create pop time debugfs file\n");
++ "ASoC: Failed to create pop time debugfs file: %ld\n",
++ PTR_ERR(card->debugfs_pop_time));
+ }
+
+ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+@@ -2837,14 +2841,12 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+ snd_soc_dapm_shutdown(card);
+ snd_soc_flush_all_delayed_work(card);
+
+- mutex_lock(&client_mutex);
+ /* remove all components used by DAI links on this card */
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd) {
+ soc_remove_link_components(card, rtd, order);
+ }
+ }
+- mutex_unlock(&client_mutex);
+
+ soc_cleanup_card_resources(card);
+ if (!unregister)
+@@ -2863,7 +2865,9 @@ static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+ */
+ int snd_soc_unregister_card(struct snd_soc_card *card)
+ {
++ mutex_lock(&client_mutex);
+ snd_soc_unbind_card(card, true);
++ mutex_unlock(&client_mutex);
+ dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
+
+ return 0;
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 55f8278077f4..c91df5a9c840 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -2155,23 +2155,25 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
+ {
+ struct dentry *d;
+
+- if (!parent)
++ if (!parent || IS_ERR(parent))
+ return;
+
+ dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);
+
+- if (!dapm->debugfs_dapm) {
++ if (IS_ERR(dapm->debugfs_dapm)) {
+ dev_warn(dapm->dev,
+- "ASoC: Failed to create DAPM debugfs directory\n");
++ "ASoC: Failed to create DAPM debugfs directory %ld\n",
++ PTR_ERR(dapm->debugfs_dapm));
+ return;
+ }
+
+ d = debugfs_create_file("bias_level", 0444,
+ dapm->debugfs_dapm, dapm,
+ &dapm_bias_fops);
+- if (!d)
++ if (IS_ERR(d))
+ dev_warn(dapm->dev,
+- "ASoC: Failed to create bias level debugfs file\n");
++ "ASoC: Failed to create bias level debugfs file: %ld\n",
++ PTR_ERR(d));
+ }
+
+ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
+@@ -2185,10 +2187,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
+ d = debugfs_create_file(w->name, 0444,
+ dapm->debugfs_dapm, w,
+ &dapm_widget_power_fops);
+- if (!d)
++ if (IS_ERR(d))
+ dev_warn(w->dapm->dev,
+- "ASoC: Failed to create %s debugfs file\n",
+- w->name);
++ "ASoC: Failed to create %s debugfs file: %ld\n",
++ w->name, PTR_ERR(d));
+ }
+
+ static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
+diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
+index 3ef3093560ba..bfed711258ce 100644
+--- a/tools/bpf/bpftool/jit_disasm.c
++++ b/tools/bpf/bpftool/jit_disasm.c
+@@ -11,6 +11,8 @@
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
++#define _GNU_SOURCE
++#include <stdio.h>
+ #include <stdarg.h>
+ #include <stdint.h>
+ #include <stdio.h>
+@@ -44,11 +46,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
+ char *s;
+
+ va_start(ap, fmt);
++ if (vasprintf(&s, fmt, ap) < 0)
++ return -1;
++ va_end(ap);
++
+ if (!oper_count) {
+ int i;
+
+- s = va_arg(ap, char *);
+-
+ /* Strip trailing spaces */
+ i = strlen(s) - 1;
+ while (s[i] == ' ')
+@@ -61,11 +65,10 @@ static int fprintf_json(void *out, const char *fmt, ...)
+ } else if (!strcmp(fmt, ",")) {
+ /* Skip */
+ } else {
+- s = va_arg(ap, char *);
+ jsonw_string(json_wtr, s);
+ oper_count++;
+ }
+- va_end(ap);
++ free(s);
+ return 0;
+ }
+
+diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
+index a59c53705093..939ac2fcc783 100644
+--- a/tools/build/feature/test-all.c
++++ b/tools/build/feature/test-all.c
+@@ -182,7 +182,7 @@
+ # include "test-disassembler-four-args.c"
+ #undef main
+
+-#define main main_test_zstd
++#define main main_test_libzstd
+ # include "test-libzstd.c"
+ #undef main
+
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index a8b823c30b43..29a5bc3d5c66 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -3143,6 +3143,7 @@ struct bpf_prog_info {
+ char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u32 gpl_compatible:1;
++ __u32 :31; /* alignment pad */
+ __u64 netns_dev;
+ __u64 netns_ino;
+ __u32 nr_jited_ksyms;
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 151f7ac1882e..3865a5d27251 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -3487,10 +3487,7 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
+ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
+ struct bpf_object **pobj, int *prog_fd)
+ {
+- struct bpf_object_open_attr open_attr = {
+- .file = attr->file,
+- .prog_type = attr->prog_type,
+- };
++ struct bpf_object_open_attr open_attr = {};
+ struct bpf_program *prog, *first_prog = NULL;
+ enum bpf_attach_type expected_attach_type;
+ enum bpf_prog_type prog_type;
+@@ -3503,6 +3500,9 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
+ if (!attr->file)
+ return -EINVAL;
+
++ open_attr.file = attr->file;
++ open_attr.prog_type = attr->prog_type;
++
+ obj = bpf_object__open_xattr(&open_attr);
+ if (IS_ERR_OR_NULL(obj))
+ return -ENOENT;
+diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
+index 38667b62f1fe..ca272c5b67f4 100644
+--- a/tools/lib/bpf/xsk.c
++++ b/tools/lib/bpf/xsk.c
+@@ -337,7 +337,8 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
+
+ channels.cmd = ETHTOOL_GCHANNELS;
+ ifr.ifr_data = (void *)&channels;
+- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ);
++ strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
++ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
+ if (err && errno != EOPNOTSUPP) {
+ ret = -errno;
+@@ -561,7 +562,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ err = -errno;
+ goto out_socket;
+ }
+- strncpy(xsk->ifname, ifname, IFNAMSIZ);
++ strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
++ xsk->ifname[IFNAMSIZ - 1] = '\0';
+
+ err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+ if (err)
+diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
+index 85fbcd265351..17b81bc403e4 100644
+--- a/tools/perf/Makefile.config
++++ b/tools/perf/Makefile.config
+@@ -637,9 +637,14 @@ endif
+
+ ifndef NO_SLANG
+ ifneq ($(feature-libslang), 1)
+- msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
+- NO_SLANG := 1
+- else
++ ifneq ($(feature-libslang-include-subdir), 1)
++ msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
++ NO_SLANG := 1
++ else
++ CFLAGS += -DHAVE_SLANG_INCLUDE_SUBDIR
++ endif
++ endif
++ ifndef NO_SLANG
+ # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
+ CFLAGS += -I/usr/include/slang
+ CFLAGS += -DHAVE_SLANG_SUPPORT
+diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
+index 911426721170..0a278bbcaba6 100644
+--- a/tools/perf/arch/arm/util/cs-etm.c
++++ b/tools/perf/arch/arm/util/cs-etm.c
+@@ -31,6 +31,8 @@ struct cs_etm_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *cs_etm_pmu;
+ struct perf_evlist *evlist;
++ int wrapped_cnt;
++ bool *wrapped;
+ bool snapshot_mode;
+ size_t snapshot_size;
+ };
+@@ -536,16 +538,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
+ return 0;
+ }
+
+-static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
++static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
++{
++ bool *wrapped;
++ int cnt = ptr->wrapped_cnt;
++
++ /* Make @ptr->wrapped as big as @idx */
++ while (cnt <= idx)
++ cnt++;
++
++ /*
++ * Freed in cs_etm_recording_free(). Using realloc() to avoid
++ * cross-compilation problems where the host system supports
++ * reallocarray() but the target does not.
++ */
++ wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
++ if (!wrapped)
++ return -ENOMEM;
++
++ wrapped[cnt - 1] = false;
++ ptr->wrapped_cnt = cnt;
++ ptr->wrapped = wrapped;
++
++ return 0;
++}
++
++static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
++ size_t buffer_size, u64 head)
++{
++ u64 i, watermark;
++ u64 *buf = (u64 *)buffer;
++ size_t buf_size = buffer_size;
++
++ /*
++ * We want to look at the very last 512 bytes (chosen arbitrarily) in
++ * the ring buffer.
++ */
++ watermark = buf_size - 512;
++
++ /*
++ * @head is continuously increasing - if its value is equal to or greater
++ * than the size of the ring buffer, it has wrapped around.
++ */
++ if (head >= buffer_size)
++ return true;
++
++ /*
++ * The value of @head is somewhere within the size of the ring buffer.
++ * This can mean that there hasn't been enough data to fill the ring
++ * buffer yet, or that the trace time was so long that @head has numerically
++ * wrapped around. To find out, we need to check if we have data at the very
++ * end of the ring buffer. We can reliably do this because mmap'ed
++ * pages are zeroed out and there is a fresh mapping with every new
++ * session.
++ */
++
++ /* @head is less than 512 bytes from the end of the ring buffer */
++ if (head > watermark)
++ watermark = head;
++
++ /*
++ * Speed things up by using 64 bit transactions (see "u64 *buf" above)
++ */
++ watermark >>= 3;
++ buf_size >>= 3;
++
++ /*
++ * If we find trace data at the end of the ring buffer, @head has
++ * been there and has numerically wrapped around at least once.
++ */
++ for (i = watermark; i < buf_size; i++)
++ if (buf[i])
++ return true;
++
++ return false;
++}
++
++static int cs_etm_find_snapshot(struct auxtrace_record *itr,
+ int idx, struct auxtrace_mmap *mm,
+- unsigned char *data __maybe_unused,
++ unsigned char *data,
+ u64 *head, u64 *old)
+ {
++ int err;
++ bool wrapped;
++ struct cs_etm_recording *ptr =
++ container_of(itr, struct cs_etm_recording, itr);
++
++ /*
++ * Allocate memory to keep track of wrapping if this is the first
++ * time we deal with this *mm.
++ */
++ if (idx >= ptr->wrapped_cnt) {
++ err = cs_etm_alloc_wrapped_array(ptr, idx);
++ if (err)
++ return err;
++ }
++
++ /*
++ * Check to see if *head has wrapped around. If it hasn't, only the
++ * amount of data between *head and *old is snapshotted to avoid
++ * bloating the perf.data file with zeros. But as soon as *head has
++ * wrapped around, the entire size of the AUX ring buffer is taken.
++ */
++ wrapped = ptr->wrapped[idx];
++ if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
++ wrapped = true;
++ ptr->wrapped[idx] = true;
++ }
++
+ pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
+ __func__, idx, (size_t)*old, (size_t)*head, mm->len);
+
+- *old = *head;
+- *head += mm->len;
++ /* No wrap has occurred, we can just use *head and *old. */
++ if (!wrapped)
++ return 0;
++
++ /*
++ * *head has wrapped around - adjust *head and *old to pick up the
++ * entire content of the AUX buffer.
++ */
++ if (*head >= mm->len) {
++ *old = *head - mm->len;
++ } else {
++ *head += mm->len;
++ *old = *head - mm->len;
++ }
+
+ return 0;
+ }
+@@ -586,6 +703,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
+ {
+ struct cs_etm_recording *ptr =
+ container_of(itr, struct cs_etm_recording, itr);
++
++ zfree(&ptr->wrapped);
+ free(ptr);
+ }
+
+diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
+index aea7b1fe85aa..c441a34cb1c0 100644
+--- a/tools/perf/jvmti/libjvmti.c
++++ b/tools/perf/jvmti/libjvmti.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/compiler.h>
++#include <linux/string.h>
+ #include <sys/types.h>
+ #include <stdio.h>
+ #include <string.h>
+@@ -162,8 +163,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
+ result[i] = '\0';
+ } else {
+ /* fallback case */
+- size_t file_name_len = strlen(file_name);
+- strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
++ strlcpy(result, file_name, max_length);
+ }
+ }
+
+diff --git a/tools/perf/perf.h b/tools/perf/perf.h
+index d59dee61b64d..a26555baf692 100644
+--- a/tools/perf/perf.h
++++ b/tools/perf/perf.h
+@@ -26,7 +26,7 @@ static inline unsigned long long rdclock(void)
+ }
+
+ #ifndef MAX_NR_CPUS
+-#define MAX_NR_CPUS 1024
++#define MAX_NR_CPUS 2048
+ #endif
+
+ extern const char *input_name;
+diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
+index 4a69c07f4101..8f3c80e13584 100644
+--- a/tools/perf/tests/parse-events.c
++++ b/tools/perf/tests/parse-events.c
+@@ -18,6 +18,32 @@
+ #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
++#if defined(__s390x__)
++/* Return true if kvm module is available and loaded. Test this
++ * and retun success when trace point kvm_s390_create_vm
++ * exists. Otherwise this test always fails.
++ */
++static bool kvm_s390_create_vm_valid(void)
++{
++ char *eventfile;
++ bool rc = false;
++
++ eventfile = get_events_file("kvm-s390");
++
++ if (eventfile) {
++ DIR *mydir = opendir(eventfile);
++
++ if (mydir) {
++ rc = true;
++ closedir(mydir);
++ }
++ put_events_file(eventfile);
++ }
++
++ return rc;
++}
++#endif
++
+ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
+ {
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+@@ -1642,6 +1668,7 @@ static struct evlist_test test__events[] = {
+ {
+ .name = "kvm-s390:kvm_s390_create_vm",
+ .check = test__checkevent_tracepoint,
++ .valid = kvm_s390_create_vm_valid,
+ .id = 100,
+ },
+ #endif
+diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+index 61c9f8fc6fa1..58a99a292930 100755
+--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
++++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+@@ -44,7 +44,7 @@ trace_libc_inet_pton_backtrace() {
+ eventattr='max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+- echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
++ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ *)
+ eventattr='max-stack=3'
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index 98d934a36d86..b0d089a95dac 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -97,11 +97,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
+ struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
+ struct annotation *notes = browser__annotation(browser);
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
++ const bool is_current_entry = ui_browser__is_current_entry(browser, row);
+ struct annotation_write_ops ops = {
+ .first_line = row == 0,
+- .current_entry = ui_browser__is_current_entry(browser, row),
++ .current_entry = is_current_entry,
+ .change_color = (!notes->options->hide_src_code &&
+- (!ops.current_entry ||
++ (!is_current_entry ||
+ (browser->use_navkeypressed &&
+ !browser->navkeypressed))),
+ .width = browser->width,
+diff --git a/tools/perf/ui/libslang.h b/tools/perf/ui/libslang.h
+index c0686cda39a5..991e692b9b46 100644
+--- a/tools/perf/ui/libslang.h
++++ b/tools/perf/ui/libslang.h
+@@ -10,7 +10,12 @@
+ #ifndef HAVE_LONG_LONG
+ #define HAVE_LONG_LONG __GLIBC_HAVE_LONG_LONG
+ #endif
++
++#ifdef HAVE_SLANG_INCLUDE_SUBDIR
++#include <slang/slang.h>
++#else
+ #include <slang.h>
++#endif
+
+ #if SLANG_VERSION < 20104
+ #define slsmg_printf(msg, args...) \
+diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
+index 79db038b56f2..c8ce13419d9b 100644
+--- a/tools/perf/util/annotate.c
++++ b/tools/perf/util/annotate.c
+@@ -931,9 +931,8 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
+ if (sym == NULL)
+ return 0;
+ src = symbol__hists(sym, evsel->evlist->nr_entries);
+- if (src == NULL)
+- return -ENOMEM;
+- return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
++ return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx,
++ addr, sample) : 0;
+ }
+
+ static int symbol__account_cycles(u64 addr, u64 start,
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 4a5947625c5c..2c46f9aa416c 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -589,6 +589,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
+ {
+ char bf[128];
+
++ if (!evsel)
++ goto out_unknown;
++
+ if (evsel->name)
+ return evsel->name;
+
+@@ -628,7 +631,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
+
+ evsel->name = strdup(bf);
+
+- return evsel->name ?: "unknown";
++ if (evsel->name)
++ return evsel->name;
++out_unknown:
++ return "unknown";
+ }
+
+ const char *perf_evsel__group_name(struct perf_evsel *evsel)
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index fb0aa661644b..b82d4577d969 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1100,7 +1100,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
+ return 0;
+ }
+
+-#define MAX_CACHES 2000
++#define MAX_CACHES (MAX_NR_CPUS * 4)
+
+ static int write_cache(struct feat_fd *ff,
+ struct perf_evlist *evlist __maybe_unused)
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 699e020737d9..fabdb6dde88e 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -85,26 +85,49 @@ struct egroup {
+ const char *metric_expr;
+ };
+
+-static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
+- const char **ids,
+- int idnum,
+- struct perf_evsel **metric_events)
++static bool record_evsel(int *ind, struct perf_evsel **start,
++ int idnum,
++ struct perf_evsel **metric_events,
++ struct perf_evsel *ev)
++{
++ metric_events[*ind] = ev;
++ if (*ind == 0)
++ *start = ev;
++ if (++*ind == idnum) {
++ metric_events[*ind] = NULL;
++ return true;
++ }
++ return false;
++}
++
++static struct perf_evsel *find_evsel_group(struct perf_evlist *perf_evlist,
++ const char **ids,
++ int idnum,
++ struct perf_evsel **metric_events)
+ {
+ struct perf_evsel *ev, *start = NULL;
+ int ind = 0;
+
+ evlist__for_each_entry (perf_evlist, ev) {
++ if (ev->collect_stat)
++ continue;
+ if (!strcmp(ev->name, ids[ind])) {
+- metric_events[ind] = ev;
+- if (ind == 0)
+- start = ev;
+- if (++ind == idnum) {
+- metric_events[ind] = NULL;
++ if (record_evsel(&ind, &start, idnum,
++ metric_events, ev))
+ return start;
+- }
+ } else {
++ /*
++ * We saw some other event that is not
++ * in our list of events. Discard
++ * the whole match and start again.
++ */
+ ind = 0;
+ start = NULL;
++ if (!strcmp(ev->name, ids[ind])) {
++ if (record_evsel(&ind, &start, idnum,
++ metric_events, ev))
++ return start;
++ }
+ }
+ }
+ /*
+@@ -134,8 +157,8 @@ static int metricgroup__setup_events(struct list_head *groups,
+ ret = -ENOMEM;
+ break;
+ }
+- evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
+- metric_events);
++ evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
++ metric_events);
+ if (!evsel) {
+ pr_debug("Cannot resolve %s: %s\n",
+ eg->metric_name, eg->metric_expr);
+diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
+index 4c53bae5644b..94bed4031def 100644
+--- a/tools/perf/util/stat-display.c
++++ b/tools/perf/util/stat-display.c
+@@ -542,7 +542,8 @@ static void collect_all_aliases(struct perf_stat_config *config, struct perf_evs
+ alias->scale != counter->scale ||
+ alias->cgrp != counter->cgrp ||
+ strcmp(alias->unit, counter->unit) ||
+- perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter))
++ perf_evsel__is_clock(alias) != perf_evsel__is_clock(counter) ||
++ !strcmp(alias->pmu_name, counter->pmu_name))
+ break;
+ alias->merged_stat = true;
+ cb(config, alias, data, false);
+diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
+index 83d8094be4fe..0ef98e991ade 100644
+--- a/tools/perf/util/stat-shadow.c
++++ b/tools/perf/util/stat-shadow.c
+@@ -303,7 +303,7 @@ static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
+ struct perf_evsel *c2;
+
+ evlist__for_each_entry (evsel_list, c2) {
+- if (!strcasecmp(c2->name, name))
++ if (!strcasecmp(c2->name, name) && !c2->collect_stat)
+ return c2;
+ }
+ return NULL;
+@@ -342,7 +342,8 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
+ if (leader) {
+ /* Search in group */
+ for_each_group_member (oc, leader) {
+- if (!strcasecmp(oc->name, metric_names[i])) {
++ if (!strcasecmp(oc->name, metric_names[i]) &&
++ !oc->collect_stat) {
+ found = true;
+ break;
+ }
+@@ -722,6 +723,7 @@ static void generic_metric(struct perf_stat_config *config,
+ double ratio;
+ int i;
+ void *ctxp = out->ctx;
++ char *n, *pn;
+
+ expr__ctx_init(&pctx);
+ expr__add_id(&pctx, name, avg);
+@@ -741,7 +743,19 @@ static void generic_metric(struct perf_stat_config *config,
+ stats = &v->stats;
+ scale = 1.0;
+ }
+- expr__add_id(&pctx, metric_events[i]->name, avg_stats(stats)*scale);
++
++ n = strdup(metric_events[i]->name);
++ if (!n)
++ return;
++ /*
++ * This display code with --no-merge adds [cpu] postfixes.
++ * These are not supported by the parser. Remove everything
++ * after the space.
++ */
++ pn = strchr(n, ' ');
++ if (pn)
++ *pn = 0;
++ expr__add_id(&pctx, n, avg_stats(stats)*scale);
+ }
+ if (!metric_events[i]) {
+ const char *p = metric_expr;
+@@ -758,6 +772,9 @@ static void generic_metric(struct perf_stat_config *config,
+ (metric_name ? metric_name : name) : "", 0);
+ } else
+ print_metric(config, ctxp, NULL, NULL, "", 0);
++
++ for (i = 1; i < pctx.num_ids; i++)
++ free((void *)pctx.ids[i].name);
+ }
+
+ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
+diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
+index f49bc4aa2a08..6ed82fba5aaa 100644
+--- a/tools/power/cpupower/utils/cpufreq-set.c
++++ b/tools/power/cpupower/utils/cpufreq-set.c
+@@ -305,6 +305,8 @@ int cmd_freq_set(int argc, char **argv)
+ bitmask_setbit(cpus_chosen, cpus->cpu);
+ cpus = cpus->next;
+ }
++ /* Set the last cpu in related cpus list */
++ bitmask_setbit(cpus_chosen, cpus->cpu);
+ cpufreq_put_related_cpus(cpus);
+ }
+ }
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index e36356e2377e..1c9511262947 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -275,4 +275,5 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
+ ) > $(VERIFIER_TESTS_H))
+
+ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
+- $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H)
++ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
++ feature
+diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+index 0575751bc1bc..e2f6ed0a583d 100644
+--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
++++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+@@ -61,7 +61,7 @@ struct sr6_tlv_t {
+ unsigned char value[0];
+ } BPF_PACKET_HEADER;
+
+-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
++static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+ {
+ void *cursor, *data_end;
+ struct ip6_srh_t *srh;
+@@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+ return srh;
+ }
+
+-__attribute__((always_inline))
++static __always_inline
+ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
+ uint32_t old_pad, uint32_t pad_off)
+ {
+@@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
+ return 0;
+ }
+
+-__attribute__((always_inline))
++static __always_inline
+ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
+ uint32_t *tlv_off, uint32_t *pad_size,
+ uint32_t *pad_off)
+@@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
+ return 0;
+ }
+
+-__attribute__((always_inline))
++static __always_inline
+ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
+ struct sr6_tlv_t *itlv, uint8_t tlv_size)
+ {
+@@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
+ return update_tlv_pad(skb, new_pad, pad_size, pad_off);
+ }
+
+-__attribute__((always_inline))
++static __always_inline
+ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
+ uint32_t tlv_off)
+ {
+@@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
+ return update_tlv_pad(skb, new_pad, pad_size, pad_off);
+ }
+
+-__attribute__((always_inline))
++static __always_inline
+ int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
+ {
+ int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
+diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
+index 288cb740e005..6438d4dc8ae1 100644
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -207,33 +207,35 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
+ self->retval = (uint32_t)res;
+ }
+
+-/* test the sequence of 1k jumps */
++#define MAX_JMP_SEQ 8192
++
++/* test the sequence of 8k jumps */
+ static void bpf_fill_scale1(struct bpf_test *self)
+ {
+ struct bpf_insn *insn = self->fill_insns;
+ int i = 0, k = 0;
+
+ insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+- /* test to check that the sequence of 1024 jumps is acceptable */
+- while (k++ < 1024) {
++ /* test to check that the long sequence of jumps is acceptable */
++ while (k++ < MAX_JMP_SEQ) {
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32);
+- insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2);
++ insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
+ insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
+ -8 * (k % 64 + 1));
+ }
+- /* every jump adds 1024 steps to insn_processed, so to stay exactly
+- * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
++ /* every jump adds 1 step to insn_processed, so to stay exactly
++ * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
+ */
+- while (i < MAX_TEST_INSNS - 1025)
++ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+ insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
+ self->retval = 42;
+ }
+
+-/* test the sequence of 1k jumps in inner most function (function depth 8)*/
++/* test the sequence of 8k jumps in inner most function (function depth 8)*/
+ static void bpf_fill_scale2(struct bpf_test *self)
+ {
+ struct bpf_insn *insn = self->fill_insns;
+@@ -245,19 +247,20 @@ static void bpf_fill_scale2(struct bpf_test *self)
+ insn[i++] = BPF_EXIT_INSN();
+ }
+ insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+- /* test to check that the sequence of 1024 jumps is acceptable */
+- while (k++ < 1024) {
++ /* test to check that the long sequence of jumps is acceptable */
++ k = 0;
++ while (k++ < MAX_JMP_SEQ) {
+ insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32);
+- insn[i++] = BPF_JMP_IMM(BPF_JGT, BPF_REG_0, bpf_semi_rand_get(), 2);
++ insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
+ insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
+ insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
+ -8 * (k % (64 - 4 * FUNC_NEST) + 1));
+ }
+- /* every jump adds 1024 steps to insn_processed, so to stay exactly
+- * within 1m limit add MAX_TEST_INSNS - 1025 MOVs and 1 EXIT
++ /* every jump adds 1 step to insn_processed, so to stay exactly
++ * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
+ */
+- while (i < MAX_TEST_INSNS - 1025)
++ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+ insn[i++] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 42);
+ insn[i] = BPF_EXIT_INSN();
+ self->prog_len = i + 1;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 2f2d24a4dd5c..e629766f0ec8 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1790,7 +1790,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ if (!map->hva)
+ return;
+
+- if (map->page)
++ if (map->page != KVM_UNMAPPED_PAGE)
+ kunmap(map->page);
+ #ifdef CONFIG_HAS_IOMEM
+ else
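For readers skimming the coresight change in tools/perf/arch/arm/util/cs-etm.c above, the wrap-detection idea is simple: mmap'ed AUX pages start out zeroed, so any nonzero word in the last 512 bytes of the ring buffer means the head pointer has already passed the end at least once. A minimal standalone sketch of that heuristic follows; it is a hedged illustration only, and the function and variable names below are hypothetical, not the patched perf code. It assumes an 8-byte-aligned buffer of at least 512 bytes.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch mirroring the logic of cs_etm_buffer_has_wrapped(). */
static bool aux_buffer_has_wrapped(const unsigned char *buffer,
				   size_t buffer_size, uint64_t head)
{
	const uint64_t *buf = (const uint64_t *)buffer;
	size_t buf_size = buffer_size;
	uint64_t watermark = buffer_size - 512;	/* scan only the last 512 bytes */

	/* head only grows; a full buffer's worth means it has wrapped */
	if (head >= buffer_size)
		return true;

	/* if head already sits inside the tail window, scan from head onward */
	if (head > watermark)
		watermark = head;

	/* compare 64 bits at a time */
	watermark >>= 3;
	buf_size >>= 3;

	for (uint64_t i = watermark; i < buf_size; i++)
		if (buf[i])	/* data at the very end => wrapped at least once */
			return true;

	return false;
}

Scanning only a short tail window keeps the per-snapshot cost tiny while staying reliable, precisely because a fresh mapping cannot contain stale nonzero data.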
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-21 14:43 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-21 14:43 UTC (permalink / raw
To: gentoo-commits
commit: 9b389634cc124ae7238533cc9784f5ff9dcf1029
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 21 14:43:30 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 21 14:43:30 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b389634
Linux patch 5.2.2
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1001_linux-5.2.2.patch | 1203 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1207 insertions(+)
diff --git a/0000_README b/0000_README
index 3d37d29..d2c1e9b 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-5.2.1.patch
From: https://www.kernel.org
Desc: Linux 5.2.1
+Patch: 1001_linux-5.2.2.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.2
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1001_linux-5.2.2.patch b/1001_linux-5.2.2.patch
new file mode 100644
index 0000000..d6a081e
--- /dev/null
+++ b/1001_linux-5.2.2.patch
@@ -0,0 +1,1203 @@
+diff --git a/Makefile b/Makefile
+index d8f5dbfd6b76..d6c65b678d21 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
+index 182ce67dfe10..c2663fce7f6c 100644
+--- a/arch/arc/kernel/unwind.c
++++ b/arch/arc/kernel/unwind.c
+@@ -181,11 +181,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
+ return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
+ }
+
+-static void *unw_hdr_alloc(unsigned long sz)
+-{
+- return kmalloc(sz, GFP_KERNEL);
+-}
+-
+ static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+@@ -366,6 +361,10 @@ ret_err:
+ }
+
+ #ifdef CONFIG_MODULES
++static void *unw_hdr_alloc(unsigned long sz)
++{
++ return kmalloc(sz, GFP_KERNEL);
++}
+
+ static struct unwind_table *last_table;
+
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index e78cda94456b..68c476b20b57 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr)
+ return __test_facility(nr, &S390_lowcore.stfle_fac_list);
+ }
+
++static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
++{
++ register unsigned long reg0 asm("0") = size - 1;
++
++ asm volatile(
++ ".insn s,0xb2b00000,0(%1)" /* stfle */
++ : "+d" (reg0)
++ : "a" (stfle_fac_list)
++ : "memory", "cc");
++ return reg0;
++}
++
+ /**
+ * stfle - Store facility list extended
+ * @stfle_fac_list: array where facility list can be stored
+@@ -75,13 +87,8 @@ static inline void __stfle(u64 *stfle_fac_list, int size)
+ memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
+ if (S390_lowcore.stfl_fac_list & 0x01000000) {
+ /* More facility bits available with stfle */
+- register unsigned long reg0 asm("0") = size - 1;
+-
+- asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
+- : "+d" (reg0)
+- : "a" (stfle_fac_list)
+- : "memory", "cc");
+- nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
++ nr = __stfle_asm(stfle_fac_list, size);
++ nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
+ }
+ memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
+ }
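The min_t() clamp added above protects the memset() that follows: the machine may report more facility doublewords than the caller's buffer holds, in which case the unclamped size * 8 - nr would underflow. A minimal sketch with made-up numbers:

    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned long size = 4; /* caller's buffer, in doublewords */
        unsigned long reg0 = 5; /* machine reports 6 doublewords */

        /* Unclamped, nr would be 48 and size * 8 - nr would wrap to a
         * huge unsigned value, sending the memset() far past the buffer.
         */
        unsigned long nr = min_t(unsigned long, (reg0 + 1) * 8, size * 8);

        printf("clear %lu trailing bytes\n", size * 8 - nr); /* 0 */
        return 0;
    }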
+diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
+index f577c5f6031a..c563f8368b19 100644
+--- a/arch/s390/include/asm/sclp.h
++++ b/arch/s390/include/asm/sclp.h
+@@ -80,7 +80,6 @@ struct sclp_info {
+ unsigned char has_gisaf : 1;
+ unsigned char has_diag318 : 1;
+ unsigned char has_sipl : 1;
+- unsigned char has_sipl_g2 : 1;
+ unsigned char has_dirq : 1;
+ unsigned int ibc;
+ unsigned int mtid;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index d836af3ccc38..2c0a515428d6 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -286,12 +286,7 @@ static struct kobj_attribute sys_ipl_secure_attr =
+ static ssize_t ipl_has_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+ {
+- if (MACHINE_IS_LPAR)
+- return sprintf(page, "%i\n", !!sclp.has_sipl);
+- else if (MACHINE_IS_VM)
+- return sprintf(page, "%i\n", !!sclp.has_sipl_g2);
+- else
+- return sprintf(page, "%i\n", 0);
++ return sprintf(page, "%i\n", !!sclp.has_sipl);
+ }
+
+ static struct kobj_attribute sys_ipl_has_secure_attr =
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 7b23431be5cb..f49e11669271 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -1104,6 +1104,30 @@ ENTRY(irq_entries_start)
+ .endr
+ END(irq_entries_start)
+
++#ifdef CONFIG_X86_LOCAL_APIC
++ .align 8
++ENTRY(spurious_entries_start)
++ vector=FIRST_SYSTEM_VECTOR
++ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
++ pushl $(~vector+0x80) /* Note: always in signed byte range */
++ vector=vector+1
++ jmp common_spurious
++ .align 8
++ .endr
++END(spurious_entries_start)
++
++common_spurious:
++ ASM_CLAC
++ addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
++ SAVE_ALL switch_stacks=1
++ ENCODE_FRAME_POINTER
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ call smp_spurious_interrupt
++ jmp ret_from_intr
++ENDPROC(common_spurious)
++#endif
++
+ /*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 11aa3b2afa4d..8dbca86c249b 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -375,6 +375,18 @@ ENTRY(irq_entries_start)
+ .endr
+ END(irq_entries_start)
+
++ .align 8
++ENTRY(spurious_entries_start)
++ vector=FIRST_SYSTEM_VECTOR
++ .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
++ UNWIND_HINT_IRET_REGS
++ pushq $(~vector+0x80) /* Note: always in signed byte range */
++ jmp common_spurious
++ .align 8
++ vector=vector+1
++ .endr
++END(spurious_entries_start)
++
+ .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
+ #ifdef CONFIG_DEBUG_ENTRY
+ pushq %rax
+@@ -571,10 +583,20 @@ _ASM_NOKPROBE(interrupt_entry)
+
+ /* Interrupt entry/exit. */
+
+- /*
+- * The interrupt stubs push (~vector+0x80) onto the stack and
+- * then jump to common_interrupt.
+- */
++/*
++ * The interrupt stubs push (~vector+0x80) onto the stack and
++ * then jump to common_spurious/interrupt.
++ */
++common_spurious:
++ addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
++ call interrupt_entry
++ UNWIND_HINT_REGS indirect=1
++ call smp_spurious_interrupt /* rdi points to pt_regs */
++ jmp ret_from_intr
++END(common_spurious)
++_ASM_NOKPROBE(common_spurious)
++
++/* common_interrupt is a hotpath. Align it */
+ .p2align CONFIG_X86_L1_CACHE_SHIFT
+ common_interrupt:
+ addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index 32e666e1231e..cbd97e22d2f3 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -150,8 +150,11 @@ extern char irq_entries_start[];
+ #define trace_irq_entries_start irq_entries_start
+ #endif
+
++extern char spurious_entries_start[];
++
+ #define VECTOR_UNUSED NULL
+-#define VECTOR_RETRIGGERED ((void *)~0UL)
++#define VECTOR_SHUTDOWN ((void *)~0UL)
++#define VECTOR_RETRIGGERED ((void *)~1UL)
+
+ typedef struct irq_desc* vector_irq_t[NR_VECTORS];
+ DECLARE_PER_CPU(vector_irq_t, vector_irq);
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 85be316665b4..16c21ed97cb2 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2041,21 +2041,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
+ entering_irq();
+ trace_spurious_apic_entry(vector);
+
++ inc_irq_stat(irq_spurious_count);
++
++ /*
++ * If this is a spurious interrupt then do not acknowledge
++ */
++ if (vector == SPURIOUS_APIC_VECTOR) {
++ /* See SDM vol 3 */
++ pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
++ smp_processor_id());
++ goto out;
++ }
++
+ /*
+- * Check if this really is a spurious interrupt and ACK it
+- * if it is a vectored one. Just in case...
+- * Spurious interrupts should not be ACKed.
++ * If it is a vectored one, verify it's set in the ISR. If set,
++ * acknowledge it.
+ */
+ v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+- if (v & (1 << (vector & 0x1f)))
++ if (v & (1 << (vector & 0x1f))) {
++ pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
++ vector, smp_processor_id());
+ ack_APIC_irq();
+-
+- inc_irq_stat(irq_spurious_count);
+-
+- /* see sw-dev-man vol 3, chapter 7.4.13.5 */
+- pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+- "should never happen.\n", vector, smp_processor_id());
+-
++ } else {
++ pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
++ vector, smp_processor_id());
++ }
++out:
+ trace_spurious_apic_exit(vector);
+ exiting_irq();
+ }
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 53aa234a6803..c9fec0657eea 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1893,6 +1893,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
+ return ret;
+ }
+
++/*
++ * Interrupt shutdown masks the ioapic pin, but the interrupt might already
++ * be in flight, but not yet serviced by the target CPU. That means
++ * __synchronize_hardirq() would return and claim that everything is calmed
++ * down. So free_irq() would proceed and deactivate the interrupt and free
++ * resources.
++ *
++ * Once the target CPU comes around to service it, it will find a cleared
++ * vector and complain. While the spurious interrupt is harmless, the full
++ * release of resources might prevent the interrupt from being acknowledged,
++ * which keeps the hardware in a weird state.
++ *
++ * Verify that the corresponding Remote-IRR bits are clear.
++ */
++static int ioapic_irq_get_chip_state(struct irq_data *irqd,
++ enum irqchip_irq_state which,
++ bool *state)
++{
++ struct mp_chip_data *mcd = irqd->chip_data;
++ struct IO_APIC_route_entry rentry;
++ struct irq_pin_list *p;
++
++ if (which != IRQCHIP_STATE_ACTIVE)
++ return -EINVAL;
++
++ *state = false;
++ raw_spin_lock(&ioapic_lock);
++ for_each_irq_pin(p, mcd->irq_2_pin) {
++ rentry = __ioapic_read_entry(p->apic, p->pin);
++ /*
++ * The remote IRR is only valid in level trigger mode. Its
++ * meaning is undefined for edge-triggered interrupts and
++ * irrelevant because the IO-APIC treats them as fire and
++ * forget.
++ */
++ if (rentry.irr && rentry.trigger) {
++ *state = true;
++ break;
++ }
++ }
++ raw_spin_unlock(&ioapic_lock);
++ return 0;
++}
++
+ static struct irq_chip ioapic_chip __read_mostly = {
+ .name = "IO-APIC",
+ .irq_startup = startup_ioapic_irq,
+@@ -1902,6 +1946,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
+ .irq_eoi = ioapic_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
+@@ -1914,6 +1959,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
+ .irq_eoi = ioapic_ir_ack_level,
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
++ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+ };
+
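A rough sketch of the check the new callback performs, with a simplified stand-in for the IO-APIC routing entry (the real code walks the irq_2_pin list and reads each RTE under ioapic_lock):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct IO_APIC_route_entry. */
    struct rte { unsigned int irr : 1; unsigned int trigger : 1; };

    static bool irq_active(const struct rte *entries, int nr)
    {
        /* Remote-IRR is only meaningful for level-triggered entries. */
        for (int i = 0; i < nr; i++)
            if (entries[i].irr && entries[i].trigger)
                return true;
        return false;
    }

    int main(void)
    {
        struct rte pins[2] = { { .irr = 1, .trigger = 0 },  /* edge: ignored */
                               { .irr = 1, .trigger = 1 } };/* level: active */

        printf("in flight: %d\n", irq_active(pins, 2)); /* 1 */
        return 0;
    }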
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index e7cb78aed644..fdacb864c3dd 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -340,7 +340,7 @@ static void clear_irq_vector(struct irq_data *irqd)
+ trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
+ apicd->prev_cpu);
+
+- per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
++ per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
+ irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
+ apicd->vector = 0;
+
+@@ -349,7 +349,7 @@ static void clear_irq_vector(struct irq_data *irqd)
+ if (!vector)
+ return;
+
+- per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
++ per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
+ irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
+ apicd->prev_vector = 0;
+ apicd->move_in_progress = 0;
+diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
+index d2482bbbe3d0..87ef69a72c52 100644
+--- a/arch/x86/kernel/idt.c
++++ b/arch/x86/kernel/idt.c
+@@ -319,7 +319,8 @@ void __init idt_setup_apic_and_irq_gates(void)
+ #ifdef CONFIG_X86_LOCAL_APIC
+ for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
+ set_bit(i, system_vectors);
+- set_intr_gate(i, spurious_interrupt);
++ entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
++ set_intr_gate(i, entry);
+ }
+ #endif
+ }
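The address arithmetic above works because each stub emitted by the .rept block in the entry code is padded to exactly 8 bytes by the ".align 8" directives. A small sketch, with FIRST_SYSTEM_VECTOR and the base address as made-up values:

    #include <stdio.h>

    #define FIRST_SYSTEM_VECTOR 0xec /* made-up; the real value is arch-dependent */

    /* Vector i's stub sits at a fixed 8-byte stride from the start. */
    static unsigned long stub_addr(unsigned long base, int vector)
    {
        return base + 8 * (vector - FIRST_SYSTEM_VECTOR);
    }

    int main(void)
    {
        unsigned long base = 0x1000; /* made-up spurious_entries_start */

        printf("vector 0xee -> %#lx\n", stub_addr(base, 0xee)); /* 0x1010 */
        return 0;
    }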
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 9b68b5b00ac9..cc496eb7a8d2 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -247,7 +247,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+ if (!handle_irq(desc, regs)) {
+ ack_APIC_irq();
+
+- if (desc != VECTOR_RETRIGGERED) {
++ if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
+ pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+ __func__, smp_processor_id(),
+ vector);
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index a7359535caf5..b444f89a2041 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -655,7 +655,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
+
+ static int __init cacheinfo_sysfs_init(void)
+ {
+- return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
++ return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
++ "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+ }
+ device_initcall(cacheinfo_sysfs_init);
+diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
+index f962488546b6..103b5d37fa86 100644
+--- a/drivers/base/firmware_loader/fallback.c
++++ b/drivers/base/firmware_loader/fallback.c
+@@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE);
+ if (ret < 0)
+- return ret;
++ return false;
+
+ return fw_force_sysfs_fallback(opt_flags);
+ }
+diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
+index 4acbc47973e9..e78ff5c65ed6 100644
+--- a/drivers/crypto/nx/nx-842-powernv.c
++++ b/drivers/crypto/nx/nx-842-powernv.c
+@@ -27,8 +27,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
+ #define WORKMEM_ALIGN (CRB_ALIGN)
+ #define CSB_WAIT_MAX (5000) /* ms */
+ #define VAS_RETRIES (10)
+-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
+-#define MAX_CREDITS_PER_RXFIFO (1024)
+
+ struct nx842_workmem {
+ /* Below fields must be properly aligned */
+@@ -812,7 +810,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
+ rxattr.lnotify_lpid = lpid;
+ rxattr.lnotify_pid = pid;
+ rxattr.lnotify_tid = tid;
+- rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
++ /*
++ * Maximum RX window credits cannot be more than the number of CRBs
++ * in the RxFIFO. Otherwise, a checkstop can occur if the RxFIFO overruns.
++ */
++ rxattr.wcreds_max = fifo_size / CRB_SIZE;
+
+ /*
+ * Open a VAS receice window which is used to configure RxFIFO
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 427c78d4d948..8c57c5af0930 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -321,6 +321,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
+ }
+ EXPORT_SYMBOL(talitos_submit);
+
++static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
++{
++ struct talitos_edesc *edesc;
++
++ if (!is_sec1)
++ return request->desc->hdr;
++
++ if (!request->desc->next_desc)
++ return request->desc->hdr1;
++
++ edesc = container_of(request->desc, struct talitos_edesc, desc);
++
++ return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
++}
++
+ /*
+ * process what was done, notify callback of error if not
+ */
+@@ -342,12 +357,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
+
+ /* descriptors with their done bits set don't get the error */
+ rmb();
+- if (!is_sec1)
+- hdr = request->desc->hdr;
+- else if (request->desc->next_desc)
+- hdr = (request->desc + 1)->hdr1;
+- else
+- hdr = request->desc->hdr1;
++ hdr = get_request_hdr(request, is_sec1);
+
+ if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
+ status = 0;
+@@ -477,8 +487,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
+ }
+ }
+
+- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
+- return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
++ if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
++ struct talitos_edesc *edesc;
++
++ edesc = container_of(priv->chan[ch].fifo[iter].desc,
++ struct talitos_edesc, desc);
++ return ((struct talitos_desc *)
++ (edesc->buf + edesc->dma_len))->hdr;
++ }
+
+ return priv->chan[ch].fifo[iter].desc->hdr;
+ }
+@@ -948,36 +964,6 @@ badkey:
+ goto out;
+ }
+
+-/*
+- * talitos_edesc - s/w-extended descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @icv_ool: whether ICV is out-of-line
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @dma_len: length of dma mapped link_tbl space
+- * @dma_link_tbl: bus physical address of link_tbl/buf
+- * @desc: h/w descriptor
+- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
+- *
+- * if decrypting (with authcheck), or either one of src_nents or dst_nents
+- * is greater than 1, an integrity check value is concatenated to the end
+- * of link_tbl data
+- */
+-struct talitos_edesc {
+- int src_nents;
+- int dst_nents;
+- bool icv_ool;
+- dma_addr_t iv_dma;
+- int dma_len;
+- dma_addr_t dma_link_tbl;
+- struct talitos_desc desc;
+- union {
+- struct talitos_ptr link_tbl[0];
+- u8 buf[0];
+- };
+-};
+-
+ static void talitos_sg_unmap(struct device *dev,
+ struct talitos_edesc *edesc,
+ struct scatterlist *src,
+@@ -1466,15 +1452,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->dma_len = dma_len;
+- if (dma_len) {
+- void *addr = &edesc->link_tbl[0];
+-
+- if (is_sec1 && !dst)
+- addr += sizeof(struct talitos_desc);
+- edesc->dma_link_tbl = dma_map_single(dev, addr,
++ if (dma_len)
++ edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
+ edesc->dma_len,
+ DMA_BIDIRECTIONAL);
+- }
++
+ return edesc;
+ }
+
+@@ -1759,14 +1741,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+ struct talitos_desc *desc = &edesc->desc;
+- struct talitos_desc *desc2 = desc + 1;
++ struct talitos_desc *desc2 = (struct talitos_desc *)
++ (edesc->buf + edesc->dma_len);
+
+ unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
+ if (desc->next_desc &&
+ desc->ptr[5].ptr != desc2->ptr[5].ptr)
+ unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
+
+- talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
++ if (req_ctx->psrc)
++ talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+
+ /* When using hashctx-in, must unmap it. */
+ if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
+@@ -1833,7 +1817,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
+
+ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ struct ahash_request *areq, unsigned int length,
+- unsigned int offset,
+ void (*callback) (struct device *dev,
+ struct talitos_desc *desc,
+ void *context, int error))
+@@ -1872,9 +1855,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+
+ sg_count = edesc->src_nents ?: 1;
+ if (is_sec1 && sg_count > 1)
+- sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
+- edesc->buf + sizeof(struct talitos_desc),
+- length, req_ctx->nbuf);
++ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
+ else if (length)
+ sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
+ DMA_TO_DEVICE);
+@@ -1887,7 +1868,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ DMA_TO_DEVICE);
+ } else {
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+- &desc->ptr[3], sg_count, offset, 0);
++ &desc->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ }
+@@ -1911,7 +1892,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
+
+ if (is_sec1 && req_ctx->nbuf && length) {
+- struct talitos_desc *desc2 = desc + 1;
++ struct talitos_desc *desc2 = (struct talitos_desc *)
++ (edesc->buf + edesc->dma_len);
+ dma_addr_t next_desc;
+
+ memset(desc2, 0, sizeof(*desc2));
+@@ -1932,7 +1914,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ DMA_TO_DEVICE);
+ copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
+ sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
+- &desc2->ptr[3], sg_count, offset, 0);
++ &desc2->ptr[3], sg_count, 0, 0);
+ if (sg_count > 1)
+ sync_needed = true;
+ copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
+@@ -2043,7 +2025,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ struct device *dev = ctx->dev;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+- int offset = 0;
+ u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
+
+ if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+@@ -2083,6 +2064,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ sg_chain(req_ctx->bufsl, 2, areq->src);
+ req_ctx->psrc = req_ctx->bufsl;
+ } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
++ int offset;
++
+ if (nbytes_to_hash > blocksize)
+ offset = blocksize - req_ctx->nbuf;
+ else
+@@ -2095,7 +2078,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ sg_copy_to_buffer(areq->src, nents,
+ ctx_buf + req_ctx->nbuf, offset);
+ req_ctx->nbuf += offset;
+- req_ctx->psrc = areq->src;
++ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
++ offset);
+ } else
+ req_ctx->psrc = areq->src;
+
+@@ -2135,8 +2119,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+ if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
+
+- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
+- ahash_done);
++ return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
+ }
+
+ static int ahash_update(struct ahash_request *areq)
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index a65a63e0d6c1..979f6a61e545 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -65,6 +65,36 @@ struct talitos_desc {
+
+ #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32))
+
++/*
++ * talitos_edesc - s/w-extended descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @icv_ool: whether ICV is out-of-line
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @dma_len: length of dma mapped link_tbl space
++ * @dma_link_tbl: bus physical address of link_tbl/buf
++ * @desc: h/w descriptor
++ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
++ * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
++ *
++ * if decrypting (with authcheck), or either one of src_nents or dst_nents
++ * is greater than 1, an integrity check value is concatenated to the end
++ * of link_tbl data
++ */
++struct talitos_edesc {
++ int src_nents;
++ int dst_nents;
++ bool icv_ool;
++ dma_addr_t iv_dma;
++ int dma_len;
++ dma_addr_t dma_link_tbl;
++ struct talitos_desc desc;
++ union {
++ struct talitos_ptr link_tbl[0];
++ u8 buf[0];
++ };
++};
++
+ /**
+ * talitos_request - descriptor submission request
+ * @desc: descriptor pointer (kernel virtual)
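Several hunks above replace the old "desc + 1" pointer arithmetic with container_of() so the second descriptor is located through the enclosing talitos_edesc. A minimal sketch of that recovery pattern, with a deliberately simplified struct layout:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct desc { unsigned int hdr; };

    struct edesc {
        int dma_len;
        struct desc desc; /* embedded h/w descriptor */
        char buf[64];     /* second descriptor lives at buf + dma_len */
    };

    int main(void)
    {
        struct edesc e = { .dma_len = 0, .desc = { .hdr = 0xd0 } };
        struct desc *d = &e.desc;

        /* Recover the enclosing edesc from the embedded desc pointer, as
         * get_request_hdr() does, instead of assuming the second
         * descriptor sits directly after the first (desc + 1).
         */
        struct edesc *back = container_of(d, struct edesc, desc);

        printf("hdr = %#x\n", back->desc.hdr);
        return 0;
    }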
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index b8ec301025b7..1080c0c49815 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -173,6 +173,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
+ "LEN0092", /* X1 Carbon 6 */
++ "LEN0093", /* T480 */
+ "LEN0096", /* X280 */
+ "LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN200f", /* T450s */
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 0e09bede42a2..b081a1ef6859 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter)
+ e1000_configure_msix(adapter);
+ e1000_irq_enable(adapter);
+
+- netif_start_queue(adapter->netdev);
++ /* Tx queue started by watchdog timer when link is up */
+
+ e1000e_trigger_lsc(adapter);
+ }
+@@ -4606,6 +4606,7 @@ int e1000e_open(struct net_device *netdev)
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+
+ /* allocate transmit descriptors */
+ err = e1000e_setup_tx_resources(adapter->tx_ring);
+@@ -4666,7 +4667,6 @@ int e1000e_open(struct net_device *netdev)
+ e1000_irq_enable(adapter);
+
+ adapter->tx_hang_recheck = false;
+- netif_start_queue(netdev);
+
+ hw->mac.get_link_status = true;
+ pm_runtime_put(&pdev->dev);
+@@ -5288,6 +5288,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ if (phy->ops.cfg_on_link_up)
+ phy->ops.cfg_on_link_up(hw);
+
++ netif_wake_queue(netdev);
+ netif_carrier_on(netdev);
+
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+@@ -5301,6 +5302,7 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* Link status message must follow this format */
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+ netif_carrier_off(netdev);
++ netif_stop_queue(netdev);
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->phy_info_timer,
+ round_jiffies(jiffies + 2 * HZ));
+@@ -5308,13 +5310,8 @@ static void e1000_watchdog_task(struct work_struct *work)
+ /* 8000ES2LAN requires a Rx packet buffer work-around
+ * on link down event; reset the controller to flush
+ * the Rx packet buffer.
+- *
+- * If the link is lost the controller stops DMA, but
+- * if there is queued Tx work it cannot be done. So
+- * reset the controller to flush the Tx packet buffers.
+ */
+- if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+- e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
++ if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+ adapter->flags |= FLAG_RESTART_NOW;
+ else
+ pm_schedule_suspend(netdev->dev.parent,
+@@ -5337,6 +5334,14 @@ link_up:
+ adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
+
++ /* If the link is lost the controller stops DMA, but
++ * if there is queued Tx work it cannot be done. So
++ * reset the controller to flush the Tx packet buffers.
++ */
++ if (!netif_carrier_ok(netdev) &&
++ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
++ adapter->flags |= FLAG_RESTART_NOW;
++
+ /* If reset is necessary, do it outside of interrupt context. */
+ if (adapter->flags & FLAG_RESTART_NOW) {
+ schedule_work(&adapter->reset_task);
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index 6c90aa725f23..e71992a3c55f 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -41,7 +41,6 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
+ sclp.has_hvs = !!(sccb->fac119 & 0x80);
+ sclp.has_kss = !!(sccb->fac98 & 0x01);
+ sclp.has_sipl = !!(sccb->cbl & 0x02);
+- sclp.has_sipl_g2 = !!(sccb->cbl & 0x04);
+ if (sccb->fac85 & 0x02)
+ S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
+ if (sccb->fac91 & 0x40)
+diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
+index 99d7d2566a3a..d4101cecdc8d 100644
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -150,6 +150,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
+ return -ENOMEM;
+ }
+ irq_ptr_qs[i] = q;
++ INIT_LIST_HEAD(&q->entry);
+ }
+ return 0;
+ }
+@@ -178,6 +179,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
+ q->mask = 1 << (31 - i);
+ q->nr = i;
+ q->handler = handler;
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
+diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
+index 28d59ac2204c..d9763bbecbf9 100644
+--- a/drivers/s390/cio/qdio_thinint.c
++++ b/drivers/s390/cio/qdio_thinint.c
+@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
+ mutex_lock(&tiq_list_lock);
+ list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+ mutex_unlock(&tiq_list_lock);
+- xchg(irq_ptr->dsci, 1 << 7);
+ }
+
+ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
+ struct qdio_q *q;
+
+ q = irq_ptr->input_qs[0];
+- /* if establish triggered an error */
+- if (!q || !q->entry.prev || !q->entry.next)
++ if (!q)
+ return;
+
+ mutex_lock(&tiq_list_lock);
+ list_del_rcu(&q->entry);
+ mutex_unlock(&tiq_list_lock);
+ synchronize_rcu();
++ INIT_LIST_HEAD(&q->entry);
+ }
+
+ static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 5c6062206760..52ec0d9fa1f7 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -176,6 +176,7 @@ enum cpuhp_state {
+ CPUHP_AP_WATCHDOG_ONLINE,
+ CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RCUTREE_ONLINE,
++ CPUHP_AP_BASE_CACHEINFO_ONLINE,
+ CPUHP_AP_ONLINE_DYN,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_X86_HPET_ONLINE,
+diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h
+index a7e66ab11d1d..c23f91ae5fe8 100644
+--- a/include/uapi/linux/nilfs2_ondisk.h
++++ b/include/uapi/linux/nilfs2_ondisk.h
+@@ -29,7 +29,7 @@
+
+ #include <linux/types.h>
+ #include <linux/magic.h>
+-
++#include <asm/byteorder.h>
+
+ #define NILFS_INODE_BMAP_SIZE 7
+
+@@ -533,19 +533,19 @@ enum {
+ static inline void \
+ nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
+- (1UL << NILFS_CHECKPOINT_##flag)); \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \
++ (1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline void \
+ nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
+ { \
+- cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
++ cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \
+ ~(1UL << NILFS_CHECKPOINT_##flag)); \
+ } \
+ static inline int \
+ nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
+ { \
+- return !!(le32_to_cpu(cp->cp_flags) & \
++ return !!(__le32_to_cpu(cp->cp_flags) & \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+ }
+
+@@ -595,20 +595,20 @@ enum {
+ static inline void \
+ nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
+ { \
+- su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
++ su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \
+ (1UL << NILFS_SEGMENT_USAGE_##flag));\
+ } \
+ static inline void \
+ nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
+ { \
+ su->su_flags = \
+- cpu_to_le32(le32_to_cpu(su->su_flags) & \
++ __cpu_to_le32(__le32_to_cpu(su->su_flags) & \
+ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ } \
+ static inline int \
+ nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
+ { \
+- return !!(le32_to_cpu(su->su_flags) & \
++ return !!(__le32_to_cpu(su->su_flags) & \
+ (1UL << NILFS_SEGMENT_USAGE_##flag)); \
+ }
+
+@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+ static inline void
+ nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+ {
+- su->su_lastmod = cpu_to_le64(0);
+- su->su_nblocks = cpu_to_le32(0);
+- su->su_flags = cpu_to_le32(0);
++ su->su_lastmod = __cpu_to_le64(0);
++ su->su_nblocks = __cpu_to_le32(0);
++ su->su_flags = __cpu_to_le32(0);
+ }
+
+ static inline int
+ nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+ {
+- return !le32_to_cpu(su->su_flags);
++ return !__le32_to_cpu(su->su_flags);
+ }
+
+ /**
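The switch from cpu_to_le32() to __cpu_to_le32() (and the explicit <asm/byteorder.h> include) is what makes this uapi header usable outside the kernel: the plain helpers exist only in kernel code, while the double-underscore forms are exported to user space. A minimal user-space sketch, assuming the kernel uapi headers are installed:

    #include <stdio.h>
    #include <asm/byteorder.h> /* provides __cpu_to_le32() in user space */

    int main(void)
    {
        __le32 on_disk = __cpu_to_le32(0x12345678u);

        /* Round-trips regardless of host endianness. */
        printf("%#x\n", __le32_to_cpu(on_disk));
        return 0;
    }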
+diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
+index 16cbf6beb276..ae60cae24e9a 100644
+--- a/kernel/irq/autoprobe.c
++++ b/kernel/irq/autoprobe.c
+@@ -90,7 +90,7 @@ unsigned long probe_irq_on(void)
+ /* It triggered already - consider it spurious. */
+ if (!(desc->istate & IRQS_WAITING)) {
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ } else
+ if (i < 32)
+ mask |= 1 << i;
+@@ -127,7 +127,7 @@ unsigned int probe_irq_mask(unsigned long val)
+ mask |= 1 << i;
+
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ }
+ raw_spin_unlock_irq(&desc->lock);
+ }
+@@ -169,7 +169,7 @@ int probe_irq_off(unsigned long val)
+ nr_of_irqs++;
+ }
+ desc->istate &= ~IRQS_AUTODETECT;
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ }
+ raw_spin_unlock_irq(&desc->lock);
+ }
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index 29d6c7d070b4..3ff4a1260885 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -314,6 +314,12 @@ void irq_shutdown(struct irq_desc *desc)
+ }
+ irq_state_clr_started(desc);
+ }
++}
++
++
++void irq_shutdown_and_deactivate(struct irq_desc *desc)
++{
++ irq_shutdown(desc);
+ /*
+ * This must be called even if the interrupt was never started up,
+ * because the activation can happen before the interrupt is
+diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
+index 5b1072e394b2..6c7ca2e983a5 100644
+--- a/kernel/irq/cpuhotplug.c
++++ b/kernel/irq/cpuhotplug.c
+@@ -116,7 +116,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
+ */
+ if (irqd_affinity_is_managed(d)) {
+ irqd_set_managed_shutdown(d);
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+ return false;
+ }
+ affinity = cpu_online_mask;
+diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
+index 70c3053bc1f6..3a948f41ab00 100644
+--- a/kernel/irq/internals.h
++++ b/kernel/irq/internals.h
+@@ -82,6 +82,7 @@ extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
+ extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+
+ extern void irq_shutdown(struct irq_desc *desc);
++extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
+ extern void irq_enable(struct irq_desc *desc);
+ extern void irq_disable(struct irq_desc *desc);
+ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
+@@ -96,6 +97,10 @@ static inline void irq_mark_irq(unsigned int irq) { }
+ extern void irq_mark_irq(unsigned int irq);
+ #endif
+
++extern int __irq_get_irqchip_state(struct irq_data *data,
++ enum irqchip_irq_state which,
++ bool *state);
++
+ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
+
+ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 78f3ddeb7fe4..e8f7f179bf77 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -13,6 +13,7 @@
+ #include <linux/module.h>
+ #include <linux/random.h>
+ #include <linux/interrupt.h>
++#include <linux/irqdomain.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/sched/rt.h>
+@@ -34,8 +35,9 @@ static int __init setup_forced_irqthreads(char *arg)
+ early_param("threadirqs", setup_forced_irqthreads);
+ #endif
+
+-static void __synchronize_hardirq(struct irq_desc *desc)
++static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
+ {
++ struct irq_data *irqd = irq_desc_get_irq_data(desc);
+ bool inprogress;
+
+ do {
+@@ -51,6 +53,20 @@ static void __synchronize_hardirq(struct irq_desc *desc)
+ /* Ok, that indicated we're done: double-check carefully. */
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ inprogress = irqd_irq_inprogress(&desc->irq_data);
++
++ /*
++ * If requested and supported, check at the chip whether it
++ * is in flight at the hardware level, i.e. already pending
++ * in a CPU and waiting for service and acknowledge.
++ */
++ if (!inprogress && sync_chip) {
++ /*
++ * Ignore the return code. inprogress is only updated
++ * when the chip supports it.
++ */
++ __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
++ &inprogress);
++ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ /* Oops, that failed? */
+@@ -73,13 +89,18 @@ static void __synchronize_hardirq(struct irq_desc *desc)
+ * Returns: false if a threaded handler is active.
+ *
+ * This function may be called - with care - from IRQ context.
++ *
++ * It does not check whether there is an interrupt in flight at the
++ * hardware level, but not serviced yet, as this might deadlock when
++ * called with interrupts disabled and the target CPU of the interrupt
++ * is the current CPU.
+ */
+ bool synchronize_hardirq(unsigned int irq)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+- __synchronize_hardirq(desc);
++ __synchronize_hardirq(desc, false);
+ return !atomic_read(&desc->threads_active);
+ }
+
+@@ -95,14 +116,19 @@ EXPORT_SYMBOL(synchronize_hardirq);
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+- * This function may be called - with care - from IRQ context.
++ * Can only be called from preemptible code as it might sleep when
++ * an interrupt thread is associated with @irq.
++ *
++ * It optionally makes sure (when the irq chip supports that method)
++ * that the interrupt is not pending in any CPU and waiting for
++ * service.
+ */
+ void synchronize_irq(unsigned int irq)
+ {
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc) {
+- __synchronize_hardirq(desc);
++ __synchronize_hardirq(desc, true);
+ /*
+ * We made sure that no hardirq handler is
+ * running. Now verify that no threaded handlers are
+@@ -1699,6 +1725,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+ /* If this was the last handler, shut down the IRQ line: */
+ if (!desc->action) {
+ irq_settings_clr_disable_unlazy(desc);
++ /* Only shutdown. Deactivate after synchronize_hardirq() */
+ irq_shutdown(desc);
+ }
+
+@@ -1727,8 +1754,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+
+ unregister_handler_proc(irq, action);
+
+- /* Make sure it's not being used on another CPU: */
+- synchronize_hardirq(irq);
++ /*
++ * Make sure it's not being used on another CPU and if the chip
++ * supports it also make sure that there is no (not yet serviced)
++ * interrupt in flight at the hardware level.
++ */
++ __synchronize_hardirq(desc, true);
+
+ #ifdef CONFIG_DEBUG_SHIRQ
+ /*
+@@ -1768,6 +1799,14 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
+ * require it to deallocate resources over the slow bus.
+ */
+ chip_bus_lock(desc);
++ /*
++ * There is no interrupt on the fly anymore. Deactivate it
++ * completely.
++ */
++ raw_spin_lock_irqsave(&desc->lock, flags);
++ irq_domain_deactivate_irq(&desc->irq_data);
++ raw_spin_unlock_irqrestore(&desc->lock, flags);
++
+ irq_release_resources(desc);
+ chip_bus_sync_unlock(desc);
+ irq_remove_timings(desc);
+@@ -1855,7 +1894,7 @@ static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
+ }
+
+ irq_settings_clr_disable_unlazy(desc);
+- irq_shutdown(desc);
++ irq_shutdown_and_deactivate(desc);
+
+ irq_release_resources(desc);
+
+@@ -2578,6 +2617,28 @@ out:
+ irq_put_desc_unlock(desc, flags);
+ }
+
++int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
++ bool *state)
++{
++ struct irq_chip *chip;
++ int err = -EINVAL;
++
++ do {
++ chip = irq_data_get_irq_chip(data);
++ if (chip->irq_get_irqchip_state)
++ break;
++#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
++ data = data->parent_data;
++#else
++ data = NULL;
++#endif
++ } while (data);
++
++ if (data)
++ err = chip->irq_get_irqchip_state(data, which, state);
++ return err;
++}
++
+ /**
+ * irq_get_irqchip_state - returns the irqchip state of a interrupt.
+ * @irq: Interrupt line that is forwarded to a VM
+@@ -2596,7 +2657,6 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ {
+ struct irq_desc *desc;
+ struct irq_data *data;
+- struct irq_chip *chip;
+ unsigned long flags;
+ int err = -EINVAL;
+
+@@ -2606,19 +2666,7 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+
+ data = irq_desc_get_irq_data(desc);
+
+- do {
+- chip = irq_data_get_irq_chip(data);
+- if (chip->irq_get_irqchip_state)
+- break;
+-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+- data = data->parent_data;
+-#else
+- data = NULL;
+-#endif
+- } while (data);
+-
+- if (data)
+- err = chip->irq_get_irqchip_state(data, which, state);
++ err = __irq_get_irqchip_state(data, which, state);
+
+ irq_put_desc_busunlock(desc, flags);
+ return err;
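The factored-out __irq_get_irqchip_state() walks up the irq domain hierarchy until some chip implements the callback. A simplified user-space sketch of that walk (the structures and the -EINVAL constant are stand-ins):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdbool.h>

    struct chip { int (*get_state)(bool *state); };
    struct irq_data { struct chip *chip; struct irq_data *parent_data; };

    static int parent_get_state(bool *state) { *state = true; return 0; }

    static int get_irqchip_state(struct irq_data *data, bool *state)
    {
        /* Walk towards the root domain until a chip provides the call. */
        while (data && !data->chip->get_state)
            data = data->parent_data;

        return data ? data->chip->get_state(state) : -22 /* -EINVAL */;
    }

    int main(void)
    {
        struct chip parent = { .get_state = parent_get_state };
        struct chip child = { .get_state = NULL };
        struct irq_data pd = { .chip = &parent, .parent_data = NULL };
        struct irq_data cd = { .chip = &child, .parent_data = &pd };
        bool state = false;

        if (!get_irqchip_state(&cd, &state))
            printf("active = %d\n", state);
        return 0;
    }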
* [gentoo-commits] proj/linux-patches:5.2 commit in: /
@ 2019-07-14 15:52 Mike Pagano
0 siblings, 0 replies; 24+ messages in thread
From: Mike Pagano @ 2019-07-14 15:52 UTC (permalink / raw
To: gentoo-commits
commit: 6091199db63b6a242df8c64d9354179c68bdf442
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 14 15:51:59 2019 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 14 15:51:59 2019 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6091199d
Linux patch 5.2.1
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1000_linux-5.2.1.patch | 3923 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 3927 insertions(+)
diff --git a/0000_README b/0000_README
index f86fe5e..3d37d29 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
Individual Patch Descriptions:
--------------------------------------------------------------------------
+Patch: 1000_linux-5.2.1.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.1
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1000_linux-5.2.1.patch b/1000_linux-5.2.1.patch
new file mode 100644
index 0000000..03bdab7
--- /dev/null
+++ b/1000_linux-5.2.1.patch
@@ -0,0 +1,3923 @@
+diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
+index ffc064c1ec68..49311f3da6f2 100644
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -9,5 +9,6 @@ are configurable at compile, boot or run time.
+ .. toctree::
+ :maxdepth: 1
+
++ spectre
+ l1tf
+ mds
+diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
+new file mode 100644
+index 000000000000..25f3b2532198
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/spectre.rst
+@@ -0,0 +1,697 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++Spectre Side Channels
++=====================
++
++Spectre is a class of side channel attacks that exploit branch prediction
++and speculative execution on modern CPUs to read memory, possibly
++bypassing access controls. Speculative execution side channel exploits
++do not modify memory but attempt to infer privileged data in the memory.
++
++This document covers Spectre variant 1 and Spectre variant 2.
++
++Affected processors
++-------------------
++
++Speculative execution side channel methods affect a wide range of modern
++high performance processors, since most modern high speed processors
++use branch prediction and speculative execution.
++
++The following CPUs are vulnerable:
++
++ - Intel Core, Atom, Pentium, and Xeon processors
++
++ - AMD Phenom, EPYC, and Zen processors
++
++ - IBM POWER and zSeries processors
++
++ - Higher end ARM processors
++
++ - Apple CPUs
++
++ - Higher end MIPS CPUs
++
++ - Likely most other high performance CPUs. Contact your CPU vendor for details.
++
++Whether a processor is affected or not can be read out from the Spectre
++vulnerability files in sysfs. See :ref:`spectre_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries describe Spectre variants:
++
++ ============= ======================= =================
++ CVE-2017-5753 Bounds check bypass Spectre variant 1
++ CVE-2017-5715 Branch target injection Spectre variant 2
++ ============= ======================= =================
++
++Problem
++-------
++
++CPUs use speculative operations to improve performance. That may leave
++traces of memory accesses or computations in the processor's caches,
++buffers, and branch predictors. Malicious software may be able to
++influence the speculative execution paths, and then use the side effects
++of the speculative execution in the CPUs' caches and buffers to infer
++privileged data touched during the speculative execution.
++
++Spectre variant 1 attacks take advantage of speculative execution of
++conditional branches, while Spectre variant 2 attacks use speculative
++execution of indirect branches to leak privileged memory.
++See :ref:`[1] <spec_ref1>` :ref:`[5] <spec_ref5>` :ref:`[7] <spec_ref7>`
++:ref:`[10] <spec_ref10>` :ref:`[11] <spec_ref11>`.
++
++Spectre variant 1 (Bounds Check Bypass)
++---------------------------------------
++
++The bounds check bypass attack :ref:`[2] <spec_ref2>` takes advantage
++of speculative execution that bypasses conditional branch instructions
++used for memory access bounds check (e.g. checking if the index of an
++array results in memory access within a valid range). This results in
++memory accesses to invalid memory (with an out-of-bounds index) that are
++done speculatively before validation checks resolve. Such speculative
++memory accesses can leave side effects, creating side channels which
++leak information to the attacker.
++
++There are some extensions of Spectre variant 1 attacks for reading data
++over the network, see :ref:`[12] <spec_ref12>`. However such attacks
++are difficult, low bandwidth, fragile, and are considered low risk.
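A minimal sketch of the variant 1 pattern being described, in user-space C (names and sizes are illustrative only):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define ARRAY_SIZE 16

    static uint8_t array[ARRAY_SIZE];
    static uint8_t probe[256 * 4096];

    /* The "idx < ARRAY_SIZE" check can be predicted taken for an
     * out-of-bounds idx, so array[idx] is read speculatively and the
     * dependent probe[] access leaves a cache footprint an attacker
     * can time. The kernel's nospec helpers clamp idx without a branch.
     */
    static uint8_t victim(size_t idx)
    {
        if (idx < ARRAY_SIZE)
            return probe[array[idx] * 4096];
        return 0;
    }

    int main(void)
    {
        printf("%u\n", victim(3));
        return 0;
    }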
++
++Spectre variant 2 (Branch Target Injection)
++-------------------------------------------
++
++The branch target injection attack takes advantage of speculative
++execution of indirect branches :ref:`[3] <spec_ref3>`. The indirect
++branch predictors inside the processor used to guess the target of
++indirect branches can be influenced by an attacker, causing gadget code
++to be speculatively executed, thus exposing sensitive data touched by
++the victim. The side effects left in the CPU's caches during speculative
++execution can be measured to infer data values.
++
++.. _poison_btb:
++
++In Spectre variant 2 attacks, the attacker can steer speculative indirect
++branches in the victim to gadget code by poisoning the branch target
++buffer of a CPU used for predicting indirect branch addresses. Such
++poisoning could be done by indirect branching into existing code,
++with the address offset of the indirect branch under the attacker's
++control. Since the branch prediction on impacted hardware does not
++fully disambiguate branch address and uses the offset for prediction,
++this could cause privileged code's indirect branch to jump to a gadget
++code with the same offset.
++
++The most useful gadgets take an attacker-controlled input parameter (such
++as a register value) so that the memory read can be controlled. Gadgets
++without input parameters might be possible, but the attacker would have
++very little control over what memory can be read, reducing the risk of
++the attack revealing useful data.
++
++One other variant 2 attack vector is for the attacker to poison the
++return stack buffer (RSB) :ref:`[13] <spec_ref13>` to cause speculative
++subroutine return instruction execution to go to a gadget. An attacker's
++imbalanced subroutine call instructions might "poison" entries in the
++return stack buffer which are later consumed by a victim's subroutine
++return instructions. This attack can be mitigated by flushing the return
++stack buffer on context switch, or virtual machine (VM) exit.
++
++On systems with simultaneous multi-threading (SMT), attacks are possible
++from the sibling thread, as level 1 cache and branch target buffer
++(BTB) may be shared between hardware threads in a CPU core. A malicious
++program running on the sibling thread may influence its peer's BTB to
++steer its indirect branch speculations to gadget code, and measure the
++speculative execution's side effects left in level 1 cache to infer the
++victim's data.
++
++Attack scenarios
++----------------
++
++The following attack scenarios have been anticipated, but the list may
++not cover all possible attack vectors.
++
++1. A user process attacking the kernel
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The attacker passes a parameter to the kernel via a register or
++ via a known address in memory during a syscall. Such parameter may
++ be used later by the kernel as an index to an array or to derive
++ a pointer for a Spectre variant 1 attack. The index or pointer
++ is invalid, but bounds checks are bypassed in the code branch taken
++ for speculative execution. This could cause privileged memory to be
++ accessed and leaked.
++
++ For kernel code that has been identified where data pointers could
++ potentially be influenced for Spectre attacks, new "nospec" accessor
++ macros are used to prevent speculative loading of data.
++
++ A Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
++ target buffer (BTB) before issuing a syscall to launch an attack.
++ After entering the kernel, the kernel could use the poisoned branch
++ target buffer on indirect jump and jump to gadget code in speculative
++ execution.
++
++ If an attacker tries to control the memory addresses leaked during
++ speculative execution, he would also need to pass a parameter to the
++ gadget, either through a register or a known address in memory. After
++ the gadget has executed, he can measure the side effect.
++
++ The kernel can protect itself against consuming poisoned branch
++ target buffer entries by using return trampolines (also known as
++ "retpoline") :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` for all
++ indirect branches. Return trampolines trap speculative execution paths
++ to prevent jumping to gadget code during speculative execution.
++ x86 CPUs with Enhanced Indirect Branch Restricted Speculation
++ (Enhanced IBRS) available in hardware should use the feature to
++ mitigate Spectre variant 2 instead of retpoline. Enhanced IBRS is
++ more efficient than retpoline.
++
++ There may be gadget code in firmware which could be exploited with
++ Spectre variant 2 attack by a rogue user process. To mitigate such
++ attacks on x86, Indirect Branch Restricted Speculation (IBRS) feature
++ is turned on before the kernel invokes any firmware code.
++
++2. A user process attacking another user process
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ A malicious user process can try to attack another user process,
++ either via a context switch on the same hardware thread, or from the
++ sibling hyperthread sharing a physical processor core on a simultaneous
++ multi-threading (SMT) system.
++
++ Spectre variant 1 attacks generally require passing parameters
++ between the processes, which needs a data passing relationship, such
++ as remote procedure calls (RPC). Those parameters are used in gadget
++ code to derive invalid data pointers accessing privileged memory in
++ the attacked process.
++
++ Spectre variant 2 attacks can be launched from a rogue process by
++ :ref:`poisoning <poison_btb>` the branch target buffer. This can
++ influence the indirect branch targets for a victim process that either
++ runs later on the same hardware thread, or runs concurrently on
++ a sibling hardware thread sharing the same physical core.
++
++ A user process can protect itself against Spectre variant 2 attacks
++ by using the prctl() syscall to disable indirect branch speculation
++ for itself. An administrator can also cordon off an unsafe process
++ from polluting the branch target buffer by disabling the process's
++ indirect branch speculation. This comes with a performance cost
++ from not using indirect branch speculation and clearing the branch
++ target buffer. When SMT is enabled on x86, for a process that has
++ indirect branch speculation disabled, Single Threaded Indirect Branch
++ Predictors (STIBP) :ref:`[4] <spec_ref4>` are turned on to prevent the
++ sibling thread from controlling the branch target buffer. In addition,
++ the Indirect Branch Prediction Barrier (IBPB) is issued to clear the
++ branch target buffer when context switching to and from such process.
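A minimal sketch of the per-process opt-out mentioned above, using the speculation-control prctl() (error handling kept to a minimum):

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
        /* Opt this task out of indirect branch speculation. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                  PR_SPEC_DISABLE, 0, 0))
            perror("PR_SET_SPECULATION_CTRL");

        /* The returned value encodes PR_SPEC_* state flags. */
        printf("state = %d\n",
               prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                     0, 0, 0));
        return 0;
    }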
++
++ On x86, the return stack buffer is stuffed on context switch.
++ This prevents the branch target buffer from being used for branch
++ prediction when the return stack buffer underflows while switching to
++ a deeper call stack. Any poisoned entries in the return stack buffer
++ left by the previous process will also be cleared.
++
++ User programs should use address space randomization to make attacks
++ more difficult (Set /proc/sys/kernel/randomize_va_space = 1 or 2).
++
++3. A virtualized guest attacking the host
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The attack mechanism is similar to how user processes attack the
++ kernel. The kernel is entered via hyper-calls or other virtualization
++ exit paths.
++
++ For Spectre variant 1 attacks, rogue guests can pass parameters
++ (e.g. in registers) via hyper-calls to derive invalid pointers to
++ speculate into privileged memory after entering the kernel. For places
++ where such kernel code has been identified, nospec accessor macros
++ are used to stop speculative memory access.
++
++ For Spectre variant 2 attacks, rogue guests can :ref:`poison
++ <poison_btb>` the branch target buffer or return stack buffer, causing
++ the kernel to jump to gadget code in the speculative execution paths.
++
++ To mitigate variant 2, the host kernel can use return trampolines
++ for indirect branches to bypass the poisoned branch target buffer,
++ and flush the return stack buffer on VM exit. This prevents rogue
++ guests from affecting indirect branching in the host kernel.
++
++ To protect host processes from rogue guests, host processes can have
++ indirect branch speculation disabled via prctl(). The branch target
++ buffer is cleared before context switching to such processes.
++
++4. A virtualized guest attacking other guest
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ A rogue guest may attack another guest to get data accessible by the
++ other guest.
++
++ Spectre variant 1 attacks are possible if parameters can be passed
++ between guests. This may be done via mechanisms such as shared memory
++ or message passing. Such parameters could be used to derive data
++ pointers to privileged data in the guest. The privileged data could be
++ accessed by gadget code in the victim's speculation paths.
++
++ Spectre variant 2 attacks can be launched from a rogue guest by
++ :ref:`poisoning <poison_btb>` the branch target buffer or the return
++ stack buffer. Such poisoned entries could be used to influence
++ speculative execution paths in the victim guest.
++
++ The Linux kernel mitigates attacks against other guests running on the same
++ CPU hardware thread by flushing the return stack buffer on VM exit,
++ and clearing the branch target buffer before switching to a new guest.
++
++ If SMT is used, Spectre variant 2 attacks from an untrusted guest
++ in the sibling hyperthread can be mitigated by the administrator,
++ by turning off the unsafe guest's indirect branch speculation via
++ prctl(). A guest can also protect itself by turning on microcode
++ based mitigations (such as IBPB or STIBP on x86) within the guest.
++
++.. _spectre_sys_info:
++
++Spectre system information
++--------------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current
++mitigation status of the system for Spectre: whether the system is
++vulnerable, and which mitigations are active.
++
++The sysfs file showing Spectre variant 1 mitigation status is:
++
++ /sys/devices/system/cpu/vulnerabilities/spectre_v1
++
++The possible values in this file are:
++
++ ======================================= =================================
++ 'Mitigation: __user pointer sanitation' Protection in kernel on a
++ case-by-case basis with explicit
++ pointer sanitation.
++ ======================================= =================================
++
++However, the protections are put in place on a case by case basis,
++and there is no guarantee that all possible attack vectors for Spectre
++variant 1 are covered.
++
++The spectre_v2 kernel file reports whether the kernel has been compiled
++with the retpoline mitigation, whether the CPU has a hardware mitigation,
++and whether the CPU supports additional process-specific mitigation.
++
++This file also reports CPU features enabled by microcode to mitigate
++attacks between user processes:
++
++1. Indirect Branch Prediction Barrier (IBPB) to add additional
++ isolation between processes of different users.
++2. Single Thread Indirect Branch Predictors (STIBP) to add additional
++ isolation between CPU threads running on the same core.
++
++These CPU features may impact performance when used and can be enabled
++per process on a case-by-case basis.
++
++The sysfs file showing Spectre variant 2 mitigation status is:
++
++ /sys/devices/system/cpu/vulnerabilities/spectre_v2
++
++The possible values in this file are:
++
++ - Kernel status:
++
++ ==================================== =================================
++ 'Not affected' The processor is not vulnerable
++ 'Vulnerable' Vulnerable, no mitigation
++ 'Mitigation: Full generic retpoline' Software-focused mitigation
++ 'Mitigation: Full AMD retpoline' AMD-specific software mitigation
++ 'Mitigation: Enhanced IBRS' Hardware-focused mitigation
++ ==================================== =================================
++
++ - Firmware status: Shows whether Indirect Branch Restricted Speculation (IBRS) is
++ used to protect against Spectre variant 2 attacks when calling firmware (x86 only).
++
++ ========== =============================================================
++ 'IBRS_FW' Protection against user program attacks when calling firmware
++ ========== =============================================================
++
++ - Indirect branch prediction barrier (IBPB) status for protection between
++ processes of different users. This feature can be controlled through
++ prctl() per process, or through kernel command line options. This is
++ an x86-only feature. For more details see below.
++
++ =================== ========================================================
++ 'IBPB: disabled' IBPB unused
++ 'IBPB: always-on' Use IBPB on all tasks
++ 'IBPB: conditional' Use IBPB on SECCOMP or indirect branch restricted tasks
++ =================== ========================================================
++
++ - Single threaded indirect branch prediction (STIBP) status for protection
++ between different hyper threads. This feature can be controlled through
++ prctl() per process, or through kernel command line options. This is
++ an x86-only feature. For more details see below.
++
++ ==================== ========================================================
++ 'STIBP: disabled' STIBP unused
++ 'STIBP: forced' Use STIBP on all tasks
++ 'STIBP: conditional' Use STIBP on SECCOMP or indirect branch restricted tasks
++ ==================== ========================================================
++
++ - Return stack buffer (RSB) protection status:
++
++ ============= ===========================================
++ 'RSB filling' Protection of RSB on context switch enabled
++ ============= ===========================================
++
++Full mitigation might require a microcode update from the CPU
++vendor. When the necessary microcode is not available, the kernel will
++report the system as vulnerable.
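++
++A minimal user-space sketch of checking both files (the paths are the
++ones documented above; error handling is trimmed for brevity)::
++
++   #include <stdio.h>
++
++   static void show(const char *path)
++   {
++           FILE *f = fopen(path, "r");
++           char line[256];
++
++           if (f && fgets(line, sizeof(line), f))
++                   printf("%s: %s", path, line);
++           if (f)
++                   fclose(f);
++   }
++
++   int main(void)
++   {
++           show("/sys/devices/system/cpu/vulnerabilities/spectre_v1");
++           show("/sys/devices/system/cpu/vulnerabilities/spectre_v2");
++           return 0;
++   }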
++
++Turning on mitigation for Spectre variant 1 and Spectre variant 2
++-----------------------------------------------------------------
++
++1. Kernel mitigation
++^^^^^^^^^^^^^^^^^^^^
++
++ For Spectre variant 1, vulnerable kernel code (as determined
++ by code audit or scanning tools) is annotated on a case-by-case
++ basis to use nospec accessor macros for bounds clipping :ref:`[2]
++ <spec_ref2>` to avoid any usable disclosure gadgets. However, it may
++ not cover all attack vectors for Spectre variant 1.
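++
++ To make the pattern concrete, a hedged sketch of the in-kernel idiom
++ (array_index_nospec() is the accessor from include/linux/nospec.h;
++ the entries[] array and NR_ENTRIES bound are hypothetical)::
++
++    #include <linux/nospec.h>
++
++    int get_entry(unsigned long index)
++    {
++            if (index >= NR_ENTRIES)
++                    return -EINVAL;
++            /*
++             * Clamp 'index' to 0 on the mispredicted path so a
++             * speculated out-of-bounds load cannot leak data.
++             */
++            index = array_index_nospec(index, NR_ENTRIES);
++            return entries[index];
++    }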
++
++ For Spectre variant 2 mitigation, the compiler turns indirect calls or
++ jumps in the kernel into equivalent return trampolines (retpolines)
++ :ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
++ addresses. Speculative execution paths under retpolines are trapped
++ in an infinite loop to prevent any speculative execution jumping to
++ a gadget.
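++
++ To illustrate the class of code that is rewritten, consider the
++ hypothetical C fragment below: with the retpoline options described
++ next, the compiler emits the indirect call through an external thunk
++ (e.g. __x86_indirect_thunk_rax) instead of a plain indirect call
++ through a register::
++
++    /* Any call or jump through a pointer is affected. */
++    void run(void (*handler)(void))
++    {
++            /*
++             * Compiled with -mindirect-branch=thunk-extern this
++             * becomes 'call __x86_indirect_thunk_<reg>', whose
++             * speculative path is captured in a safe loop.
++             */
++            handler();
++    }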
++
++ To turn on retpoline mitigation on a vulnerable CPU, the kernel
++ needs to be compiled with a gcc compiler that supports the
++ -mindirect-branch=thunk-extern -mindirect-branch-register options.
++ If the kernel is compiled with a Clang compiler, the compiler needs
++ to support the -mretpoline-external-thunk option. The kernel config
++ CONFIG_RETPOLINE needs to be turned on, and the CPU needs to run with
++ the latest updated microcode.
++
++ On Intel Skylake-era systems the mitigation covers most, but not all,
++ cases. See :ref:`[3] <spec_ref3>` for more details.
++
++ On CPUs with hardware mitigation for Spectre variant 2 (e.g. Enhanced
++ IBRS on x86), retpoline is automatically disabled at run time.
++
++ The retpoline mitigation is turned on by default on vulnerable
++ CPUs. It can be forced on or off by the administrator
++ via the kernel command line and sysfs control files. See
++ :ref:`spectre_mitigation_control_command_line`.
++
++ On x86, indirect branch restricted speculation is turned on by default
++ before invoking any firmware code to prevent Spectre variant 2 exploits
++ using the firmware.
++
++ Using kernel address space randomization (CONFIG_RANDOMIZE_BASE=y
++ and CONFIG_SLAB_FREELIST_RANDOM=y in the kernel configuration) makes
++ attacks on the kernel generally more difficult.
++
++2. User program mitigation
++^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ User programs can mitigate Spectre variant 1 using LFENCE or "bounds
++ clipping". For more details see :ref:`[2] <spec_ref2>`.
++
++ For Spectre variant 2 mitigation, individual user programs
++ can be compiled with return trampolines for indirect branches.
++ This protects them from consuming poisoned entries in the branch
++ target buffer left by malicious software. Alternatively, the
++ programs can disable their indirect branch speculation via prctl()
++ (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++ On x86, this will turn on STIBP to guard against attacks from the
++ sibling thread when the user program is running, and use IBPB to
++ flush the branch target buffer when switching to/from the program.
++
++ Restricting indirect branch speculation on a user program will
++ also prevent the program from launching a variant 2 attack
++ on x86. All sandboxed SECCOMP programs have indirect branch
++ speculation restricted by default. Administrators can change
++ that behavior via the kernel command line and sysfs control files.
++ See :ref:`spectre_mitigation_control_command_line`.
++
++ Programs that disable their indirect branch speculation will have
++ more overhead and run slower.
++
++ User programs should use address space randomization
++ (/proc/sys/kernel/randomize_va_space = 1 or 2) to make attacks more
++ difficult.
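++
++ For example, full randomization can be selected at run time by a
++ privileged task writing to that file (a sketch; requires root)::
++
++    #include <stdio.h>
++
++    int main(void)
++    {
++            FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "w");
++
++            if (!f)
++                    return 1;
++            fputs("2\n", f);   /* 2 also randomizes the brk area */
++            return fclose(f) ? 1 : 0;
++    }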
++
++3. VM mitigation
++^^^^^^^^^^^^^^^^
++
++ Within the kernel, Spectre variant 1 attacks from rogue guests are
++ mitigated on a case-by-case basis in VM exit paths. Vulnerable code
++ uses nospec accessor macros for "bounds clipping", to avoid any
++ usable disclosure gadgets. However, this may not cover all variant
++ 1 attack vectors.
++
++ For Spectre variant 2 attacks from rogue guests to the kernel, the
++ Linux kernel uses retpoline or Enhanced IBRS to prevent consumption of
++ poisoned entries in branch target buffer left by rogue guests. It also
++ flushes the return stack buffer on every VM exit. This prevents
++ return stack buffer underflows from falling back to the (possibly
++ poisoned) branch target buffer, and clears any poisoned entries a
++ rogue guest may have left in the return stack buffer.
++
++ To mitigate guest-to-guest attacks in the same CPU hardware thread,
++ the branch target buffer is sanitized by flushing it before switching
++ to a new guest on a CPU.
++
++ The above mitigations are turned on by default on vulnerable CPUs.
++
++ To mitigate guest-to-guest attacks from the sibling thread when SMT
++ is in use, an untrusted guest running in the sibling thread can have
++ its indirect branch speculation disabled by the administrator via
++ prctl().
++
++ The kernel also allows guests to use any microcode-based mitigation
++ they choose (such as IBPB or STIBP on x86) to protect themselves.
++
++.. _spectre_mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++Spectre variant 2 mitigation can be disabled or force enabled at the
++kernel command line.
++
++ nospectre_v2
++
++ [X86] Disable all mitigations for the Spectre variant 2
++ (indirect branch prediction) vulnerability. System may
++ allow data leaks with this option, which is equivalent
++ to spectre_v2=off.
++
++
++ spectre_v2=
++
++ [X86] Control mitigation of Spectre variant 2
++ (indirect branch speculation) vulnerability.
++ The default operation protects the kernel from
++ user space attacks.
++
++ on
++ unconditionally enable, implies
++ spectre_v2_user=on
++ off
++ unconditionally disable, implies
++ spectre_v2_user=off
++ auto
++ kernel detects whether your CPU model is
++ vulnerable
++
++ Selecting 'on' will, and 'auto' may, choose a
++ mitigation method at run time according to the
++ CPU, the available microcode, the setting of the
++ CONFIG_RETPOLINE configuration option, and the
++ compiler with which the kernel was built.
++
++ Selecting 'on' will also enable the mitigation
++ against user space to user space task attacks.
++
++ Selecting 'off' will disable both the kernel and
++ the user space protections.
++
++ Specific mitigations can also be selected manually:
++
++ retpoline
++ replace indirect branches
++ retpoline,generic
++ Google's original retpoline
++ retpoline,amd
++ AMD-specific minimal thunk
++
++ Not specifying this option is equivalent to
++ spectre_v2=auto.
++
++For user space mitigation:
++
++ spectre_v2_user=
++
++ [X86] Control mitigation of Spectre variant 2
++ (indirect branch speculation) vulnerability between
++ user space tasks
++
++ on
++ Unconditionally enable mitigations. Is
++ enforced by spectre_v2=on
++
++ off
++ Unconditionally disable mitigations. Is
++ enforced by spectre_v2=off
++
++ prctl
++ Indirect branch speculation is enabled,
++ but mitigation can be enabled via prctl
++ per thread. The mitigation control state
++ is inherited on fork.
++
++ prctl,ibpb
++ Like "prctl" above, but only STIBP is
++ controlled per thread. IBPB is always
++ issued when switching between different
++ user space processes.
++
++ seccomp
++ Same as "prctl" above, but all seccomp
++ threads will enable the mitigation unless
++ they explicitly opt out.
++
++ seccomp,ibpb
++ Like "seccomp" above, but only STIBP is
++ controlled per thread. IBPB is always
++ issued when switching between different
++ user space processes.
++
++ auto
++ Kernel selects the mitigation depending on
++ the available CPU features and vulnerability.
++
++ Default mitigation:
++ If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
++
++ Not specifying this option is equivalent to
++ spectre_v2_user=auto.
++
++ In general the kernel by default selects
++ reasonable mitigations for the current CPU. To
++ disable Spectre variant 2 mitigations, boot with
++ spectre_v2=off. Spectre variant 1 mitigations
++ cannot be disabled.
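++
++Whichever mode is selected, a task can query the state it ended up
++with via prctl(). A minimal sketch (the returned value is a bit mask
++of the PR_SPEC_* flags described in
++Documentation/userspace-api/spec_ctrl.rst)::
++
++   #include <sys/prctl.h>
++   #include <linux/prctl.h>
++   #include <stdio.h>
++
++   int main(void)
++   {
++           int state = prctl(PR_GET_SPECULATION_CTRL,
++                             PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
++
++           if (state < 0)
++                   perror("PR_GET_SPECULATION_CTRL");
++           else
++                   printf("indirect branch speculation: 0x%x\n", state);
++           return 0;
++   }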
++
++Mitigation selection guide
++--------------------------
++
++1. Trusted userspace
++^^^^^^^^^^^^^^^^^^^^
++
++ If all userspace applications are from trusted sources and do not
++ execute externally supplied untrusted code, then the mitigations can
++ be disabled.
++
++2. Protect sensitive programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ For security-sensitive programs that have secrets (e.g. crypto
++ keys), protection against Spectre variant 2 can be put in place by
++ disabling indirect branch speculation when the program is running
++ (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
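++
++ A minimal sketch of a program opting in by itself (on x86 this turns
++ on STIBP while the task runs and IBPB when switching to/from it)::
++
++    #include <sys/prctl.h>
++    #include <linux/prctl.h>
++    #include <stdio.h>
++
++    int main(void)
++    {
++            /* Fails if the kernel offers no per-task control. */
++            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
++                      PR_SPEC_DISABLE, 0, 0))
++                    perror("PR_SET_SPECULATION_CTRL");
++
++            /* ... work with long-lived secrets here ... */
++            return 0;
++    }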
++
++3. Sandbox untrusted programs
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ Untrusted programs that could be a source of attacks can be cordoned
++ off by disabling their indirect branch speculation when they are run
++ (See :ref:`Documentation/userspace-api/spec_ctrl.rst <set_spec_ctrl>`).
++ This prevents untrusted programs from polluting the branch target
++ buffer. All programs running in SECCOMP sandboxes have indirect
++ branch speculation restricted by default. This behavior can be
++ changed via the kernel command line and sysfs control files. See
++ :ref:`spectre_mitigation_control_command_line`.
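++
++ Installing any seccomp filter is enough to trigger the default
++ restriction. A hedged sketch with a minimal allow-everything filter
++ (assumes the default spectre_v2_user=seccomp policy and that the
++ filter is not loaded with SECCOMP_FILTER_FLAG_SPEC_ALLOW)::
++
++    #include <linux/filter.h>
++    #include <linux/seccomp.h>
++    #include <sys/prctl.h>
++    #include <sys/syscall.h>
++    #include <unistd.h>
++
++    int main(void)
++    {
++            /* Allow every syscall; what matters is only that a
++             * filter is installed at all. */
++            struct sock_filter insns[] = {
++                    BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
++            };
++            struct sock_fprog prog = {
++                    .len = 1,
++                    .filter = insns,
++            };
++
++            prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
++            /* Indirect branch speculation is restricted from here. */
++            return syscall(SYS_seccomp, SECCOMP_SET_MODE_FILTER,
++                           0, &prog);
++    }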
++
++4. High security mode
++^^^^^^^^^^^^^^^^^^^^^
++
++ All Spectre variant 2 mitigations can be forced on
++ at boot time for all programs (See the "on" option in
++ :ref:`spectre_mitigation_control_command_line`). This will add
++ overhead as indirect branch speculations for all programs will be
++ restricted.
++
++ On x86, the branch target buffer is flushed with IBPB when switching
++ to a new program. STIBP is left on all the time to protect programs
++ against variant 2 attacks originating from programs running on
++ sibling threads.
++
++ Alternatively, STIBP can be used only when running programs
++ whose indirect branch speculation is explicitly disabled,
++ while IBPB is still used all the time when switching to a new
++ program to clear the branch target buffer (See "ibpb" option in
++ :ref:`spectre_mitigation_control_command_line`). This "ibpb" option
++ has less performance cost than the "on" option, which leaves STIBP
++ on all the time.
++
++References on Spectre
++---------------------
++
++Intel white papers:
++
++.. _spec_ref1:
++
++[1] `Intel analysis of speculative execution side channels <https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/Intel-Analysis-of-Speculative-Execution-Side-Channels.pdf>`_.
++
++.. _spec_ref2:
++
++[2] `Bounds check bypass <https://software.intel.com/security-software-guidance/software-guidance/bounds-check-bypass>`_.
++
++.. _spec_ref3:
++
++[3] `Deep dive: Retpoline: A branch target injection mitigation <https://software.intel.com/security-software-guidance/insights/deep-dive-retpoline-branch-target-injection-mitigation>`_.
++
++.. _spec_ref4:
++
++[4] `Deep Dive: Single Thread Indirect Branch Predictors <https://software.intel.com/security-software-guidance/insights/deep-dive-single-thread-indirect-branch-predictors>`_.
++
++AMD white papers:
++
++.. _spec_ref5:
++
++[5] `AMD64 technology indirect branch control extension <https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf>`_.
++
++.. _spec_ref6:
++
++[6] `Software techniques for managing speculation on AMD processors <https://developer.amd.com/wp-content/resources/90343-B_SoftwareTechniquesforManagingSpeculation_WP_7-18Update_FNL.pdf>`_.
++
++ARM white papers:
++
++.. _spec_ref7:
++
++[7] `Cache speculation side-channels <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/download-the-whitepaper>`_.
++
++.. _spec_ref8:
++
++[8] `Cache speculation issues update <https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability/latest-updates/cache-speculation-issues-update>`_.
++
++Google white paper:
++
++.. _spec_ref9:
++
++[9] `Retpoline: a software construct for preventing branch-target-injection <https://support.google.com/faqs/answer/7625886>`_.
++
++MIPS white paper:
++
++.. _spec_ref10:
++
++[10] `MIPS: response on speculative execution and side channel vulnerabilities <https://www.mips.com/blog/mips-response-on-speculative-execution-and-side-channel-vulnerabilities/>`_.
++
++Academic papers:
++
++.. _spec_ref11:
++
++[11] `Spectre Attacks: Exploiting Speculative Execution <https://spectreattack.com/spectre.pdf>`_.
++
++.. _spec_ref12:
++
++[12] `NetSpectre: Read Arbitrary Memory over Network <https://arxiv.org/abs/1807.10535>`_.
++
++.. _spec_ref13:
++
++[13] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://www.usenix.org/system/files/conference/woot18/woot18-paper-koruyeh.pdf>`_.
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 138f6664b2e2..0082d1e56999 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5102,12 +5102,6 @@
+ emulate [default] Vsyscalls turn into traps and are
+ emulated reasonably safely.
+
+- native Vsyscalls are native syscall instructions.
+- This is a little bit faster than trapping
+- and makes a few dynamic recompilers work
+- better than they would in emulation mode.
+- It also makes exploits much easier to write.
+-
+ none Vsyscalls don't work at all. This makes
+ them quite hard to use for exploits but
+ might break your system.
+diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
+index 1129c7550a48..7ddd8f667459 100644
+--- a/Documentation/userspace-api/spec_ctrl.rst
++++ b/Documentation/userspace-api/spec_ctrl.rst
+@@ -49,6 +49,8 @@ If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+ available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+ misfeature will fail.
+
++.. _set_spec_ctrl:
++
+ PR_SET_SPECULATION_CTRL
+ -----------------------
+
+diff --git a/Makefile b/Makefile
+index 3e4868a6498b..d8f5dbfd6b76 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
+index a166c960bc9e..e9d0bc3a5e88 100644
+--- a/arch/x86/kernel/ptrace.c
++++ b/arch/x86/kernel/ptrace.c
+@@ -25,6 +25,7 @@
+ #include <linux/rcupdate.h>
+ #include <linux/export.h>
+ #include <linux/context_tracking.h>
++#include <linux/nospec.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -643,9 +644,11 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
+ {
+ struct thread_struct *thread = &tsk->thread;
+ unsigned long val = 0;
++ int index = n;
+
+ if (n < HBP_NUM) {
+- struct perf_event *bp = thread->ptrace_bps[n];
++ struct perf_event *bp = thread->ptrace_bps[index];
++ index = array_index_nospec(index, HBP_NUM);
+
+ if (bp)
+ val = bp->hw.info.address;
+diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
+index a5b802a12212..71d3fef1edc9 100644
+--- a/arch/x86/kernel/tls.c
++++ b/arch/x86/kernel/tls.c
+@@ -5,6 +5,7 @@
+ #include <linux/user.h>
+ #include <linux/regset.h>
+ #include <linux/syscalls.h>
++#include <linux/nospec.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/desc.h>
+@@ -220,6 +221,7 @@ int do_get_thread_area(struct task_struct *p, int idx,
+ struct user_desc __user *u_info)
+ {
+ struct user_desc info;
++ int index;
+
+ if (idx == -1 && get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+@@ -227,8 +229,11 @@ int do_get_thread_area(struct task_struct *p, int idx,
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+- fill_user_desc(&info, idx,
+- &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
++ index = idx - GDT_ENTRY_TLS_MIN;
++ index = array_index_nospec(index,
++ GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);
++
++ fill_user_desc(&info, idx, &p->thread.tls_array[index]);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index 0850b5149345..4d1517022a14 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,10 +141,10 @@ SECTIONS
+ *(.text.__x86.indirect_thunk)
+ __indirect_thunk_end = .;
+ #endif
+- } :text = 0x9090
+
+- /* End of text section */
+- _etext = .;
++ /* End of text section */
++ _etext = .;
++ } :text = 0x9090
+
+ NOTES :text :note
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index f9269ae6da9c..e5db3856b194 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -4584,6 +4584,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfqd->lock, flags);
++ bfqq->bic = NULL;
+ bfq_exit_bfqq(bfqd, bfqq);
+ bic_set_bfqq(bic, NULL, is_sync);
+ spin_unlock_irqrestore(&bfqd->lock, flags);
+diff --git a/block/bio.c b/block/bio.c
+index ce797d73bb43..67bba12d273b 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -731,7 +731,7 @@ static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ }
+ }
+
+- if (bio_full(bio))
++ if (bio_full(bio, len))
+ return 0;
+
+ if (bio->bi_phys_segments >= queue_max_segments(q))
+@@ -807,7 +807,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
+ struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
+
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+- WARN_ON_ONCE(bio_full(bio));
++ WARN_ON_ONCE(bio_full(bio, len));
+
+ bv->bv_page = page;
+ bv->bv_offset = off;
+@@ -834,7 +834,7 @@ int bio_add_page(struct bio *bio, struct page *page,
+ bool same_page = false;
+
+ if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+- if (bio_full(bio))
++ if (bio_full(bio, len))
+ return 0;
+ __bio_add_page(bio, page, len, offset);
+ }
+@@ -922,7 +922,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ if (same_page)
+ put_page(page);
+ } else {
+- if (WARN_ON_ONCE(bio_full(bio)))
++ if (WARN_ON_ONCE(bio_full(bio, len)))
+ return -EINVAL;
+ __bio_add_page(bio, page, len, offset);
+ }
+@@ -966,7 +966,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ ret = __bio_iov_bvec_add_pages(bio, iter);
+ else
+ ret = __bio_iov_iter_get_pages(bio, iter);
+- } while (!ret && iov_iter_count(iter) && !bio_full(bio));
++ } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
+
+ if (iov_iter_bvec_no_ref(iter))
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+diff --git a/crypto/lrw.c b/crypto/lrw.c
+index 58009cf63a6e..be829f6afc8e 100644
+--- a/crypto/lrw.c
++++ b/crypto/lrw.c
+@@ -384,7 +384,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
+ inst->alg.base.cra_priority = alg->base.cra_priority;
+ inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
+ inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
+- (__alignof__(__be32) - 1);
++ (__alignof__(be128) - 1);
+
+ inst->alg.ivsize = LRW_BLOCK_SIZE;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index bc26b5511f0a..38a59a630cd4 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2059,10 +2059,9 @@ static size_t binder_get_object(struct binder_proc *proc,
+
+ read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+- !IS_ALIGNED(offset, sizeof(u32)))
++ binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
++ offset, read_size))
+ return 0;
+- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+- offset, read_size);
+
+ /* Ok, now see if we read a complete object. */
+ hdr = &object->hdr;
+@@ -2131,8 +2130,10 @@ static struct binder_buffer_object *binder_validate_ptr(
+ return NULL;
+
+ buffer_offset = start_offset + sizeof(binder_size_t) * index;
+- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+- b, buffer_offset, sizeof(object_offset));
++ if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
++ b, buffer_offset,
++ sizeof(object_offset)))
++ return NULL;
+ object_size = binder_get_object(proc, b, object_offset, object);
+ if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
+ return NULL;
+@@ -2212,10 +2213,12 @@ static bool binder_validate_fixup(struct binder_proc *proc,
+ return false;
+ last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
+ buffer_offset = objects_start_offset +
+- sizeof(binder_size_t) * last_bbo->parent,
+- binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset,
+- b, buffer_offset,
+- sizeof(last_obj_offset));
++ sizeof(binder_size_t) * last_bbo->parent;
++ if (binder_alloc_copy_from_buffer(&proc->alloc,
++ &last_obj_offset,
++ b, buffer_offset,
++ sizeof(last_obj_offset)))
++ return false;
+ }
+ return (fixup_offset >= last_min_offset);
+ }
+@@ -2301,15 +2304,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
+ buffer_offset += sizeof(binder_size_t)) {
+ struct binder_object_header *hdr;
+- size_t object_size;
++ size_t object_size = 0;
+ struct binder_object object;
+ binder_size_t object_offset;
+
+- binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
+- buffer, buffer_offset,
+- sizeof(object_offset));
+- object_size = binder_get_object(proc, buffer,
+- object_offset, &object);
++ if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
++ buffer, buffer_offset,
++ sizeof(object_offset)))
++ object_size = binder_get_object(proc, buffer,
++ object_offset, &object);
+ if (object_size == 0) {
+ pr_err("transaction release %d bad object at offset %lld, size %zd\n",
+ debug_id, (u64)object_offset, buffer->data_size);
+@@ -2432,15 +2435,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ for (fd_index = 0; fd_index < fda->num_fds;
+ fd_index++) {
+ u32 fd;
++ int err;
+ binder_size_t offset = fda_offset +
+ fd_index * sizeof(fd);
+
+- binder_alloc_copy_from_buffer(&proc->alloc,
+- &fd,
+- buffer,
+- offset,
+- sizeof(fd));
+- binder_deferred_fd_close(fd);
++ err = binder_alloc_copy_from_buffer(
++ &proc->alloc, &fd, buffer,
++ offset, sizeof(fd));
++ WARN_ON(err);
++ if (!err)
++ binder_deferred_fd_close(fd);
+ }
+ } break;
+ default:
+@@ -2683,11 +2687,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+ int ret;
+ binder_size_t offset = fda_offset + fdi * sizeof(fd);
+
+- binder_alloc_copy_from_buffer(&target_proc->alloc,
+- &fd, t->buffer,
+- offset, sizeof(fd));
+- ret = binder_translate_fd(fd, offset, t, thread,
+- in_reply_to);
++ ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
++ &fd, t->buffer,
++ offset, sizeof(fd));
++ if (!ret)
++ ret = binder_translate_fd(fd, offset, t, thread,
++ in_reply_to);
+ if (ret < 0)
+ return ret;
+ }
+@@ -2740,8 +2745,12 @@ static int binder_fixup_parent(struct binder_transaction *t,
+ }
+ buffer_offset = bp->parent_offset +
+ (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
+- binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
+- &bp->buffer, sizeof(bp->buffer));
++ if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
++ &bp->buffer, sizeof(bp->buffer))) {
++ binder_user_error("%d:%d got transaction with invalid parent offset\n",
++ proc->pid, thread->pid);
++ return -EINVAL;
++ }
+
+ return 0;
+ }
+@@ -3160,15 +3169,20 @@ static void binder_transaction(struct binder_proc *proc,
+ goto err_binder_alloc_buf_failed;
+ }
+ if (secctx) {
++ int err;
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+
+ t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
+- binder_alloc_copy_to_buffer(&target_proc->alloc,
+- t->buffer, buf_offset,
+- secctx, secctx_sz);
++ err = binder_alloc_copy_to_buffer(&target_proc->alloc,
++ t->buffer, buf_offset,
++ secctx, secctx_sz);
++ if (err) {
++ t->security_ctx = 0;
++ WARN_ON(1);
++ }
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
+@@ -3234,11 +3248,16 @@ static void binder_transaction(struct binder_proc *proc,
+ struct binder_object object;
+ binder_size_t object_offset;
+
+- binder_alloc_copy_from_buffer(&target_proc->alloc,
+- &object_offset,
+- t->buffer,
+- buffer_offset,
+- sizeof(object_offset));
++ if (binder_alloc_copy_from_buffer(&target_proc->alloc,
++ &object_offset,
++ t->buffer,
++ buffer_offset,
++ sizeof(object_offset))) {
++ return_error = BR_FAILED_REPLY;
++ return_error_param = -EINVAL;
++ return_error_line = __LINE__;
++ goto err_bad_offset;
++ }
+ object_size = binder_get_object(target_proc, t->buffer,
+ object_offset, &object);
+ if (object_size == 0 || object_offset < off_min) {
+@@ -3262,15 +3281,17 @@ static void binder_transaction(struct binder_proc *proc,
+
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_binder(fp, t, thread);
+- if (ret < 0) {
++
++ if (ret < 0 ||
++ binder_alloc_copy_to_buffer(&target_proc->alloc,
++ t->buffer,
++ object_offset,
++ fp, sizeof(*fp))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
+ }
+- binder_alloc_copy_to_buffer(&target_proc->alloc,
+- t->buffer, object_offset,
+- fp, sizeof(*fp));
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+@@ -3278,15 +3299,16 @@ static void binder_transaction(struct binder_proc *proc,
+
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_handle(fp, t, thread);
+- if (ret < 0) {
++ if (ret < 0 ||
++ binder_alloc_copy_to_buffer(&target_proc->alloc,
++ t->buffer,
++ object_offset,
++ fp, sizeof(*fp))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
+ }
+- binder_alloc_copy_to_buffer(&target_proc->alloc,
+- t->buffer, object_offset,
+- fp, sizeof(*fp));
+ } break;
+
+ case BINDER_TYPE_FD: {
+@@ -3296,16 +3318,17 @@ static void binder_transaction(struct binder_proc *proc,
+ int ret = binder_translate_fd(fp->fd, fd_offset, t,
+ thread, in_reply_to);
+
+- if (ret < 0) {
++ fp->pad_binder = 0;
++ if (ret < 0 ||
++ binder_alloc_copy_to_buffer(&target_proc->alloc,
++ t->buffer,
++ object_offset,
++ fp, sizeof(*fp))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
+ }
+- fp->pad_binder = 0;
+- binder_alloc_copy_to_buffer(&target_proc->alloc,
+- t->buffer, object_offset,
+- fp, sizeof(*fp));
+ } break;
+ case BINDER_TYPE_FDA: {
+ struct binder_object ptr_object;
+@@ -3393,15 +3416,16 @@ static void binder_transaction(struct binder_proc *proc,
+ num_valid,
+ last_fixup_obj_off,
+ last_fixup_min_off);
+- if (ret < 0) {
++ if (ret < 0 ||
++ binder_alloc_copy_to_buffer(&target_proc->alloc,
++ t->buffer,
++ object_offset,
++ bp, sizeof(*bp))) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_translate_failed;
+ }
+- binder_alloc_copy_to_buffer(&target_proc->alloc,
+- t->buffer, object_offset,
+- bp, sizeof(*bp));
+ last_fixup_obj_off = object_offset;
+ last_fixup_min_off = 0;
+ } break;
+@@ -4140,20 +4164,27 @@ static int binder_apply_fd_fixups(struct binder_proc *proc,
+ trace_binder_transaction_fd_recv(t, fd, fixup->offset);
+ fd_install(fd, fixup->file);
+ fixup->file = NULL;
+- binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
+- fixup->offset, &fd,
+- sizeof(u32));
++ if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
++ fixup->offset, &fd,
++ sizeof(u32))) {
++ ret = -EINVAL;
++ break;
++ }
+ }
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ if (fixup->file) {
+ fput(fixup->file);
+ } else if (ret) {
+ u32 fd;
+-
+- binder_alloc_copy_from_buffer(&proc->alloc, &fd,
+- t->buffer, fixup->offset,
+- sizeof(fd));
+- binder_deferred_fd_close(fd);
++ int err;
++
++ err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
++ t->buffer,
++ fixup->offset,
++ sizeof(fd));
++ WARN_ON(err);
++ if (!err)
++ binder_deferred_fd_close(fd);
+ }
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+@@ -4268,6 +4299,8 @@ retry:
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
+ cmd = BR_TRANSACTION_COMPLETE;
++ kfree(w);
++ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+@@ -4276,8 +4309,6 @@ retry:
+ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+ "%d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+- kfree(w);
+- binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index ce5603c2291c..6d79a1b0d446 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -1119,15 +1119,16 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ return 0;
+ }
+
+-static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+- bool to_buffer,
+- struct binder_buffer *buffer,
+- binder_size_t buffer_offset,
+- void *ptr,
+- size_t bytes)
++static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
++ bool to_buffer,
++ struct binder_buffer *buffer,
++ binder_size_t buffer_offset,
++ void *ptr,
++ size_t bytes)
+ {
+ /* All copies must be 32-bit aligned and 32-bit size */
+- BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));
++ if (!check_buffer(alloc, buffer, buffer_offset, bytes))
++ return -EINVAL;
+
+ while (bytes) {
+ unsigned long size;
+@@ -1155,25 +1156,26 @@ static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
+ ptr = ptr + size;
+ buffer_offset += size;
+ }
++ return 0;
+ }
+
+-void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+- struct binder_buffer *buffer,
+- binder_size_t buffer_offset,
+- void *src,
+- size_t bytes)
++int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
++ struct binder_buffer *buffer,
++ binder_size_t buffer_offset,
++ void *src,
++ size_t bytes)
+ {
+- binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
+- src, bytes);
++ return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
++ src, bytes);
+ }
+
+-void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+- void *dest,
+- struct binder_buffer *buffer,
+- binder_size_t buffer_offset,
+- size_t bytes)
++int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
++ void *dest,
++ struct binder_buffer *buffer,
++ binder_size_t buffer_offset,
++ size_t bytes)
+ {
+- binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
+- dest, bytes);
++ return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
++ dest, bytes);
+ }
+
+diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
+index 71bfa95f8e09..db9c1b984695 100644
+--- a/drivers/android/binder_alloc.h
++++ b/drivers/android/binder_alloc.h
+@@ -159,17 +159,17 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+ const void __user *from,
+ size_t bytes);
+
+-void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+- struct binder_buffer *buffer,
+- binder_size_t buffer_offset,
+- void *src,
+- size_t bytes);
+-
+-void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+- void *dest,
+- struct binder_buffer *buffer,
+- binder_size_t buffer_offset,
+- size_t bytes);
++int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
++ struct binder_buffer *buffer,
++ binder_size_t buffer_offset,
++ void *src,
++ size_t bytes);
++
++int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
++ void *dest,
++ struct binder_buffer *buffer,
++ binder_size_t buffer_offset,
++ size_t bytes);
+
+ #endif /* _LINUX_BINDER_ALLOC_H */
+
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 90325e1749fb..d47ad10a35fe 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -289,15 +289,15 @@ static int tpm_class_shutdown(struct device *dev)
+ {
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
++ down_write(&chip->ops_sem);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+- down_write(&chip->ops_sem);
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
+- chip->ops = NULL;
+- up_write(&chip->ops_sem);
+ }
++ chip->ops = NULL;
++ up_write(&chip->ops_sem);
+
+ return 0;
+ }
+diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
+index 85dcf2654d11..faacbe1ffa1a 100644
+--- a/drivers/char/tpm/tpm1-cmd.c
++++ b/drivers/char/tpm/tpm1-cmd.c
+@@ -510,7 +510,7 @@ struct tpm1_get_random_out {
+ *
+ * Return:
+ * * number of bytes read
+- * * -errno or a TPM return code otherwise
++ * * -errno (positive TPM return codes are masked to -EIO)
+ */
+ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+ {
+@@ -531,8 +531,11 @@ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+
+ rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len),
+ "attempting get random");
+- if (rc)
++ if (rc) {
++ if (rc > 0)
++ rc = -EIO;
+ goto out;
++ }
+
+ out = (struct tpm1_get_random_out *)&buf.data[TPM_HEADER_SIZE];
+
+diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
+index 4de49924cfc4..d103545e4055 100644
+--- a/drivers/char/tpm/tpm2-cmd.c
++++ b/drivers/char/tpm/tpm2-cmd.c
+@@ -297,7 +297,7 @@ struct tpm2_get_random_out {
+ *
+ * Return:
+ * size of the buffer on success,
+- * -errno otherwise
++ * -errno otherwise (positive TPM return codes are masked to -EIO)
+ */
+ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+ {
+@@ -324,8 +324,11 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
+ offsetof(struct tpm2_get_random_out,
+ buffer),
+ "attempting get random");
+- if (err)
++ if (err) {
++ if (err > 0)
++ err = -EIO;
+ goto out;
++ }
+
+ out = (struct tpm2_get_random_out *)
+ &buf.data[TPM_HEADER_SIZE];
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index fbc7bf9d7380..427c78d4d948 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -2339,7 +2339,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2384,7 +2384,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2427,7 +2427,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2472,7 +2472,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2515,7 +2515,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2560,7 +2560,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2689,7 +2689,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+- "cbc-aes-talitos",
++ "cbc-aes-talitos-hsna",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+@@ -2732,7 +2732,7 @@ static struct talitos_alg_template driver_algs[] = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+- "cbc-3des-talitos",
++ "cbc-3des-talitos-hsna",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index b032d3899fa3..bfc584ada4eb 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -1241,6 +1241,7 @@
+ #define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+ #define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
+ #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f
++#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65 0x4d65
+ #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22 0x4e22
+
+
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index 671a285724f9..1549c7a2f04c 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -130,6 +130,7 @@ static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
++ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
+diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
+index 4ee4c80a4354..543cc3d36e1d 100644
+--- a/drivers/hwtracing/coresight/coresight-etb10.c
++++ b/drivers/hwtracing/coresight/coresight-etb10.c
+@@ -373,12 +373,10 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
+ {
+- int node, cpu = event->cpu;
++ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
+
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
+index 16b0c0e1e43a..ad6e16c96263 100644
+--- a/drivers/hwtracing/coresight/coresight-funnel.c
++++ b/drivers/hwtracing/coresight/coresight-funnel.c
+@@ -241,6 +241,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
+ }
+
+ pm_runtime_put(dev);
++ ret = 0;
+
+ out_disable_clk:
+ if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+index 2527b5d3b65e..8de109de171f 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
+@@ -378,12 +378,10 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
+ {
+- int node, cpu = event->cpu;
++ int node;
+ struct cs_buffers *buf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
+
+ /* Allocate memory structure for interaction with Perf */
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index df6e4b0b84e9..9f293b9dce8c 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -1178,14 +1178,11 @@ static struct etr_buf *
+ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+ {
+- int node, cpu = event->cpu;
++ int node;
+ struct etr_buf *etr_buf;
+ unsigned long size;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
+-
++ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
+ /*
+ * Try to match the perf ring buffer size if it is larger
+ * than the size requested via sysfs.
+@@ -1317,13 +1314,11 @@ static struct etr_perf_buffer *
+ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+ {
+- int node, cpu = event->cpu;
++ int node;
+ struct etr_buf *etr_buf;
+ struct etr_perf_buffer *etr_perf;
+
+- if (cpu == -1)
+- cpu = smp_processor_id();
+- node = cpu_to_node(cpu);
++ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
+
+ etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+ if (!etr_perf)
+diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
+index 2327ec18b40c..1f7ce5186dfc 100644
+--- a/drivers/iio/adc/stm32-adc-core.c
++++ b/drivers/iio/adc/stm32-adc-core.c
+@@ -87,6 +87,7 @@ struct stm32_adc_priv_cfg {
+ * @domain: irq domain reference
+ * @aclk: clock reference for the analog circuitry
+ * @bclk: bus clock common for all ADCs, depends on part used
++ * @vdda: vdda analog supply reference
+ * @vref: regulator reference
+ * @cfg: compatible configuration data
+ * @common: common data for all ADC instances
+@@ -97,6 +98,7 @@ struct stm32_adc_priv {
+ struct irq_domain *domain;
+ struct clk *aclk;
+ struct clk *bclk;
++ struct regulator *vdda;
+ struct regulator *vref;
+ const struct stm32_adc_priv_cfg *cfg;
+ struct stm32_adc_common common;
+@@ -394,10 +396,16 @@ static int stm32_adc_core_hw_start(struct device *dev)
+ struct stm32_adc_priv *priv = to_stm32_adc_priv(common);
+ int ret;
+
++ ret = regulator_enable(priv->vdda);
++ if (ret < 0) {
++ dev_err(dev, "vdda enable failed %d\n", ret);
++ return ret;
++ }
++
+ ret = regulator_enable(priv->vref);
+ if (ret < 0) {
+ dev_err(dev, "vref enable failed\n");
+- return ret;
++ goto err_vdda_disable;
+ }
+
+ if (priv->bclk) {
+@@ -425,6 +433,8 @@ err_bclk_disable:
+ clk_disable_unprepare(priv->bclk);
+ err_regulator_disable:
+ regulator_disable(priv->vref);
++err_vdda_disable:
++ regulator_disable(priv->vdda);
+
+ return ret;
+ }
+@@ -441,6 +451,7 @@ static void stm32_adc_core_hw_stop(struct device *dev)
+ if (priv->bclk)
+ clk_disable_unprepare(priv->bclk);
+ regulator_disable(priv->vref);
++ regulator_disable(priv->vdda);
+ }
+
+ static int stm32_adc_probe(struct platform_device *pdev)
+@@ -468,6 +479,14 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ return PTR_ERR(priv->common.base);
+ priv->common.phys_base = res->start;
+
++ priv->vdda = devm_regulator_get(&pdev->dev, "vdda");
++ if (IS_ERR(priv->vdda)) {
++ ret = PTR_ERR(priv->vdda);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev, "vdda get failed, %d\n", ret);
++ return ret;
++ }
++
+ priv->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(priv->vref)) {
+ ret = PTR_ERR(priv->vref);
+diff --git a/drivers/media/dvb-frontends/stv0297.c b/drivers/media/dvb-frontends/stv0297.c
+index dac396c95a59..6d5962d5697a 100644
+--- a/drivers/media/dvb-frontends/stv0297.c
++++ b/drivers/media/dvb-frontends/stv0297.c
+@@ -682,7 +682,7 @@ static const struct dvb_frontend_ops stv0297_ops = {
+ .delsys = { SYS_DVBC_ANNEX_A },
+ .info = {
+ .name = "ST STV0297 DVB-C",
+- .frequency_min_hz = 470 * MHz,
++ .frequency_min_hz = 47 * MHz,
+ .frequency_max_hz = 862 * MHz,
+ .frequency_stepsize_hz = 62500,
+ .symbol_rate_min = 870000,
+diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
+index 951c984de61a..fb10eafe9bde 100644
+--- a/drivers/misc/lkdtm/Makefile
++++ b/drivers/misc/lkdtm/Makefile
+@@ -15,8 +15,7 @@ KCOV_INSTRUMENT_rodata.o := n
+
+ OBJCOPYFLAGS :=
+ OBJCOPYFLAGS_rodata_objcopy.o := \
+- --set-section-flags .text=alloc,readonly \
+- --rename-section .text=.rodata
++ --rename-section .text=.rodata,alloc,readonly,load
+ targets += rodata.o rodata_objcopy.o
+ $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
+ $(call if_changed,objcopy)
+diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
+index 300ed69fe2c7..16695366ec92 100644
+--- a/drivers/misc/vmw_vmci/vmci_context.c
++++ b/drivers/misc/vmw_vmci/vmci_context.c
+@@ -21,6 +21,9 @@
+ #include "vmci_driver.h"
+ #include "vmci_event.h"
+
++/* Use a wide upper bound for the maximum contexts. */
++#define VMCI_MAX_CONTEXTS 2000
++
+ /*
+ * List of current VMCI contexts. Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+@@ -117,19 +120,22 @@ struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+ /* Initialize host-specific VMCI context. */
+ init_waitqueue_head(&context->host_context.wait_queue);
+
+- context->queue_pair_array = vmci_handle_arr_create(0);
++ context->queue_pair_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
+ if (!context->queue_pair_array) {
+ error = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+- context->doorbell_array = vmci_handle_arr_create(0);
++ context->doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_qp_array;
+ }
+
+- context->pending_doorbell_array = vmci_handle_arr_create(0);
++ context->pending_doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->pending_doorbell_array) {
+ error = -ENOMEM;
+ goto err_free_db_array;
+@@ -204,7 +210,7 @@ static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+ * We create an array to hold the subscribers we find when
+ * scanning through all contexts.
+ */
+- subscriber_array = vmci_handle_arr_create(0);
++ subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
+ if (subscriber_array == NULL)
+ return VMCI_ERROR_NO_MEM;
+
+@@ -623,20 +629,26 @@ int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+
+ spin_lock(&context->lock);
+
+- list_for_each_entry(n, &context->notifier_list, node) {
+- if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+- exists = true;
+- break;
++ if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
++ list_for_each_entry(n, &context->notifier_list, node) {
++ if (vmci_handle_is_equal(n->handle, notifier->handle)) {
++ exists = true;
++ break;
++ }
+ }
+- }
+
+- if (exists) {
+- kfree(notifier);
+- result = VMCI_ERROR_ALREADY_EXISTS;
++ if (exists) {
++ kfree(notifier);
++ result = VMCI_ERROR_ALREADY_EXISTS;
++ } else {
++ list_add_tail_rcu(&notifier->node,
++ &context->notifier_list);
++ context->n_notifiers++;
++ result = VMCI_SUCCESS;
++ }
+ } else {
+- list_add_tail_rcu(&notifier->node, &context->notifier_list);
+- context->n_notifiers++;
+- result = VMCI_SUCCESS;
++ kfree(notifier);
++ result = VMCI_ERROR_NO_MEM;
+ }
+
+ spin_unlock(&context->lock);
+@@ -721,8 +733,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+ u32 *buf_size, void **pbuf)
+ {
+ struct dbell_cpt_state *dbells;
+- size_t n_doorbells;
+- int i;
++ u32 i, n_doorbells;
+
+ n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+ if (n_doorbells > 0) {
+@@ -860,7 +871,8 @@ int vmci_ctx_rcv_notifications_get(u32 context_id,
+ spin_lock(&context->lock);
+
+ *db_handle_array = context->pending_doorbell_array;
+- context->pending_doorbell_array = vmci_handle_arr_create(0);
++ context->pending_doorbell_array =
++ vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
+ if (!context->pending_doorbell_array) {
+ context->pending_doorbell_array = *db_handle_array;
+ *db_handle_array = NULL;
+@@ -942,12 +954,11 @@ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+ return VMCI_ERROR_NOT_FOUND;
+
+ spin_lock(&context->lock);
+- if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+- vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+- result = VMCI_SUCCESS;
+- } else {
++ if (!vmci_handle_arr_has_entry(context->doorbell_array, handle))
++ result = vmci_handle_arr_append_entry(&context->doorbell_array,
++ handle);
++ else
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+- }
+
+ spin_unlock(&context->lock);
+ vmci_ctx_put(context);
+@@ -1083,15 +1094,16 @@ int vmci_ctx_notify_dbell(u32 src_cid,
+ if (!vmci_handle_arr_has_entry(
+ dst_context->pending_doorbell_array,
+ handle)) {
+- vmci_handle_arr_append_entry(
++ result = vmci_handle_arr_append_entry(
+ &dst_context->pending_doorbell_array,
+ handle);
+-
+- ctx_signal_notify(dst_context);
+- wake_up(&dst_context->host_context.wait_queue);
+-
++ if (result == VMCI_SUCCESS) {
++ ctx_signal_notify(dst_context);
++ wake_up(&dst_context->host_context.wait_queue);
++ }
++ } else {
++ result = VMCI_SUCCESS;
+ }
+- result = VMCI_SUCCESS;
+ }
+ spin_unlock(&dst_context->lock);
+ }
+@@ -1118,13 +1130,11 @@ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+ if (context == NULL || vmci_handle_is_invalid(handle))
+ return VMCI_ERROR_INVALID_ARGS;
+
+- if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+- vmci_handle_arr_append_entry(&context->queue_pair_array,
+- handle);
+- result = VMCI_SUCCESS;
+- } else {
++ if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle))
++ result = vmci_handle_arr_append_entry(
++ &context->queue_pair_array, handle);
++ else
+ result = VMCI_ERROR_DUPLICATE_ENTRY;
+- }
+
+ return result;
+ }
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
+index c527388f5d7b..de7fee7ead1b 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.c
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
+@@ -8,24 +8,29 @@
+ #include <linux/slab.h>
+ #include "vmci_handle_array.h"
+
+-static size_t handle_arr_calc_size(size_t capacity)
++static size_t handle_arr_calc_size(u32 capacity)
+ {
+- return sizeof(struct vmci_handle_arr) +
++ return VMCI_HANDLE_ARRAY_HEADER_SIZE +
+ capacity * sizeof(struct vmci_handle);
+ }
+
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity)
+ {
+ struct vmci_handle_arr *array;
+
++ if (max_capacity == 0 || capacity > max_capacity)
++ return NULL;
++
+ if (capacity == 0)
+- capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
++ capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY,
++ max_capacity);
+
+ array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+ if (!array)
+ return NULL;
+
+ array->capacity = capacity;
++ array->max_capacity = max_capacity;
+ array->size = 0;
+
+ return array;
+@@ -36,27 +41,34 @@ void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+ kfree(array);
+ }
+
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+- struct vmci_handle handle)
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++ struct vmci_handle handle)
+ {
+ struct vmci_handle_arr *array = *array_ptr;
+
+ if (unlikely(array->size >= array->capacity)) {
+ /* reallocate. */
+ struct vmci_handle_arr *new_array;
+- size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+- size_t new_size = handle_arr_calc_size(new_capacity);
++ u32 capacity_bump = min(array->max_capacity - array->capacity,
++ array->capacity);
++ size_t new_size = handle_arr_calc_size(array->capacity +
++ capacity_bump);
++
++ if (array->size >= array->max_capacity)
++ return VMCI_ERROR_NO_MEM;
+
+ new_array = krealloc(array, new_size, GFP_ATOMIC);
+ if (!new_array)
+- return;
++ return VMCI_ERROR_NO_MEM;
+
+- new_array->capacity = new_capacity;
++ new_array->capacity += capacity_bump;
+ *array_ptr = array = new_array;
+ }
+
+ array->entries[array->size] = handle;
+ array->size++;
++
++ return VMCI_SUCCESS;
+ }
+
+ /*
+@@ -66,7 +78,7 @@ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+ {
+ struct vmci_handle handle = VMCI_INVALID_HANDLE;
+- size_t i;
++ u32 i;
+
+ for (i = 0; i < array->size; i++) {
+ if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+@@ -101,7 +113,7 @@ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+ * Handle at given index, VMCI_INVALID_HANDLE if invalid index.
+ */
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index)
+ {
+ if (unlikely(index >= array->size))
+ return VMCI_INVALID_HANDLE;
+@@ -112,7 +124,7 @@ vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle)
+ {
+- size_t i;
++ u32 i;
+
+ for (i = 0; i < array->size; i++)
+ if (vmci_handle_is_equal(array->entries[i], entry_handle))
+diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
+index bd1559a548e9..96193f85be5b 100644
+--- a/drivers/misc/vmw_vmci/vmci_handle_array.h
++++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
+@@ -9,32 +9,41 @@
+ #define _VMCI_HANDLE_ARRAY_H_
+
+ #include <linux/vmw_vmci_defs.h>
++#include <linux/limits.h>
+ #include <linux/types.h>
+
+-#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+-#define VMCI_ARR_CAP_MULT 2 /* Array capacity multiplier */
+-
+ struct vmci_handle_arr {
+- size_t capacity;
+- size_t size;
++ u32 capacity;
++ u32 max_capacity;
++ u32 size;
++ u32 pad;
+ struct vmci_handle entries[];
+ };
+
+-struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
++#define VMCI_HANDLE_ARRAY_HEADER_SIZE \
++ offsetof(struct vmci_handle_arr, entries)
++/* Select a default capacity that results in a 64-byte array allocation */
++#define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6
++/* Make sure that the max array size can be expressed by a u32 */
++#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \
++ ((U32_MAX - VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \
++ sizeof(struct vmci_handle))
++
++struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity);
+ void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+-void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+- struct vmci_handle handle);
++int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
++ struct vmci_handle handle);
+ struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+ struct vmci_handle
+ entry_handle);
+ struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+ struct vmci_handle
+-vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
++vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, u32 index);
+ bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+ struct vmci_handle entry_handle);
+ struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+-static inline size_t vmci_handle_arr_get_size(
++static inline u32 vmci_handle_arr_get_size(
+ const struct vmci_handle_arr *array)
+ {
+ return array->size;
+diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
+index e7c3f3b8457d..99f1897a775d 100644
+--- a/drivers/net/wireless/ath/carl9170/usb.c
++++ b/drivers/net/wireless/ath/carl9170/usb.c
+@@ -128,6 +128,8 @@ static const struct usb_device_id carl9170_usb_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(usb, carl9170_usb_ids);
+
++static struct usb_driver carl9170_driver;
++
+ static void carl9170_usb_submit_data_urb(struct ar9170 *ar)
+ {
+ struct urb *urb;
+@@ -966,32 +968,28 @@ err_out:
+
+ static void carl9170_usb_firmware_failed(struct ar9170 *ar)
+ {
+- struct device *parent = ar->udev->dev.parent;
+- struct usb_device *udev;
+-
+- /*
+- * Store a copy of the usb_device pointer locally.
+- * This is because device_release_driver initiates
+- * carl9170_usb_disconnect, which in turn frees our
+- * driver context (ar).
++ /* Store copies of the usb_interface and usb_device pointers locally.
++ * This is because release_driver initiates carl9170_usb_disconnect,
++ * which in turn frees our driver context (ar).
+ */
+- udev = ar->udev;
++ struct usb_interface *intf = ar->intf;
++ struct usb_device *udev = ar->udev;
+
+ complete(&ar->fw_load_wait);
++ /* At this point 'ar' may already be freed. Don't use it anymore */
++ ar = NULL;
+
+ /* unbind anything failed */
+- if (parent)
+- device_lock(parent);
+-
+- device_release_driver(&udev->dev);
+- if (parent)
+- device_unlock(parent);
++ usb_lock_device(udev);
++ usb_driver_release_interface(&carl9170_driver, intf);
++ usb_unlock_device(udev);
+
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+ {
++ struct usb_interface *intf = ar->intf;
+ int err;
+
+ err = carl9170_parse_firmware(ar);
+@@ -1009,7 +1007,7 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
+ goto err_unrx;
+
+ complete(&ar->fw_load_wait);
+- usb_put_dev(ar->udev);
++ usb_put_intf(intf);
+ return;
+
+ err_unrx:
+@@ -1052,7 +1050,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ return PTR_ERR(ar);
+
+ udev = interface_to_usbdev(intf);
+- usb_get_dev(udev);
+ ar->udev = udev;
+ ar->intf = intf;
+ ar->features = id->driver_info;
+@@ -1094,15 +1091,14 @@ static int carl9170_usb_probe(struct usb_interface *intf,
+ atomic_set(&ar->rx_anch_urbs, 0);
+ atomic_set(&ar->rx_pool_urbs, 0);
+
+- usb_get_dev(ar->udev);
++ usb_get_intf(intf);
+
+ carl9170_set_state(ar, CARL9170_STOPPED);
+
+ err = request_firmware_nowait(THIS_MODULE, 1, CARL9170FW_NAME,
+ &ar->udev->dev, GFP_KERNEL, ar, carl9170_usb_firmware_step2);
+ if (err) {
+- usb_put_dev(udev);
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ carl9170_free(ar);
+ }
+ return err;
+@@ -1131,7 +1127,6 @@ static void carl9170_usb_disconnect(struct usb_interface *intf)
+
+ carl9170_release_firmware(ar);
+ carl9170_free(ar);
+- usb_put_dev(udev);
+ }
+
+ #ifdef CONFIG_PM
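
The carl9170 hunks above follow a pattern worth naming: hold a reference on the usb_interface instead of the usb_device, and unbind with usb_driver_release_interface() rather than device_release_driver(), so only this driver's binding is torn down when firmware loading fails. Condensed to its core (the driver struct and priv fields are placeholders, not carl9170's exact names):

    static struct usb_driver my_driver;        /* forward declaration, as above */

    static void my_fw_failed(struct my_priv *priv)
    {
            /* Copy out what we need first: releasing the interface below
             * triggers disconnect, which frees priv. */
            struct usb_interface *intf = priv->intf;
            struct usb_device *udev = interface_to_usbdev(intf);

            complete(&priv->fw_load_wait);

            usb_lock_device(udev);
            usb_driver_release_interface(&my_driver, intf); /* unbinds only us */
            usb_unlock_device(udev);

            usb_put_intf(intf);        /* drop the ref taken with usb_get_intf() */
    }
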
+diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
+index f937815f0f2c..b94764c88750 100644
+--- a/drivers/net/wireless/intersil/p54/p54usb.c
++++ b/drivers/net/wireless/intersil/p54/p54usb.c
+@@ -30,6 +30,8 @@ MODULE_ALIAS("prism54usb");
+ MODULE_FIRMWARE("isl3886usb");
+ MODULE_FIRMWARE("isl3887usb");
+
++static struct usb_driver p54u_driver;
++
+ /*
+ * Note:
+ *
+@@ -918,9 +920,9 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+ {
+ struct p54u_priv *priv = context;
+ struct usb_device *udev = priv->udev;
++ struct usb_interface *intf = priv->intf;
+ int err;
+
+- complete(&priv->fw_wait_load);
+ if (firmware) {
+ priv->fw = firmware;
+ err = p54u_start_ops(priv);
+@@ -929,26 +931,22 @@ static void p54u_load_firmware_cb(const struct firmware *firmware,
+ dev_err(&udev->dev, "Firmware not found.\n");
+ }
+
+- if (err) {
+- struct device *parent = priv->udev->dev.parent;
+-
+- dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
+-
+- if (parent)
+- device_lock(parent);
++ complete(&priv->fw_wait_load);
++ /*
++ * At this point p54u_disconnect may have already freed
++ * the "priv" context. Do not use it anymore!
++ */
++ priv = NULL;
+
+- device_release_driver(&udev->dev);
+- /*
+- * At this point p54u_disconnect has already freed
+- * the "priv" context. Do not use it anymore!
+- */
+- priv = NULL;
++ if (err) {
++ dev_err(&intf->dev, "failed to initialize device (%d)\n", err);
+
+- if (parent)
+- device_unlock(parent);
++ usb_lock_device(udev);
++ usb_driver_release_interface(&p54u_driver, intf);
++ usb_unlock_device(udev);
+ }
+
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ static int p54u_load_firmware(struct ieee80211_hw *dev,
+@@ -969,14 +967,14 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
+ dev_info(&priv->udev->dev, "Loading firmware file %s\n",
+ p54u_fwlist[i].fw);
+
+- usb_get_dev(udev);
++ usb_get_intf(intf);
+ err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
+ device, GFP_KERNEL, priv,
+ p54u_load_firmware_cb);
+ if (err) {
+ dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
+ "(%d)!\n", p54u_fwlist[i].fw, err);
+- usb_put_dev(udev);
++ usb_put_intf(intf);
+ }
+
+ return err;
+@@ -1008,8 +1006,6 @@ static int p54u_probe(struct usb_interface *intf,
+ skb_queue_head_init(&priv->rx_queue);
+ init_usb_anchor(&priv->submitted);
+
+- usb_get_dev(udev);
+-
+ /* really lazy and simple way of figuring out if we're a 3887 */
+ /* TODO: should just stick the identification in the device table */
+ i = intf->altsetting->desc.bNumEndpoints;
+@@ -1050,10 +1046,8 @@ static int p54u_probe(struct usb_interface *intf,
+ priv->upload_fw = p54u_upload_firmware_net2280;
+ }
+ err = p54u_load_firmware(dev, intf);
+- if (err) {
+- usb_put_dev(udev);
++ if (err)
+ p54_free_common(dev);
+- }
+ return err;
+ }
+
+@@ -1069,7 +1063,6 @@ static void p54u_disconnect(struct usb_interface *intf)
+ wait_for_completion(&priv->fw_wait_load);
+ p54_unregister_common(dev);
+
+- usb_put_dev(interface_to_usbdev(intf));
+ release_firmware(priv->fw);
+ p54_free_common(dev);
+ }
+diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
+index ff9acd1563f4..5892898f8853 100644
+--- a/drivers/net/wireless/intersil/p54/txrx.c
++++ b/drivers/net/wireless/intersil/p54/txrx.c
+@@ -139,7 +139,10 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb)
+ unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON))
+ priv->beacon_req_id = data->req_id;
+
+- __skb_queue_after(&priv->tx_queue, target_skb, skb);
++ if (target_skb)
++ __skb_queue_after(&priv->tx_queue, target_skb, skb);
++ else
++ __skb_queue_head(&priv->tx_queue, skb);
+ spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
+ return 0;
+ }
+diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
+index b73f99dc5a72..1fb76d2f5d3f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/fw.h
++++ b/drivers/net/wireless/marvell/mwifiex/fw.h
+@@ -1759,9 +1759,10 @@ struct mwifiex_ie_types_wmm_queue_status {
+ struct ieee_types_vendor_header {
+ u8 element_id;
+ u8 len;
+- u8 oui[4]; /* 0~2: oui, 3: oui_type */
+- u8 oui_subtype;
+- u8 version;
++ struct {
++ u8 oui[3];
++ u8 oui_type;
++ } __packed oui;
+ } __packed;
+
+ struct ieee_types_wmm_parameter {
+@@ -1775,6 +1776,9 @@ struct ieee_types_wmm_parameter {
+ * Version [1]
+ */
+ struct ieee_types_vendor_header vend_hdr;
++ u8 oui_subtype;
++ u8 version;
++
+ u8 qos_info_bitmap;
+ u8 reserved;
+ struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
+@@ -1792,6 +1796,8 @@ struct ieee_types_wmm_info {
+ * Version [1]
+ */
+ struct ieee_types_vendor_header vend_hdr;
++ u8 oui_subtype;
++ u8 version;
+
+ u8 qos_info_bitmap;
+ } __packed;
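
The restructured ieee_types_vendor_header above packs the 3-byte OUI together with its one-byte type so a single 4-byte memcmp() matches both at once, and pushes oui_subtype/version down into the WMM-specific structs, since generic vendor IEs do not carry them. A self-contained sketch of the matching idiom (the WPA OUI-plus-type value 00:50:F2, 0x01 is the conventional one, stated here as an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct vendor_hdr {
            uint8_t element_id;
            uint8_t len;
            struct {
                    uint8_t oui[3];
                    uint8_t oui_type;
            } __attribute__((packed)) oui;
    } __attribute__((packed));

    static const uint8_t wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };

    static bool is_wpa_ie(const struct vendor_hdr *hdr)
    {
            /* One compare covers OUI and type because they are adjacent. */
            return memcmp(&hdr->oui, wpa_oui, sizeof(wpa_oui)) == 0;
    }
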
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index c269a0de9413..e2786ab612ca 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -1361,21 +1361,25 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
+ break;
+
+ case WLAN_EID_VENDOR_SPECIFIC:
+- if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
+- return -EINVAL;
+-
+ vendor_ie = (struct ieee_types_vendor_specific *)
+ current_ptr;
+
+- if (!memcmp
+- (vendor_ie->vend_hdr.oui, wpa_oui,
+- sizeof(wpa_oui))) {
++ /* 802.11 requires at least a 3-byte OUI. */
++ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
++ return -EINVAL;
++
++ /* Not long enough for a match? Skip it. */
++ if (element_len < sizeof(wpa_oui))
++ break;
++
++ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
++ sizeof(wpa_oui))) {
+ bss_entry->bcn_wpa_ie =
+ (struct ieee_types_vendor_specific *)
+ current_ptr;
+ bss_entry->wpa_offset = (u16)
+ (current_ptr - bss_entry->beacon_buf);
+- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
++ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
+ sizeof(wmm_oui))) {
+ if (total_ie_len ==
+ sizeof(struct ieee_types_wmm_parameter) ||
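
The check ordering in the scan.c hunk is the point of the fix: an element shorter than the mandatory 3-byte OUI is malformed (hard error), while one shorter than the 4-byte OUI-plus-type simply cannot match (skip it). A sketch of that two-tier check inside a generic id/len/payload TLV walk, reusing the wpa_oui constant and headers from the previous sketch:

    /* Returns -1 on malformed input, 0 after a clean walk. */
    static int walk_vendor_ies(const uint8_t *buf, size_t len)
    {
            while (len >= 2) {
                    uint8_t id = buf[0], elen = buf[1];

                    if (2 + (size_t)elen > len)
                            return -1;        /* element overruns the buffer */
                    if (id == 221) {          /* WLAN_EID_VENDOR_SPECIFIC */
                            if (elen < 3)
                                    return -1;        /* OUI is mandatory */
                            if (elen >= 4 && !memcmp(buf + 2, wpa_oui, 4)) {
                                    /* matched: record the WPA IE here */
                            }
                            /* elen == 3: valid, but too short to match; skip */
                    }
                    buf += 2 + (size_t)elen;
                    len -= 2 + (size_t)elen;
            }
            return 0;
    }
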
+diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+index ebc0e41e5d3b..74e50566db1f 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
++++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+@@ -1351,7 +1351,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+ /* Test to see if it is a WPA IE, if not, then
+ * it is a gen IE
+ */
+- if (!memcmp(pvendor_ie->oui, wpa_oui,
++ if (!memcmp(&pvendor_ie->oui, wpa_oui,
+ sizeof(wpa_oui))) {
+ /* IE is a WPA/WPA2 IE so call set_wpa function
+ */
+@@ -1361,7 +1361,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
+ goto next_ie;
+ }
+
+- if (!memcmp(pvendor_ie->oui, wps_oui,
++ if (!memcmp(&pvendor_ie->oui, wps_oui,
+ sizeof(wps_oui))) {
+ /* Test to see if it is a WPS IE,
+ * if so, enable wps session flag
+diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
+index 407b9932ca4d..64916ba15df5 100644
+--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
+@@ -240,7 +240,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
+ mwifiex_dbg(priv->adapter, INFO,
+ "info: WMM Parameter IE: version=%d,\t"
+ "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
++ wmm_ie->version, wmm_ie->qos_info_bitmap &
+ IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+ wmm_ie->reserved);
+
+diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
+index 65f60c2b702a..f7e673121864 100644
+--- a/drivers/staging/comedi/drivers/amplc_pci230.c
++++ b/drivers/staging/comedi/drivers/amplc_pci230.c
+@@ -2330,7 +2330,8 @@ static irqreturn_t pci230_interrupt(int irq, void *d)
+ devpriv->intr_running = false;
+ spin_unlock_irqrestore(&devpriv->isr_spinlock, irqflags);
+
+- comedi_handle_events(dev, s_ao);
++ if (s_ao)
++ comedi_handle_events(dev, s_ao);
+ comedi_handle_events(dev, s_ai);
+
+ return IRQ_HANDLED;
+diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
+index 3be927f1d3a9..e15e33ed94ae 100644
+--- a/drivers/staging/comedi/drivers/dt282x.c
++++ b/drivers/staging/comedi/drivers/dt282x.c
+@@ -557,7 +557,8 @@ static irqreturn_t dt282x_interrupt(int irq, void *d)
+ }
+ #endif
+ comedi_handle_events(dev, s);
+- comedi_handle_events(dev, s_ao);
++ if (s_ao)
++ comedi_handle_events(dev, s_ao);
+
+ return IRQ_RETVAL(handled);
+ }
+diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+index e3c3e427309a..f73edaf6ce87 100644
+--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
++++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+@@ -1086,6 +1086,7 @@ static int port_switchdev_event(struct notifier_block *unused,
+ dev_hold(dev);
+ break;
+ default:
++ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
+index 03d919a94552..93763d40e3a1 100644
+--- a/drivers/staging/mt7621-pci/pci-mt7621.c
++++ b/drivers/staging/mt7621-pci/pci-mt7621.c
+@@ -40,7 +40,7 @@
+ /* MediaTek specific configuration registers */
+ #define PCIE_FTS_NUM 0x70c
+ #define PCIE_FTS_NUM_MASK GENMASK(15, 8)
+-#define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8)
++#define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8)
+
+ /* rt_sysc_membase relative registers */
+ #define RALINK_PCIE_CLK_GEN 0x7c
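
The one-character mt7621 fix above is a classic C precedence bug: << binds tighter than &, so the old macro computed (x) & (0xff << 8), a mask of 0xff00, instead of shifting the low byte into bits 15:8. A two-line demonstration:

    #include <stdio.h>

    #define OLD_FTS_NUM_L0(x) ((x) & 0xff << 8)        /* masks with 0xff00 */
    #define NEW_FTS_NUM_L0(x) (((x) & 0xff) << 8)      /* shifts the low byte */

    int main(void)
    {
            printf("old: 0x%x\n", OLD_FTS_NUM_L0(0x3f));        /* 0x0 */
            printf("new: 0x%x\n", NEW_FTS_NUM_L0(0x3f));        /* 0x3f00 */
            return 0;
    }
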
+diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+index a7230c0c7b23..8f5a8ac1b010 100644
+--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
++++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+@@ -124,10 +124,91 @@ static inline void handle_group_key(struct ieee_param *param,
+ }
+ }
+
+-static noinline_for_stack char *translate_scan(struct _adapter *padapter,
+- struct iw_request_info *info,
+- struct wlan_network *pnetwork,
+- char *start, char *stop)
++static noinline_for_stack char *translate_scan_wpa(struct iw_request_info *info,
++ struct wlan_network *pnetwork,
++ struct iw_event *iwe,
++ char *start, char *stop)
++{
++ /* parsing WPA/WPA2 IE */
++ u8 buf[MAX_WPA_IE_LEN];
++ u8 wpa_ie[255], rsn_ie[255];
++ u16 wpa_len = 0, rsn_len = 0;
++ int n, i;
++
++ r8712_get_sec_ie(pnetwork->network.IEs,
++ pnetwork->network.IELength, rsn_ie, &rsn_len,
++ wpa_ie, &wpa_len);
++ if (wpa_len > 0) {
++ memset(buf, 0, MAX_WPA_IE_LEN);
++ n = sprintf(buf, "wpa_ie=");
++ for (i = 0; i < wpa_len; i++) {
++ n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
++ "%02x", wpa_ie[i]);
++ if (n >= MAX_WPA_IE_LEN)
++ break;
++ }
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVCUSTOM;
++ iwe->u.data.length = (u16)strlen(buf);
++ start = iwe_stream_add_point(info, start, stop,
++ iwe, buf);
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = (u16)wpa_len;
++ start = iwe_stream_add_point(info, start, stop,
++ iwe, wpa_ie);
++ }
++ if (rsn_len > 0) {
++ memset(buf, 0, MAX_WPA_IE_LEN);
++ n = sprintf(buf, "rsn_ie=");
++ for (i = 0; i < rsn_len; i++) {
++ n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
++ "%02x", rsn_ie[i]);
++ if (n >= MAX_WPA_IE_LEN)
++ break;
++ }
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVCUSTOM;
++ iwe->u.data.length = strlen(buf);
++ start = iwe_stream_add_point(info, start, stop,
++ iwe, buf);
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = rsn_len;
++ start = iwe_stream_add_point(info, start, stop, iwe,
++ rsn_ie);
++ }
++
++ return start;
++}
++
++static noinline_for_stack char *translate_scan_wps(struct iw_request_info *info,
++ struct wlan_network *pnetwork,
++ struct iw_event *iwe,
++ char *start, char *stop)
++{
++ /* parsing WPS IE */
++ u8 wps_ie[512];
++ uint wps_ielen;
++
++ if (r8712_get_wps_ie(pnetwork->network.IEs,
++ pnetwork->network.IELength,
++ wps_ie, &wps_ielen)) {
++ if (wps_ielen > 2) {
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = (u16)wps_ielen;
++ start = iwe_stream_add_point(info, start, stop,
++ iwe, wps_ie);
++ }
++ }
++
++ return start;
++}
++
++static char *translate_scan(struct _adapter *padapter,
++ struct iw_request_info *info,
++ struct wlan_network *pnetwork,
++ char *start, char *stop)
+ {
+ struct iw_event iwe;
+ struct ieee80211_ht_cap *pht_capie;
+@@ -240,73 +321,11 @@ static noinline_for_stack char *translate_scan(struct _adapter *padapter,
+ /* Check if we added any event */
+ if ((current_val - start) > iwe_stream_lcp_len(info))
+ start = current_val;
+- /* parsing WPA/WPA2 IE */
+- {
+- u8 buf[MAX_WPA_IE_LEN];
+- u8 wpa_ie[255], rsn_ie[255];
+- u16 wpa_len = 0, rsn_len = 0;
+- int n;
+-
+- r8712_get_sec_ie(pnetwork->network.IEs,
+- pnetwork->network.IELength, rsn_ie, &rsn_len,
+- wpa_ie, &wpa_len);
+- if (wpa_len > 0) {
+- memset(buf, 0, MAX_WPA_IE_LEN);
+- n = sprintf(buf, "wpa_ie=");
+- for (i = 0; i < wpa_len; i++) {
+- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+- "%02x", wpa_ie[i]);
+- if (n >= MAX_WPA_IE_LEN)
+- break;
+- }
+- memset(&iwe, 0, sizeof(iwe));
+- iwe.cmd = IWEVCUSTOM;
+- iwe.u.data.length = (u16)strlen(buf);
+- start = iwe_stream_add_point(info, start, stop,
+- &iwe, buf);
+- memset(&iwe, 0, sizeof(iwe));
+- iwe.cmd = IWEVGENIE;
+- iwe.u.data.length = (u16)wpa_len;
+- start = iwe_stream_add_point(info, start, stop,
+- &iwe, wpa_ie);
+- }
+- if (rsn_len > 0) {
+- memset(buf, 0, MAX_WPA_IE_LEN);
+- n = sprintf(buf, "rsn_ie=");
+- for (i = 0; i < rsn_len; i++) {
+- n += snprintf(buf + n, MAX_WPA_IE_LEN - n,
+- "%02x", rsn_ie[i]);
+- if (n >= MAX_WPA_IE_LEN)
+- break;
+- }
+- memset(&iwe, 0, sizeof(iwe));
+- iwe.cmd = IWEVCUSTOM;
+- iwe.u.data.length = strlen(buf);
+- start = iwe_stream_add_point(info, start, stop,
+- &iwe, buf);
+- memset(&iwe, 0, sizeof(iwe));
+- iwe.cmd = IWEVGENIE;
+- iwe.u.data.length = rsn_len;
+- start = iwe_stream_add_point(info, start, stop, &iwe,
+- rsn_ie);
+- }
+- }
+
+- { /* parsing WPS IE */
+- u8 wps_ie[512];
+- uint wps_ielen;
++ start = translate_scan_wpa(info, pnetwork, &iwe, start, stop);
++
++ start = translate_scan_wps(info, pnetwork, &iwe, start, stop);
+
+- if (r8712_get_wps_ie(pnetwork->network.IEs,
+- pnetwork->network.IELength,
+- wps_ie, &wps_ielen)) {
+- if (wps_ielen > 2) {
+- iwe.cmd = IWEVGENIE;
+- iwe.u.data.length = (u16)wps_ielen;
+- start = iwe_stream_add_point(info, start, stop,
+- &iwe, wps_ie);
+- }
+- }
+- }
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ rssi = r8712_signal_scale_mapping(pnetwork->network.Rssi);
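
The rtl8712 refactor above is about stack budget rather than style: translate_scan() used to keep several hundred bytes of WPA/RSN hex buffers plus a 512-byte WPS buffer in a single frame. Splitting them into noinline_for_stack helpers confines each buffer to a short-lived frame. The shape of the pattern, with illustrative names:

    #include <stdint.h>
    #include <stdio.h>

    /* noinline_for_stack expands to plain noinline in the kernel; it stops
     * the compiler from inlining the helper back into its caller, which
     * would merge the big buffer into the caller's frame again. */
    #define noinline_for_stack __attribute__((noinline))

    static noinline_for_stack char *emit_ie_hex(char *start, char *stop,
                                                const uint8_t *ie, size_t ie_len)
    {
            char buf[512];        /* lives only for the duration of this call */
            size_t i, n = 0;

            for (i = 0; i < ie_len && n < sizeof(buf) - 1; i++)
                    n += snprintf(buf + n, sizeof(buf) - n, "%02x", ie[i]);
            /* ... append buf to the [start, stop) event stream ... */
            return start;
    }
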
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+index 68f08dc18da9..5e9187edeef4 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+@@ -336,16 +336,13 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
+ return;
+ } else if (length == 0) {
+ /* stream ended */
+- if (buf) {
+- /* this should only ever happen if the port is
+- * disabled and there are buffers still queued
++ if (dev->capture.frame_count) {
++ /* empty buffer whilst capturing - expected to be an
++ * EOS, so grab another frame
+ */
+- vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+- pr_debug("Empty buffer");
+- } else if (dev->capture.frame_count) {
+- /* grab another frame */
+ if (is_capturing(dev)) {
+- pr_debug("Grab another frame");
++ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
++ "Grab another frame");
+ vchiq_mmal_port_parameter_set(
+ instance,
+ dev->capture.camera_port,
+@@ -353,8 +350,14 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
+ }
++ if (vchiq_mmal_submit_buffer(instance, port, buf))
++ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
++ "Failed to return EOS buffer");
+ } else {
+- /* signal frame completion */
++ /* Stopping streaming:
++ * return the buffer and signal frame completion
++ */
++ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ complete(&dev->capture.frame_cmplt);
+ }
+ } else {
+@@ -576,6 +579,7 @@ static void stop_streaming(struct vb2_queue *vq)
+ int ret;
+ unsigned long timeout;
+ struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
++ struct vchiq_mmal_port *port = dev->capture.port;
+
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
+ __func__, dev);
+@@ -599,12 +603,6 @@ static void stop_streaming(struct vb2_queue *vq)
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
+
+- /* wait for last frame to complete */
+- timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt, HZ);
+- if (timeout == 0)
+- v4l2_err(&dev->v4l2_dev,
+- "timed out waiting for frame completion\n");
+-
+ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
+ "disabling connection\n");
+
+@@ -619,6 +617,21 @@ static void stop_streaming(struct vb2_queue *vq)
+ ret);
+ }
+
++ /* wait for all buffers to be returned */
++ while (atomic_read(&port->buffers_with_vpu)) {
++ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
++ "%s: Waiting for buffers to be returned - %d outstanding\n",
++ __func__, atomic_read(&port->buffers_with_vpu));
++ timeout = wait_for_completion_timeout(&dev->capture.frame_cmplt,
++ HZ);
++ if (timeout == 0) {
++ v4l2_err(&dev->v4l2_dev, "%s: Timeout waiting for buffers to be returned - %d outstanding\n",
++ __func__,
++ atomic_read(&port->buffers_with_vpu));
++ break;
++ }
++ }
++
+ if (disable_camera(dev) < 0)
+ v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
+ }
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+index dade79738a29..12ac3ef61fe6 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
+@@ -603,15 +603,28 @@ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+ struct v4l2_ctrl *ctrl,
+ const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ {
++ int ret;
+ struct vchiq_mmal_port *encoder_out;
+
+ dev->capture.encode_bitrate = ctrl->val;
+
+ encoder_out = &dev->component[MMAL_COMPONENT_VIDEO_ENCODE]->output[0];
+
+- return vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
+- mmal_ctrl->mmal_id, &ctrl->val,
+- sizeof(ctrl->val));
++ ret = vchiq_mmal_port_parameter_set(dev->instance, encoder_out,
++ mmal_ctrl->mmal_id, &ctrl->val,
++ sizeof(ctrl->val));
++
++ v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
++ "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
++ __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
++ (ret == 0 ? 0 : -EINVAL));
++
++ /*
++ * Older firmware versions (pre July 2019) have a bug in handling
++ * MMAL_PARAMETER_VIDEO_BIT_RATE that results in the call
++ * returning -MMAL_MSG_STATUS_EINVAL. So ignore errors from this call.
++ */
++ return 0;
+ }
+
+ static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+index 16af735af5c3..29761f6c3b55 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+@@ -161,7 +161,8 @@ struct vchiq_mmal_instance {
+ void *bulk_scratch;
+
+ struct idr context_map;
+- spinlock_t context_map_lock;
++ /* protect accesses to context_map */
++ struct mutex context_map_lock;
+
+ /* component to use next */
+ int component_idx;
+@@ -184,10 +185,10 @@ get_msg_context(struct vchiq_mmal_instance *instance)
+ * that when we service the VCHI reply, we can look up what
+ * message is being replied to.
+ */
+- spin_lock(&instance->context_map_lock);
++ mutex_lock(&instance->context_map_lock);
+ handle = idr_alloc(&instance->context_map, msg_context,
+ 0, 0, GFP_KERNEL);
+- spin_unlock(&instance->context_map_lock);
++ mutex_unlock(&instance->context_map_lock);
+
+ if (handle < 0) {
+ kfree(msg_context);
+@@ -211,9 +212,9 @@ release_msg_context(struct mmal_msg_context *msg_context)
+ {
+ struct vchiq_mmal_instance *instance = msg_context->instance;
+
+- spin_lock(&instance->context_map_lock);
++ mutex_lock(&instance->context_map_lock);
+ idr_remove(&instance->context_map, msg_context->handle);
+- spin_unlock(&instance->context_map_lock);
++ mutex_unlock(&instance->context_map_lock);
+ kfree(msg_context);
+ }
+
+@@ -239,6 +240,8 @@ static void buffer_work_cb(struct work_struct *work)
+ struct mmal_msg_context *msg_context =
+ container_of(work, struct mmal_msg_context, u.bulk.work);
+
++ atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
++
+ msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
+ msg_context->u.bulk.port,
+ msg_context->u.bulk.status,
+@@ -287,8 +290,6 @@ static int bulk_receive(struct vchiq_mmal_instance *instance,
+
+ /* store length */
+ msg_context->u.bulk.buffer_used = rd_len;
+- msg_context->u.bulk.mmal_flags =
+- msg->u.buffer_from_host.buffer_header.flags;
+ msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
+ msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
+
+@@ -379,6 +380,8 @@ buffer_from_host(struct vchiq_mmal_instance *instance,
+ /* initialise work structure ready to schedule callback */
+ INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
+
++ atomic_inc(&port->buffers_with_vpu);
++
+ /* prep the buffer from host message */
+ memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
+
+@@ -447,6 +450,9 @@ static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
+ return;
+ }
+
++ msg_context->u.bulk.mmal_flags =
++ msg->u.buffer_from_host.buffer_header.flags;
++
+ if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
+ /* message reception had an error */
+ pr_warn("error %d in reply\n", msg->h.status);
+@@ -1323,16 +1329,6 @@ static int port_enable(struct vchiq_mmal_instance *instance,
+ if (port->enabled)
+ return 0;
+
+- /* ensure there are enough buffers queued to cover the buffer headers */
+- if (port->buffer_cb) {
+- hdr_count = 0;
+- list_for_each(buf_head, &port->buffers) {
+- hdr_count++;
+- }
+- if (hdr_count < port->current_buffer.num)
+- return -ENOSPC;
+- }
+-
+ ret = port_action_port(instance, port,
+ MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
+ if (ret)
+@@ -1849,7 +1845,7 @@ int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
+
+ instance->bulk_scratch = vmalloc(PAGE_SIZE);
+
+- spin_lock_init(&instance->context_map_lock);
++ mutex_init(&instance->context_map_lock);
+ idr_init_base(&instance->context_map, 1);
+
+ params.callback_param = instance;
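
The spinlock-to-mutex switch in mmal-vchiq.c is forced by the allocation flags: idr_alloc(..., GFP_KERNEL) may sleep, and sleeping while a spinlock is held is a bug, so the map needs a sleepable lock. The resulting idiom, reduced to its essentials:

    static DEFINE_MUTEX(map_lock);        /* protects ctx_map */
    static DEFINE_IDR(ctx_map);

    static int ctx_handle_alloc(void *ctx)
    {
            int handle;

            mutex_lock(&map_lock);        /* may sleep; fine for a mutex */
            handle = idr_alloc(&ctx_map, ctx, 0, 0, GFP_KERNEL);
            mutex_unlock(&map_lock);
            return handle;                /* negative errno on failure */
    }

Had the section needed to stay atomic instead, the usual alternative is idr_preload() around the lock with GFP_NOWAIT inside it.
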
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+index 22b839ecd5f0..b0ee1716525b 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
++++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+@@ -71,6 +71,9 @@ struct vchiq_mmal_port {
+ struct list_head buffers;
+ /* lock to serialise adding and removing buffers from list */
+ spinlock_t slock;
++
++ /* Count of buffers the VPU has yet to return */
++ atomic_t buffers_with_vpu;
+ /* callback on buffer completion */
+ vchiq_mmal_buffer_cb buffer_cb;
+ /* callback context */
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+index c557c9953724..aa20fcaefa9d 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+@@ -523,7 +523,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
+ (g_cache_line_size - 1)))) {
+ char *fragments;
+
+- if (down_killable(&g_free_fragments_sema)) {
++ if (down_interruptible(&g_free_fragments_sema) != 0) {
+ cleanup_pagelistinfo(pagelistinfo);
+ return NULL;
+ }
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+index ab7d6a0ce94c..62d8f599e765 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+@@ -532,7 +532,8 @@ add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
+ vchiq_log_trace(vchiq_arm_log_level,
+ "%s - completion queue full", __func__);
+ DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
+- if (wait_for_completion_killable(&instance->remove_event)) {
++ if (wait_for_completion_interruptible(
++ &instance->remove_event)) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "service_callback interrupted");
+ return VCHIQ_RETRY;
+@@ -643,7 +644,7 @@ service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
+ }
+
+ DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &user_service->remove_event)
+ != 0) {
+ vchiq_log_info(vchiq_arm_log_level,
+@@ -978,7 +979,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ has been closed until the client library calls the
+ CLOSE_DELIVERED ioctl, signalling close_event. */
+ if (user_service->close_pending &&
+- wait_for_completion_killable(
++ wait_for_completion_interruptible(
+ &user_service->close_event))
+ status = VCHIQ_RETRY;
+ break;
+@@ -1154,7 +1155,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+
+ DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+ mutex_unlock(&instance->completion_mutex);
+- rc = wait_for_completion_killable(
++ rc = wait_for_completion_interruptible(
+ &instance->insert_event);
+ mutex_lock(&instance->completion_mutex);
+ if (rc != 0) {
+@@ -1324,7 +1325,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ do {
+ spin_unlock(&msg_queue_spinlock);
+ DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &user_service->insert_event)) {
+ vchiq_log_info(vchiq_arm_log_level,
+ "DEQUEUE_MESSAGE interrupted");
+@@ -2328,7 +2329,7 @@ vchiq_keepalive_thread_func(void *v)
+ while (1) {
+ long rc = 0, uc = 0;
+
+- if (wait_for_completion_killable(&arm_state->ka_evt)
++ if (wait_for_completion_interruptible(&arm_state->ka_evt)
+ != 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+@@ -2579,7 +2580,7 @@ block_resume(struct vchiq_arm_state *arm_state)
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
+ "blocked clients", __func__);
+- if (wait_for_completion_killable_timeout(
++ if (wait_for_completion_interruptible_timeout(
+ &arm_state->blocked_blocker, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+@@ -2605,7 +2606,7 @@ block_resume(struct vchiq_arm_state *arm_state)
+ write_unlock_bh(&arm_state->susp_res_lock);
+ vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
+ __func__);
+- if (wait_for_completion_killable_timeout(
++ if (wait_for_completion_interruptible_timeout(
+ &arm_state->vc_resume_complete, timeout_val)
+ <= 0) {
+ vchiq_log_error(vchiq_susp_log_level, "%s wait for "
+@@ -2812,7 +2813,7 @@ vchiq_arm_force_suspend(struct vchiq_state *state)
+ do {
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+- rc = wait_for_completion_killable_timeout(
++ rc = wait_for_completion_interruptible_timeout(
+ &arm_state->vc_suspend_complete,
+ msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
+
+@@ -2908,7 +2909,7 @@ vchiq_arm_allow_resume(struct vchiq_state *state)
+ write_unlock_bh(&arm_state->susp_res_lock);
+
+ if (resume) {
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &arm_state->vc_resume_complete) < 0) {
+ vchiq_log_error(vchiq_susp_log_level,
+ "%s interrupted", __func__);
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+index 0c387b6473a5..44bfa890e0e5 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+@@ -395,13 +395,21 @@ remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
+ init_waitqueue_head(wq);
+ }
+
++/*
++ * All the event waiting routines in VCHIQ used a custom semaphore
++ * implementation that filtered most signals. This achieved a behaviour similar
++ * to the "killable" family of functions. While cleaning up this code all the
++ * routines where switched to the "interruptible" family of functions, as the
++ * former was deemed unjustified and the use "killable" set all VCHIQ's
++ * threads in D state.
++ */
+ static inline int
+ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
+ {
+ if (!event->fired) {
+ event->armed = 1;
+ dsb(sy);
+- if (wait_event_killable(*wq, event->fired)) {
++ if (wait_event_interruptible(*wq, event->fired)) {
+ event->armed = 0;
+ return 0;
+ }
+@@ -560,7 +568,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
+ remote_event_signal(&state->remote->trigger);
+
+ if (!is_blocking ||
+- (wait_for_completion_killable(
++ (wait_for_completion_interruptible(
+ &state->slot_available_event)))
+ return NULL; /* No space available */
+ }
+@@ -830,7 +838,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
+ spin_unlock("a_spinlock);
+ mutex_unlock(&state->slot_mutex);
+
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &state->data_quota_event))
+ return VCHIQ_RETRY;
+
+@@ -861,7 +869,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
+ service_quota->slot_use_count);
+ VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
+ mutex_unlock(&state->slot_mutex);
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &service_quota->quota_event))
+ return VCHIQ_RETRY;
+ if (service->closing)
+@@ -1710,7 +1718,8 @@ parse_rx_slots(struct vchiq_state *state)
+ &service->bulk_rx : &service->bulk_tx;
+
+ DEBUG_TRACE(PARSE_LINE);
+- if (mutex_lock_killable(&service->bulk_mutex)) {
++ if (mutex_lock_killable(
++ &service->bulk_mutex) != 0) {
+ DEBUG_TRACE(PARSE_LINE);
+ goto bail_not_ready;
+ }
+@@ -2428,7 +2437,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
+ QMFLAGS_IS_BLOCKING);
+ if (status == VCHIQ_SUCCESS) {
+ /* Wait for the ACK/NAK */
+- if (wait_for_completion_killable(&service->remove_event)) {
++ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = VCHIQ_RETRY;
+ vchiq_release_service_internal(service);
+ } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
+@@ -2795,7 +2804,7 @@ vchiq_connect_internal(struct vchiq_state *state, VCHIQ_INSTANCE_T instance)
+ }
+
+ if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
+- if (wait_for_completion_killable(&state->connect))
++ if (wait_for_completion_interruptible(&state->connect))
+ return VCHIQ_RETRY;
+
+ vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
+@@ -2894,7 +2903,7 @@ vchiq_close_service(VCHIQ_SERVICE_HANDLE_T handle)
+ }
+
+ while (1) {
+- if (wait_for_completion_killable(&service->remove_event)) {
++ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+@@ -2955,7 +2964,7 @@ vchiq_remove_service(VCHIQ_SERVICE_HANDLE_T handle)
+ request_poll(service->state, service, VCHIQ_POLL_REMOVE);
+ }
+ while (1) {
+- if (wait_for_completion_killable(&service->remove_event)) {
++ if (wait_for_completion_interruptible(&service->remove_event)) {
+ status = VCHIQ_RETRY;
+ break;
+ }
+@@ -3038,7 +3047,7 @@ VCHIQ_STATUS_T vchiq_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle,
+ VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+ do {
+ mutex_unlock(&service->bulk_mutex);
+- if (wait_for_completion_killable(
++ if (wait_for_completion_interruptible(
+ &service->bulk_remove_event)) {
+ status = VCHIQ_RETRY;
+ goto error_exit;
+@@ -3115,7 +3124,7 @@ waiting:
+
+ if (bulk_waiter) {
+ bulk_waiter->bulk = bulk;
+- if (wait_for_completion_killable(&bulk_waiter->event))
++ if (wait_for_completion_interruptible(&bulk_waiter->event))
+ status = VCHIQ_RETRY;
+ else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+ status = VCHIQ_ERROR;
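
The comment added to vchiq_core.c above summarises the whole sweep: wait_for_completion_killable() wakes only for fatal signals and leaves the task counted as uninterruptible (D state), while the _interruptible variants wake on any signal. The caller-side contract is the same for both families; a non-zero return means a signal arrived, not that the work completed:

    /* Sketch of the calling convention used throughout the hunks above. */
    static int wait_for_reply(struct completion *done)
    {
            if (wait_for_completion_interruptible(done))
                    return -ERESTARTSYS;        /* interrupted; retry or abort */
            return 0;                           /* completed normally */
    }
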
+diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+index 6c519d8e48cb..8ee85c5e6f77 100644
+--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c
+@@ -50,7 +50,7 @@ void vchiu_queue_push(struct vchiu_queue *queue, struct vchiq_header *header)
+ return;
+
+ while (queue->write == queue->read + queue->size) {
+- if (wait_for_completion_killable(&queue->pop))
++ if (wait_for_completion_interruptible(&queue->pop))
+ flush_signals(current);
+ }
+
+@@ -63,7 +63,7 @@ void vchiu_queue_push(struct vchiu_queue *queue, struct vchiq_header *header)
+ struct vchiq_header *vchiu_queue_peek(struct vchiu_queue *queue)
+ {
+ while (queue->write == queue->read) {
+- if (wait_for_completion_killable(&queue->push))
++ if (wait_for_completion_interruptible(&queue->push))
+ flush_signals(current);
+ }
+
+@@ -77,7 +77,7 @@ struct vchiq_header *vchiu_queue_pop(struct vchiu_queue *queue)
+ struct vchiq_header *header;
+
+ while (queue->write == queue->read) {
+- if (wait_for_completion_killable(&queue->push))
++ if (wait_for_completion_interruptible(&queue->push))
+ flush_signals(current);
+ }
+
+diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/wilc_netdev.c
+index ba78c08a17f1..5338d7d2b248 100644
+--- a/drivers/staging/wilc1000/wilc_netdev.c
++++ b/drivers/staging/wilc1000/wilc_netdev.c
+@@ -530,17 +530,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
+ goto fail_locks;
+ }
+
+- if (wl->gpio_irq && init_irq(dev)) {
+- ret = -EIO;
+- goto fail_locks;
+- }
+-
+ ret = wlan_initialize_threads(dev);
+ if (ret < 0) {
+ ret = -EIO;
+ goto fail_wilc_wlan;
+ }
+
++ if (wl->gpio_irq && init_irq(dev)) {
++ ret = -EIO;
++ goto fail_threads;
++ }
++
+ if (!wl->dev_irq_num &&
+ wl->hif_func->enable_interrupt &&
+ wl->hif_func->enable_interrupt(wl)) {
+@@ -596,7 +596,7 @@ fail_irq_enable:
+ fail_irq_init:
+ if (wl->dev_irq_num)
+ deinit_irq(dev);
+-
++fail_threads:
+ wlan_deinitialize_threads(dev);
+ fail_wilc_wlan:
+ wilc_wlan_cleanup(dev);
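
The wilc1000 fix is an instance of the general rule for kernel init paths: acquire resources in order, and make each error label unwind exactly what was acquired before it, in reverse. Once init_irq() moved after the thread setup, its failure path needed the new fail_threads label so the threads get torn down too. Schematically (error handling around the real calls is simplified):

    static int init_everything(struct net_device *dev)
    {
            int ret;

            ret = wlan_initialize_threads(dev);        /* step 1 */
            if (ret)
                    goto fail_wilc_wlan;

            ret = init_irq(dev);                       /* step 2, after threads */
            if (ret)
                    goto fail_threads;

            return 0;

    fail_threads:
            wlan_deinitialize_threads(dev);            /* undo step 1 */
    fail_wilc_wlan:
            wilc_wlan_cleanup(dev);
            return ret;
    }
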
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index d2f3310abe54..682300713be4 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1869,8 +1869,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (status & (UART_LSR_DR | UART_LSR_BI) &&
+- iir & UART_IIR_RDI) {
++ if (status & (UART_LSR_DR | UART_LSR_BI)) {
+ if (!up->dma || handle_rx_dma(up, iir))
+ status = serial8250_rx_chars(up, status);
+ }
+diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
+index 8b499d643461..8e41d70fd298 100644
+--- a/drivers/usb/dwc2/core.c
++++ b/drivers/usb/dwc2/core.c
+@@ -531,7 +531,7 @@ int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait)
+ }
+
+ /* Wait for AHB master IDLE state */
+- if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 50)) {
++ if (dwc2_hsotg_wait_bit_set(hsotg, GRSTCTL, GRSTCTL_AHBIDLE, 10000)) {
+ dev_warn(hsotg->dev, "%s: HANG! AHB Idle timeout GRSTCTL GRSTCTL_AHBIDLE\n",
+ __func__);
+ return -EBUSY;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 47be961f1bf3..c7ed90084d1a 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -997,7 +997,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ * earlier
+ */
+ gadget = epfile->ffs->gadget;
+- io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /* In the meantime, endpoint got disabled or changed. */
+@@ -1012,6 +1011,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ */
+ if (io_data->read)
+ data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
++
++ io_data->use_sg = gadget->sg_supported && data_len > PAGE_SIZE;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+
+ data = ffs_alloc_buffer(io_data, data_len);
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index 737bd77a575d..2929bb47a618 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -186,11 +186,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+ out = dev->port_usb->out_ep;
+ else
+ out = NULL;
+- spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!out)
++ {
++ spin_unlock_irqrestore(&dev->lock, flags);
+ return -ENOTCONN;
+-
++ }
+
+ /* Padding up to RX_EXTRA handles minor disagreements with host.
+ * Normally we use the USB "terminate on short read" convention;
+@@ -214,6 +215,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+
+ if (dev->port_usb->is_fixed)
+ size = max_t(size_t, size, dev->port_usb->fixed_out_len);
++ spin_unlock_irqrestore(&dev->lock, flags);
+
+ skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
+ if (skb == NULL) {
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 39fa2fc1b8b7..6036cbae8c78 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -802,9 +802,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+ }
+
+ static void usbhsf_dma_complete(void *arg);
+-static void xfer_work(struct work_struct *work)
++static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
+ {
+- struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+ struct usbhs_pipe *pipe = pkt->pipe;
+ struct usbhs_fifo *fifo;
+ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+@@ -812,12 +811,10 @@ static void xfer_work(struct work_struct *work)
+ struct dma_chan *chan;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ enum dma_transfer_direction dir;
+- unsigned long flags;
+
+- usbhs_lock(priv, flags);
+ fifo = usbhs_pipe_to_fifo(pipe);
+ if (!fifo)
+- goto xfer_work_end;
++ return;
+
+ chan = usbhsf_dma_chan_get(fifo, pkt);
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+@@ -826,7 +823,7 @@ static void xfer_work(struct work_struct *work)
+ pkt->trans, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc)
+- goto xfer_work_end;
++ return;
+
+ desc->callback = usbhsf_dma_complete;
+ desc->callback_param = pipe;
+@@ -834,7 +831,7 @@ static void xfer_work(struct work_struct *work)
+ pkt->cookie = dmaengine_submit(desc);
+ if (pkt->cookie < 0) {
+ dev_err(dev, "Failed to submit dma descriptor\n");
+- goto xfer_work_end;
++ return;
+ }
+
+ dev_dbg(dev, " %s %d (%d/ %d)\n",
+@@ -845,8 +842,17 @@ static void xfer_work(struct work_struct *work)
+ dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
+ usbhs_pipe_enable(pipe);
++}
++
++static void xfer_work(struct work_struct *work)
++{
++ struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
++ struct usbhs_pipe *pipe = pkt->pipe;
++ struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++ unsigned long flags;
+
+-xfer_work_end:
++ usbhs_lock(priv, flags);
++ usbhsf_dma_xfer_preparing(pkt);
+ usbhs_unlock(priv, flags);
+ }
+
+@@ -899,8 +905,13 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ pkt->trans = len;
+
+ usbhsf_tx_irq_ctrl(pipe, 0);
+- INIT_WORK(&pkt->work, xfer_work);
+- schedule_work(&pkt->work);
++ /* FIXME: Workaround so the driver can be used in atomic context with USB-DMAC */
++ if (usbhs_get_dparam(priv, has_usb_dmac)) {
++ usbhsf_dma_xfer_preparing(pkt);
++ } else {
++ INIT_WORK(&pkt->work, xfer_work);
++ schedule_work(&pkt->work);
++ }
+
+ return 0;
+
+@@ -1006,8 +1017,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
+
+ pkt->trans = pkt->length;
+
+- INIT_WORK(&pkt->work, xfer_work);
+- schedule_work(&pkt->work);
++ usbhsf_dma_xfer_preparing(pkt);
+
+ return 0;
+
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 1d8461ae2c34..23669a584bae 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1029,6 +1029,7 @@ static const struct usb_device_id id_table_combined[] = {
+ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+ /* EZPrototypes devices */
+ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
++ { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+ { } /* Terminating entry */
+ };
+
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 5755f0df0025..f12d806220b4 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -1543,3 +1543,9 @@
+ #define CHETCO_SEASMART_DISPLAY_PID 0xA5AD /* SeaSmart NMEA2000 Display */
+ #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */
+ #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */
++
++/*
++ * Unjo AB
++ */
++#define UNJO_VID 0x22B7
++#define UNJO_ISODEBUG_V1_PID 0x150D
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a0aaf0635359..c1582fbd1150 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1343,6 +1343,7 @@ static const struct usb_device_id option_ids[] = {
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0601, 0xff) }, /* GosunCn ZTE WeLink ME3630 (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
+diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
+index c674abe3cf99..a38d1409f15b 100644
+--- a/drivers/usb/typec/tps6598x.c
++++ b/drivers/usb/typec/tps6598x.c
+@@ -41,7 +41,7 @@
+ #define TPS_STATUS_VCONN(s) (!!((s) & BIT(7)))
+
+ /* TPS_REG_SYSTEM_CONF bits */
+-#define TPS_SYSCONF_PORTINFO(c) ((c) & 3)
++#define TPS_SYSCONF_PORTINFO(c) ((c) & 7)
+
+ enum {
+ TPS_PORTINFO_SINK,
+@@ -127,7 +127,7 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
+ }
+
+ static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
+- void *val, size_t len)
++ const void *val, size_t len)
+ {
+ u8 data[TPS_MAX_LEN + 1];
+
+@@ -173,7 +173,7 @@ static inline int tps6598x_write64(struct tps6598x *tps, u8 reg, u64 val)
+ static inline int
+ tps6598x_write_4cc(struct tps6598x *tps, u8 reg, const char *val)
+ {
+- return tps6598x_block_write(tps, reg, &val, sizeof(u32));
++ return tps6598x_block_write(tps, reg, val, 4);
+ }
+
+ static int tps6598x_read_partner_identity(struct tps6598x *tps)
+diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
+index d536889ac31b..4941fe8471ce 100644
+--- a/fs/crypto/policy.c
++++ b/fs/crypto/policy.c
+@@ -81,6 +81,8 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg)
+ if (ret == -ENODATA) {
+ if (!S_ISDIR(inode->i_mode))
+ ret = -ENOTDIR;
++ else if (IS_DEADDIR(inode))
++ ret = -ENOENT;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+ ret = -ENOTEMPTY;
+ else
+diff --git a/fs/iomap.c b/fs/iomap.c
+index 12654c2e78f8..da961fca3180 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -333,7 +333,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ if (iop)
+ atomic_inc(&iop->read_count);
+
+- if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
++ if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
+ gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index e7276932e433..9bb18311a22f 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -470,13 +470,15 @@ static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
+ return NULL;
+ }
+
+-/* Extend the file by 'blocks' blocks, return the number of extents added */
++/* Extend the file with new blocks totaling 'new_block_bytes' bytes,
++ * return the number of extents added
++ */
+ static int udf_do_extend_file(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+- sector_t blocks)
++ loff_t new_block_bytes)
+ {
+- sector_t add;
++ uint32_t add;
+ int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
+ struct super_block *sb = inode->i_sb;
+ struct kernel_lb_addr prealloc_loc = {};
+@@ -486,7 +488,7 @@ static int udf_do_extend_file(struct inode *inode,
+
+ /* The previous extent is fake and we should not extend by anything
+ * - there's nothing to do... */
+- if (!blocks && fake)
++ if (!new_block_bytes && fake)
+ return 0;
+
+ iinfo = UDF_I(inode);
+@@ -517,13 +519,12 @@ static int udf_do_extend_file(struct inode *inode,
+ /* Can we merge with the previous extent? */
+ if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
+ EXT_NOT_RECORDED_NOT_ALLOCATED) {
+- add = ((1 << 30) - sb->s_blocksize -
+- (last_ext->extLength & UDF_EXTENT_LENGTH_MASK)) >>
+- sb->s_blocksize_bits;
+- if (add > blocks)
+- add = blocks;
+- blocks -= add;
+- last_ext->extLength += add << sb->s_blocksize_bits;
++ add = (1 << 30) - sb->s_blocksize -
++ (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
++ if (add > new_block_bytes)
++ add = new_block_bytes;
++ new_block_bytes -= add;
++ last_ext->extLength += add;
+ }
+
+ if (fake) {
+@@ -544,28 +545,27 @@ static int udf_do_extend_file(struct inode *inode,
+ }
+
+ /* Managed to do everything necessary? */
+- if (!blocks)
++ if (!new_block_bytes)
+ goto out;
+
+ /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
+ last_ext->extLocation.logicalBlockNum = 0;
+ last_ext->extLocation.partitionReferenceNum = 0;
+- add = (1 << (30-sb->s_blocksize_bits)) - 1;
+- last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (add << sb->s_blocksize_bits);
++ add = (1 << 30) - sb->s_blocksize;
++ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
+
+ /* Create enough extents to cover the whole hole */
+- while (blocks > add) {
+- blocks -= add;
++ while (new_block_bytes > add) {
++ new_block_bytes -= add;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
+ count++;
+ }
+- if (blocks) {
++ if (new_block_bytes) {
+ last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
+- (blocks << sb->s_blocksize_bits);
++ new_block_bytes;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+@@ -596,6 +596,24 @@ out:
+ return count;
+ }
+
++/* Extend the final block of the file to final_block_len bytes */
++static void udf_do_extend_final_block(struct inode *inode,
++ struct extent_position *last_pos,
++ struct kernel_long_ad *last_ext,
++ uint32_t final_block_len)
++{
++ struct super_block *sb = inode->i_sb;
++ uint32_t added_bytes;
++
++ added_bytes = final_block_len -
++ (last_ext->extLength & (sb->s_blocksize - 1));
++ last_ext->extLength += added_bytes;
++ UDF_I(inode)->i_lenExtents += added_bytes;
++
++ udf_write_aext(inode, last_pos, &last_ext->extLocation,
++ last_ext->extLength, 1);
++}
++
+ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ {
+
+@@ -605,10 +623,12 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
++ unsigned long partial_final_block;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+- int err;
++ int err = 0;
++ int within_final_block;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -618,18 +638,8 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ BUG();
+
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
++ within_final_block = (etype != -1);
+
+- /* File has extent covering the new size (could happen when extending
+- * inside a block)? */
+- if (etype != -1)
+- return 0;
+- if (newsize & (sb->s_blocksize - 1))
+- offset++;
+- /* Extended file just to the boundary of the last file block? */
+- if (offset == 0)
+- return 0;
+-
+- /* Truncate is extending the file by 'offset' blocks */
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+ /* File has no extents at all or has empty last
+@@ -643,7 +653,22 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ &extent.extLength, 0);
+ extent.extLength |= etype << 30;
+ }
+- err = udf_do_extend_file(inode, &epos, &extent, offset);
++
++ partial_final_block = newsize & (sb->s_blocksize - 1);
++
++ /* File has extent covering the new size (could happen when extending
++ * inside a block)?
++ */
++ if (within_final_block) {
++ /* Extending file within the last file block */
++ udf_do_extend_final_block(inode, &epos, &extent,
++ partial_final_block);
++ } else {
++ loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
++ partial_final_block;
++ err = udf_do_extend_file(inode, &epos, &extent, add);
++ }
++
+ if (err < 0)
+ goto out;
+ err = 0;
+@@ -745,6 +770,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ /* Are we beyond EOF? */
+ if (etype == -1) {
+ int ret;
++ loff_t hole_len;
+ isBeyondEOF = true;
+ if (count) {
+ if (c)
+@@ -760,7 +786,8 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
+ startnum = (offset > 0);
+ }
+ /* Create extents for the hole between EOF and offset */
+- ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
++ hole_len = (loff_t)offset << inode->i_blkbits;
++ ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
+ if (ret < 0) {
+ *err = ret;
+ newblock = 0;
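
The UDF interface change above (block counts in sector_t becoming byte counts in loff_t) exists so a partial final block can be represented at all; a block-granular API had to round everything up. The recurring (1 << 30) terms come from the 30-bit extent length field. A quick sanity check under an assumed 2048-byte UDF block size:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long bs = 2048;        /* assumed UDF block size */

            /* largest block-aligned payload one extent descriptor can hold */
            printf("max extent bytes: %lu\n", (1UL << 30) - bs); /* 1073739776 */

            /* extending to byte 5000: two whole blocks plus a 904-byte tail */
            unsigned long newsize = 5000;
            printf("blocks: %lu, tail bytes: %lu\n",
                   newsize / bs, newsize % bs);                  /* 2, 904 */
            return 0;
    }
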
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index 8da5e6637771..11f703d4a605 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -782,7 +782,7 @@ xfs_add_to_ioend(
+ atomic_inc(&iop->write_count);
+
+ if (!merged) {
+- if (bio_full(wpc->ioend->io_bio))
++ if (bio_full(wpc->ioend->io_bio, len))
+ xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
+ bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ }
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index f87abaa898f0..e36b8fc1b1c3 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -102,9 +102,23 @@ static inline void *bio_data(struct bio *bio)
+ return NULL;
+ }
+
+-static inline bool bio_full(struct bio *bio)
++/**
++ * bio_full - check if the bio is full
++ * @bio: bio to check
++ * @len: length of one segment to be added
++ *
++ * Return true if @bio is full and one segment with @len bytes can't be
++ * added to the bio, otherwise return false
++ */
++static inline bool bio_full(struct bio *bio, unsigned len)
+ {
+- return bio->bi_vcnt >= bio->bi_max_vecs;
++ if (bio->bi_vcnt >= bio->bi_max_vecs)
++ return true;
++
++ if (bio->bi_iter.bi_size > UINT_MAX - len)
++ return true;
++
++ return false;
+ }
+
+ static inline bool bio_next_segment(const struct bio *bio,
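
The second test added to bio_full() protects bi_size, an unsigned int, from wrap-around: it asks whether bi_size + len would exceed UINT_MAX without ever computing the sum (which would itself wrap before any comparison). The standard overflow-safe form:

    #include <limits.h>
    #include <stdbool.h>

    /* true iff a + b would exceed UINT_MAX, i.e. wrap if actually added */
    static bool add_would_overflow(unsigned int a, unsigned int b)
    {
            return a > UINT_MAX - b;
    }
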
+diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
+index 77ac9c7b9483..762f793e92f6 100644
+--- a/include/linux/vmw_vmci_defs.h
++++ b/include/linux/vmw_vmci_defs.h
+@@ -62,9 +62,18 @@ enum {
+
+ /*
+ * A single VMCI device has an upper limit of 128MB on the amount of
+- * memory that can be used for queue pairs.
++ * memory that can be used for queue pairs. Since each queue pair
++ * consists of at least two pages, the memory limit also dictates the
++ * number of queue pairs a guest can create.
+ */
+ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
++#define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)
++
++/*
++ * There can be at most PAGE_SIZE doorbells since there is one doorbell
++ * per byte in the doorbell bitmap page.
++ */
++#define VMCI_MAX_GUEST_DOORBELL_COUNT PAGE_SIZE
+
+ /*
+ * Queues with pre-mapped data pages must be small, so that we don't pin
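+
The new macros bound queue pairs by the 128MB memory limit (two pages minimum per pair) and doorbells by the one-byte-per-doorbell bitmap page. A quick standalone check of the resulting numbers, assuming 4 KiB pages -- illustration only, mirroring the macros above:

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
	#define VMCI_MAX_GUEST_QP_COUNT  (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)

	int main(void)
	{
		/* 128 MiB / 4 KiB pages / 2 pages per queue pair = 16384 */
		printf("max queue pairs: %d\n", VMCI_MAX_GUEST_QP_COUNT);
		printf("max doorbells:   %d\n", PAGE_SIZE); /* one per bitmap byte */
		return 0;
	}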
+diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
+index ddc5396800aa..76b7c3f6cd0d 100644
+--- a/include/uapi/linux/usb/audio.h
++++ b/include/uapi/linux/usb/audio.h
+@@ -450,6 +450,43 @@ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_desc
+ }
+ }
+
++/*
++ * Extension Unit (XU) has almost compatible layout with Processing Unit, but
++ * on UAC2, it has a different bmControls size (bControlSize); it's 1 byte for
++ * XU while 2 bytes for PU. The last iExtension field is a one-byte index as
++ * well as iProcessing field of PU.
++ */
++static inline __u8 uac_extension_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
++ int protocol)
++{
++ switch (protocol) {
++ case UAC_VERSION_1:
++ return desc->baSourceID[desc->bNrInPins + 4];
++ case UAC_VERSION_2:
++ return 1; /* in UAC2, this value is constant */
++ case UAC_VERSION_3:
++ return 4; /* in UAC3, this value is constant */
++ default:
++ return 1;
++ }
++}
++
++static inline __u8 uac_extension_unit_iExtension(struct uac_processing_unit_descriptor *desc,
++ int protocol)
++{
++ __u8 control_size = uac_extension_unit_bControlSize(desc, protocol);
++
++ switch (protocol) {
++ case UAC_VERSION_1:
++ case UAC_VERSION_2:
++ default:
++ return *(uac_processing_unit_bmControls(desc, protocol)
++ + control_size);
++ case UAC_VERSION_3:
++ return 0; /* UAC3 does not have this field */
++ }
++}
++
+ /* 4.5.2 Class-Specific AS Interface Descriptor */
+ struct uac1_as_header_descriptor {
+ __u8 bLength; /* in bytes: 7 */
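+
The helpers added above locate bControlSize and iExtension inside a raw Extension Unit descriptor reinterpreted through the Processing Unit layout. A hypothetical userspace sketch of the UAC1 offset math, assuming the spec's field order between baSourceID[] and bControlSize (bNrChannels, wChannelConfig, iChannelNames); the function name and values here are illustrative, not the kernel helpers:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t uac1_xu_control_size(const uint8_t *ba_source_id,
					    uint8_t num_in_pins)
	{
		/* skip the source IDs, then bNrChannels, wChannelConfig
		 * (2 bytes) and iChannelNames to land on bControlSize */
		return ba_source_id[num_in_pins + 4];
	}

	int main(void)
	{
		/* one input pin (unit ID 5), stereo, wChannelConfig 0x0003,
		 * iChannelNames 0, bControlSize 1 */
		const uint8_t tail[] = { 5, 2, 0x03, 0x00, 0, 1 };

		printf("bControlSize = %u\n", uac1_xu_control_size(tail, 1)); /* 1 */
		return 0;
	}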
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6f3a35949cdd..f24a757f8239 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3255,6 +3255,7 @@ static void alc256_init(struct hda_codec *codec)
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
+ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */
+ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15);
++ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ }
+
+ static void alc256_shutup(struct hda_codec *codec)
+@@ -7825,7 +7826,6 @@ static int patch_alc269(struct hda_codec *codec)
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+- alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
+ break;
+ case 0x10ec0257:
+ spec->codec_variant = ALC269_TYPE_ALC257;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index c703f8534b07..7498b5191b68 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2303,7 +2303,7 @@ static struct procunit_info extunits[] = {
+ */
+ static int build_audio_procunit(struct mixer_build *state, int unitid,
+ void *raw_desc, struct procunit_info *list,
+- char *name)
++ bool extension_unit)
+ {
+ struct uac_processing_unit_descriptor *desc = raw_desc;
+ int num_ins;
+@@ -2320,6 +2320,8 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+ static struct procunit_info default_info = {
+ 0, NULL, default_value_info
+ };
++ const char *name = extension_unit ?
++ "Extension Unit" : "Processing Unit";
+
+ if (desc->bLength < 13) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+@@ -2433,7 +2435,10 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
+ } else if (info->name) {
+ strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name));
+ } else {
+- nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
++ if (extension_unit)
++ nameid = uac_extension_unit_iExtension(desc, state->mixer->protocol);
++ else
++ nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
+ len = 0;
+ if (nameid)
+ len = snd_usb_copy_string_desc(state->chip,
+@@ -2466,10 +2471,10 @@ static int parse_audio_processing_unit(struct mixer_build *state, int unitid,
+ case UAC_VERSION_2:
+ default:
+ return build_audio_procunit(state, unitid, raw_desc,
+- procunits, "Processing Unit");
++ procunits, false);
+ case UAC_VERSION_3:
+ return build_audio_procunit(state, unitid, raw_desc,
+- uac3_procunits, "Processing Unit");
++ uac3_procunits, false);
+ }
+ }
+
+@@ -2480,8 +2485,7 @@ static int parse_audio_extension_unit(struct mixer_build *state, int unitid,
+ * Note that we parse extension units with processing unit descriptors.
+ * That's ok as the layout is the same.
+ */
+- return build_audio_procunit(state, unitid, raw_desc,
+- extunits, "Extension Unit");
++ return build_audio_procunit(state, unitid, raw_desc, extunits, true);
+ }
+
+ /*
+diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
+index 115eaacc455f..60d99e5e7921 100644
+--- a/tools/perf/Documentation/intel-pt.txt
++++ b/tools/perf/Documentation/intel-pt.txt
+@@ -88,16 +88,16 @@ smaller.
+
+ To represent software control flow, "branches" samples are produced. By default
+ a branch sample is synthesized for every single branch. To get an idea what
+-data is available you can use the 'perf script' tool with no parameters, which
+-will list all the samples.
++data is available you can use the 'perf script' tool with all itrace sampling
++options, which will list all the samples.
+
+ perf record -e intel_pt//u ls
+- perf script
++ perf script --itrace=ibxwpe
+
+ An interesting field that is not printed by default is 'flags' which can be
+ displayed as follows:
+
+- perf script -Fcomm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr,symoff,flags
++ perf script --itrace=ibxwpe -F+flags
+
+ The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
+ system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
+@@ -713,7 +713,7 @@ Having no option is the same as
+
+ which, in turn, is the same as
+
+- --itrace=ibxwpe
++ --itrace=cepwx
+
+ The letters are:
+
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 66e82bd0683e..cfdbf65f1e02 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -1001,7 +1001,8 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
+ }
+
+ if (!str) {
+- itrace_synth_opts__set_default(synth_opts, false);
++ itrace_synth_opts__set_default(synth_opts,
++ synth_opts->default_no_sample);
+ return 0;
+ }
+
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 847ae51a524b..fb0aa661644b 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3602,6 +3602,7 @@ int perf_event__synthesize_features(struct perf_tool *tool,
+ return -ENOMEM;
+
+ ff.size = sz - sz_hdr;
++ ff.ph = &session->header;
+
+ for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
+ if (!feat_ops[feat].synthesize) {
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index d6f1b2a03f9b..f7dd4657535d 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -2579,7 +2579,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
+ } else {
+ itrace_synth_opts__set_default(&pt->synth_opts,
+ session->itrace_synth_opts->default_no_sample);
+- if (use_browser != -1) {
++ if (!session->itrace_synth_opts->default_no_sample &&
++ !session->itrace_synth_opts->inject) {
+ pt->synth_opts.branches = false;
+ pt->synth_opts.callchain = true;
+ }
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index e0429f4ef335..faa8eb231e1b 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -709,9 +709,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+ {
+ int i;
+ struct pmu_events_map *map;
+- struct pmu_event *pe;
+ const char *name = pmu->name;
+- const char *pname;
+
+ map = perf_pmu__find_map(pmu);
+ if (!map)
+@@ -722,28 +720,26 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
+ */
+ i = 0;
+ while (1) {
++ const char *cpu_name = is_arm_pmu_core(name) ? name : "cpu";
++ struct pmu_event *pe = &map->table[i++];
++ const char *pname = pe->pmu ? pe->pmu : cpu_name;
+
+- pe = &map->table[i++];
+ if (!pe->name) {
+ if (pe->metric_group || pe->metric_name)
+ continue;
+ break;
+ }
+
+- if (!is_arm_pmu_core(name)) {
+- pname = pe->pmu ? pe->pmu : "cpu";
+-
+- /*
+- * uncore alias may be from different PMU
+- * with common prefix
+- */
+- if (pmu_is_uncore(name) &&
+- !strncmp(pname, name, strlen(pname)))
+- goto new_alias;
++ /*
++ * uncore alias may be from different PMU
++ * with common prefix
++ */
++ if (pmu_is_uncore(name) &&
++ !strncmp(pname, name, strlen(pname)))
++ goto new_alias;
+
+- if (strcmp(pname, name))
+- continue;
+- }
++ if (strcmp(pname, name))
++ continue;
+
+ new_alias:
+ /* need type casts to override 'const' */
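+
The rewritten loop above resolves a comparison name for every table entry up front, so ARM core PMUs match against their own name while everything else defaults to "cpu", with the uncore common-prefix case kept. A condensed standalone sketch of the resulting matching rule (function name and sample values are illustrative):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool alias_matches(const char *pmu_name, const char *entry_pmu,
				  bool arm_core, bool uncore)
	{
		const char *cpu_name = arm_core ? pmu_name : "cpu";
		const char *pname = entry_pmu ? entry_pmu : cpu_name;

		/* an uncore alias may come from a PMU sharing a common prefix */
		if (uncore && !strncmp(pname, pmu_name, strlen(pname)))
			return true;

		return strcmp(pname, pmu_name) == 0;
	}

	int main(void)
	{
		printf("%d\n", alias_matches("cpu", NULL, false, false));    /* 1 */
		printf("%d\n", alias_matches("uncore_cbox_0", "uncore_cbox",
					     false, true));                  /* 1 */
		return 0;
	}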
+diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
+index 4ba9e866b076..60c9d955c4d7 100644
+--- a/tools/perf/util/thread-stack.c
++++ b/tools/perf/util/thread-stack.c
+@@ -616,6 +616,23 @@ static int thread_stack__bottom(struct thread_stack *ts,
+ true, false);
+ }
+
++static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
++ struct perf_sample *sample, u64 ref)
++{
++ u64 tm = sample->time;
++ int err;
++
++ /* Return to userspace, so pop all kernel addresses */
++ while (thread_stack__in_kernel(ts)) {
++ err = thread_stack__call_return(thread, ts, --ts->cnt,
++ tm, ref, true);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int thread_stack__no_call_return(struct thread *thread,
+ struct thread_stack *ts,
+ struct perf_sample *sample,
+@@ -896,7 +913,18 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
+ ts->rstate = X86_RETPOLINE_DETECTED;
+
+ } else if (sample->flags & PERF_IP_FLAG_RETURN) {
+- if (!sample->ip || !sample->addr)
++ if (!sample->addr) {
++ u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
++ PERF_IP_FLAG_INTERRUPT;
++
++ if (!(sample->flags & return_from_kernel))
++ return 0;
++
++ /* Pop kernel stack */
++ return thread_stack__pop_ks(thread, ts, sample, ref);
++ }
++
++ if (!sample->ip)
+ return 0;
+
+ /* x86 retpoline 'return' doesn't match the stack */